From c89adc86a6ca975416d8b7b5498e29a44a718e8e Mon Sep 17 00:00:00 2001 From: j0ntz Date: Tue, 24 Mar 2026 16:05:35 -0700 Subject: [PATCH 1/6] Initial bootstrap: skills, rules, scripts, and setup Synced from ~/.cursor/ with CLAUDE.md generation and skills symlink for Claude Code compatibility. Includes setup.sh for deploying to new machines. --- .claude/CLAUDE.md | 138 ++++++ .claude/skills | 1 + .cursor/.syncignore | 6 + .cursor/README.md | 364 +++++++++++++++ .cursor/commands/github-pr-hudl.sh | 389 ++++++++++++++++ .cursor/commands/hudl.md | 229 ++++++++++ .cursor/rules/after_each_chat.mdc | 24 + .cursor/rules/answer-questions-first.mdc | 18 + .cursor/rules/eslint-warnings.mdc | 10 + .cursor/rules/load-standards-by-filetype.mdc | 19 + .cursor/rules/no-format-lint.mdc | 11 + .cursor/rules/review-standards.mdc | 199 ++++++++ .cursor/rules/typescript-standards.mdc | 269 +++++++++++ .cursor/rules/workflow-halt-on-error.mdc | 84 ++++ .cursor/scripts/port-to-opencode.sh | 224 +++++++++ .cursor/scripts/pr-status-gql.sh | 429 ++++++++++++++++++ .cursor/scripts/pr-status.sh | 407 +++++++++++++++++ .cursor/scripts/pr-watch.sh | 88 ++++ .cursor/scripts/push-env-key.sh | 63 +++ .cursor/scripts/tool-sync.sh | 406 +++++++++++++++++ .cursor/skills/asana-get-context.sh | 232 ++++++++++ .cursor/skills/asana-plan/SKILL.md | 60 +++ .cursor/skills/asana-task-update/SKILL.md | 89 ++++ .../scripts/asana-task-update.sh | 268 +++++++++++ .cursor/skills/asana-whoami.sh | 48 ++ .cursor/skills/author/SKILL.md | 145 ++++++ .cursor/skills/changelog/SKILL.md | 10 + .cursor/skills/chat-audit/SKILL.md | 102 +++++ .../chat-audit/scripts/cursor-chat-extract.js | 142 ++++++ .cursor/skills/convention-sync/SKILL.md | 85 ++++ .../scripts/convention-sync.sh | 259 +++++++++++ .../scripts/generate-claude-md.sh | 85 ++++ .cursor/skills/dep-pr/SKILL.md | 103 +++++ .../dep-pr/scripts/asana-create-dep-task.sh | 245 ++++++++++ .cursor/skills/fix-eslint/SKILL.md | 108 +++++ .cursor/skills/im/SKILL.md | 164 
+++++++ .cursor/skills/im/scripts/lint-warnings.sh | 260 +++++++++++ .cursor/skills/im/scripts/reorder-commits.sh | 108 +++++ .cursor/skills/install-deps.sh | 29 ++ .cursor/skills/lint-commit.sh | 327 +++++++++++++ .cursor/skills/one-shot/SKILL.md | 70 +++ .cursor/skills/pr-address/SKILL.md | 199 ++++++++ .../skills/pr-address/scripts/pr-address.sh | 331 ++++++++++++++ .cursor/skills/pr-create/SKILL.md | 106 +++++ .cursor/skills/pr-create/scripts/pr-create.sh | 331 ++++++++++++++ .cursor/skills/pr-land/SKILL.md | 354 +++++++++++++++ .cursor/skills/pr-land/scripts/edge-repo.js | 152 +++++++ .../pr-land/scripts/pr-land-comments.sh | 189 ++++++++ .../pr-land/scripts/pr-land-discover.sh | 132 ++++++ .../scripts/pr-land-extract-asana-task.sh | 92 ++++ .../skills/pr-land/scripts/pr-land-merge.sh | 429 ++++++++++++++++++ .../skills/pr-land/scripts/pr-land-prepare.sh | 263 +++++++++++ .../skills/pr-land/scripts/pr-land-publish.sh | 293 ++++++++++++ .cursor/skills/pr-land/scripts/upgrade-dep.sh | 60 +++ .cursor/skills/pr-review/SKILL.md | 123 +++++ .../pr-review/scripts/github-pr-review.sh | 101 +++++ .cursor/skills/q/SKILL.md | 88 ++++ .cursor/skills/staging-cherry-pick/SKILL.md | 91 ++++ .../scripts/staging-cherry-pick.sh | 306 +++++++++++++ .cursor/skills/standup/SKILL.md | 255 +++++++++++ .../skills/standup/scripts/asana-standup.sh | 282 ++++++++++++ .../standup/scripts/github-pr-activity.sh | 206 +++++++++ .cursor/skills/task-review/SKILL.md | 102 +++++ .cursor/skills/verify-repo.sh | 348 ++++++++++++++ .gitignore | 5 + README.md | 33 ++ scripts/setup.sh | 84 ++++ 67 files changed, 11272 insertions(+) create mode 100644 .claude/CLAUDE.md create mode 120000 .claude/skills create mode 100644 .cursor/.syncignore create mode 100644 .cursor/README.md create mode 100755 .cursor/commands/github-pr-hudl.sh create mode 100644 .cursor/commands/hudl.md create mode 100644 .cursor/rules/after_each_chat.mdc create mode 100644 .cursor/rules/answer-questions-first.mdc create mode 
100644 .cursor/rules/eslint-warnings.mdc create mode 100644 .cursor/rules/load-standards-by-filetype.mdc create mode 100644 .cursor/rules/no-format-lint.mdc create mode 100644 .cursor/rules/review-standards.mdc create mode 100644 .cursor/rules/typescript-standards.mdc create mode 100644 .cursor/rules/workflow-halt-on-error.mdc create mode 100755 .cursor/scripts/port-to-opencode.sh create mode 100755 .cursor/scripts/pr-status-gql.sh create mode 100755 .cursor/scripts/pr-status.sh create mode 100755 .cursor/scripts/pr-watch.sh create mode 100755 .cursor/scripts/push-env-key.sh create mode 100755 .cursor/scripts/tool-sync.sh create mode 100755 .cursor/skills/asana-get-context.sh create mode 100644 .cursor/skills/asana-plan/SKILL.md create mode 100644 .cursor/skills/asana-task-update/SKILL.md create mode 100755 .cursor/skills/asana-task-update/scripts/asana-task-update.sh create mode 100755 .cursor/skills/asana-whoami.sh create mode 100644 .cursor/skills/author/SKILL.md create mode 100644 .cursor/skills/changelog/SKILL.md create mode 100644 .cursor/skills/chat-audit/SKILL.md create mode 100755 .cursor/skills/chat-audit/scripts/cursor-chat-extract.js create mode 100644 .cursor/skills/convention-sync/SKILL.md create mode 100755 .cursor/skills/convention-sync/scripts/convention-sync.sh create mode 100755 .cursor/skills/convention-sync/scripts/generate-claude-md.sh create mode 100644 .cursor/skills/dep-pr/SKILL.md create mode 100755 .cursor/skills/dep-pr/scripts/asana-create-dep-task.sh create mode 100644 .cursor/skills/fix-eslint/SKILL.md create mode 100644 .cursor/skills/im/SKILL.md create mode 100755 .cursor/skills/im/scripts/lint-warnings.sh create mode 100755 .cursor/skills/im/scripts/reorder-commits.sh create mode 100755 .cursor/skills/install-deps.sh create mode 100755 .cursor/skills/lint-commit.sh create mode 100644 .cursor/skills/one-shot/SKILL.md create mode 100644 .cursor/skills/pr-address/SKILL.md create mode 100755 
.cursor/skills/pr-address/scripts/pr-address.sh create mode 100644 .cursor/skills/pr-create/SKILL.md create mode 100755 .cursor/skills/pr-create/scripts/pr-create.sh create mode 100644 .cursor/skills/pr-land/SKILL.md create mode 100644 .cursor/skills/pr-land/scripts/edge-repo.js create mode 100755 .cursor/skills/pr-land/scripts/pr-land-comments.sh create mode 100755 .cursor/skills/pr-land/scripts/pr-land-discover.sh create mode 100755 .cursor/skills/pr-land/scripts/pr-land-extract-asana-task.sh create mode 100755 .cursor/skills/pr-land/scripts/pr-land-merge.sh create mode 100755 .cursor/skills/pr-land/scripts/pr-land-prepare.sh create mode 100755 .cursor/skills/pr-land/scripts/pr-land-publish.sh create mode 100755 .cursor/skills/pr-land/scripts/upgrade-dep.sh create mode 100644 .cursor/skills/pr-review/SKILL.md create mode 100755 .cursor/skills/pr-review/scripts/github-pr-review.sh create mode 100644 .cursor/skills/q/SKILL.md create mode 100644 .cursor/skills/staging-cherry-pick/SKILL.md create mode 100755 .cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh create mode 100644 .cursor/skills/standup/SKILL.md create mode 100755 .cursor/skills/standup/scripts/asana-standup.sh create mode 100755 .cursor/skills/standup/scripts/github-pr-activity.sh create mode 100644 .cursor/skills/task-review/SKILL.md create mode 100755 .cursor/skills/verify-repo.sh create mode 100644 .gitignore create mode 100644 README.md create mode 100755 scripts/setup.sh diff --git a/.claude/CLAUDE.md b/.claude/CLAUDE.md new file mode 100644 index 0000000..7b819b6 --- /dev/null +++ b/.claude/CLAUDE.md @@ -0,0 +1,138 @@ +# Global Rules + +# Auto-generated from ~/.cursor/rules/ (alwaysApply: true files only). +# Do not edit manually. Re-generate via convention-sync. + +--- + +## answer-questions-first + +# Answer Questions Before Acting + +Before using any code editing tools, scan the user's message for `?` characters and determine if it's a question. 
+ +- **Ignore** `?` inside code, URLs or query parameters (e.g. `?param=x`, `?key=value`, `const x = ifTrue ? 'yes' : 'no'`) +- **Treat all other `?`** as questions if they appear to be genuine questions. + +If questions are detected: + +1. Read `~/.cursor/skills/q/SKILL.md` and follow its workflow to answer every question. +2. **Workflow context**: If a skill was invoked earlier in this conversation, note which one. When a question or critique references agent behavior from that execution, load the skill definition before answering and evaluate whether the skill should have governed that behavior. If it should have but didn't, that's a workflow gap — treat it as the primary concern per `fix-workflow-first.mdc`. +3. Do **not** edit files, create files, or run mutating commands until the user responds. +4. Only proceed with implementation after the user permits it in a follow-up message. + +--- + +## load-standards-by-filetype + +Load language-specific coding standards before editing or investigating lint/type errors in files, without redundant reads. + + +Before using any code editing tool on a file OR investigating lint/type errors in that file type, check if the matching standards rule is already present in `cursor_rules_context`. Only read the rule file if it is NOT already in context. +If the rule is not in context, read it using the Read tool and follow its contents BEFORE making the edit or investigating the error. + + + + +| File glob | Standards file | +|---|----| +| `**/*.ts`,`**/*.tsx` | `~/.cursor/rules/typescript-standards.mdc` | + + + +--- + +## no-format-lint + +# No Manual Formatting or Lint Fixing + +- Do NOT run `yarn lint`, `yarn fix`, `yarn verify`, or any lint/format shell commands unless explicitly asked. +- Do NOT manually fix formatting issues (whitespace, quotes, semicolons, trailing commas, line length). The `lint-commit.sh` script runs `eslint --fix` (including Prettier) before each commit. 
+ +- Only use `ReadLints` to check for logical or type errors, not formatting. If the only lint errors are formatting-related, ignore them. +- Focus tokens on correctness and logic, not style. + +--- + +## workflow-halt-on-error + + + +All workflow-related skill definitions (`*.md` / `SKILL.md`) and workflow companion scripts (`*.sh`) are sourced from `~/.cursor/`. When executing skills, prefer explicit `~/.cursor/...` paths and do not assume repo-local workflow files unless the skill explicitly points to one. + +When a skill mentions a script path, resolve it under `~/.cursor/skills/<skill-name>/scripts/` unless the skill explicitly specifies an absolute path elsewhere. Do not assume repo-relative `scripts/` paths without verifying the skill directory contents. + +When ANY shell command fails (non-zero exit code) while executing an active skill workflow, a delegated subskill from that workflow, or a companion-script step required by that workflow (except where explicitly allowed by `auto-fix-verification-failures` or `companion-script-nonzero-contracts`): +1. **STOP** — do not retry, work around, substitute, or continue the workflow. +2. **Report** — show the user the exact command, exit code, and error output. +3. **Diagnose** — classify the failure: missing tool (`command not found`), wrong path, permissions, or logic error. +4. **Evaluate workflow** — if the failure reveals a gap in a skill definition, follow the fix-workflow-first rules below. +5. **Wait** — do not resume until the user responds. + + +When a workflow gap is discovered in an active skill definition: +1. **Stop immediately** — do not continue the current task or apply any workaround. +2. **Identify the root cause** in the skill (`.cursor/skills/*/SKILL.md`) definition. +3. **Propose the fix** to the user and wait for approval before proceeding. +4. **Fix the skill** using `/author` after approval. +5. **Resume the original task** only after the skill is updated. 
+ +Fixing the skill takes **absolute priority** over all other actions — including workarounds, continuing the original task, or applying temporary fixes. Do NOT apply workarounds or manual fixes before proposing the skill update. The correct sequence is: identify gap → propose fix → get approval → apply fix → then resume original task. This applies to all workflow issues — missed steps, incorrect output, wrong tool usage, shell failures, formatting problems, etc. The skill is the source of truth; patching around it creates drift. + + +These workflow halt rules are for skill-driven execution, especially hands-off/orchestrated skills and their dependencies. They do not automatically apply to ad hoc exploration, incidental verification, or low-risk authoring work unless that command is part of an active skill contract. + +Exception to `halt-on-error`: For verification/code-quality failures where diagnostics are explicit and local, continue automatically with bounded remediation. + +Allowed auto-fix scope: +- TypeScript/compiler failures (`tsc`) with clear file/line diagnostics +- Lint failures (`eslint`) with clear file/line diagnostics +- Test failures (`jest`/`yarn test`) when stack traces or assertion output identify failing test files +- `verify-repo.sh` code-step failures that resolve to one of the above + +Required behavior: +1. Briefly log rationale: failure type, affected files, and why scope is unambiguous. +2. Apply the minimal fix in the failing repo. +3. Re-run the failing verification step. +4. Limit to 2 remediation attempts; if still failing or scope expands, fall back to `halt-on-error`. + +Never auto-fix: +- Missing tools/auth (`command not found`, `PROMPT_GH_AUTH`) +- Wrong path/permissions +- Companion script contract/usage failures +- Unexpected exit codes from orchestrator scripts +- Any failure requiring destructive operations or workflow bypasses + + +Respect documented companion script exit-code contracts. Non-zero does NOT always mean fatal. 
+ +For `~/.cursor/skills/im/scripts/lint-warnings.sh`: +- `0` = no remaining lint findings after auto-fix +- `1` = remaining lint findings after auto-fix (expected actionable state) +- `2` = execution error (fatal) + +Required behavior: +1. If exit `1`, continue workflow by fixing the remaining lint findings before implementation. +2. If the script auto-fixes pre-existing lint issues, commit those changes in a separate lint-fix commit immediately before feature commits, even if no findings remain. +3. If exit `2`, apply `halt-on-error`. + + +Do NOT silently substitute an alternative tool or approach when a command fails. If `rg` is not found, do not fall back to `grep`. If a script exits non-zero, do not manually replicate what the script does. The failure is the signal — report it. + + + + + +Scan the user's message for `/word` tokens. A token is a **command invocation** when ALL of: +- `/word` is preceded by whitespace, a newline, or is at the start of the message +- `word` contains only lowercase letters and hyphens (e.g., `/im`, `/pr-create`, `/author`) +- `/word` is NOT inside a file path, URL, or code block + +When detected: +1. Read `~/.cursor/skills/<word>/SKILL.md` and follow it immediately. +2. If the file does not exist, inform the user: "Skill `/<word>` not found in `~/.cursor/skills/`." + +**Ignore `/`** in: file paths (`/Users/...`, `~/...`), URLs (`https://...`), mid-word (`and/or`), backticks/code blocks. 
+ + + + diff --git a/.claude/skills b/.claude/skills new file mode 120000 index 0000000..8574c4f --- /dev/null +++ b/.claude/skills @@ -0,0 +1 @@ +../.cursor/skills \ No newline at end of file diff --git a/.cursor/.syncignore b/.cursor/.syncignore new file mode 100644 index 0000000..10363f6 --- /dev/null +++ b/.cursor/.syncignore @@ -0,0 +1,6 @@ +# Files to exclude from convention-sync (one glob per line) +# Patterns match against relative paths like: commands/foo.sh, rules/bar.mdc + +# WIP commands +commands/hudl.md +commands/github-pr-hudl.sh diff --git a/.cursor/README.md b/.cursor/README.md new file mode 100644 index 0000000..25761bc --- /dev/null +++ b/.cursor/README.md @@ -0,0 +1,364 @@ +Complete agent-assisted development workflow for Edge repositories — slash skills with companion scripts, coding standards, review standards, and the author skill. + +## Installation + +**1. Set the required env var** in your `~/.zshrc`: +```bash +export GIT_BRANCH_PREFIX=yourname # e.g. jon, paul, sam — used for branch naming and PR discovery +``` + +**2. Install files into `~/.cursor/`:** +```bash +curl -sL https://github.com/EdgeApp/edge-conventions/archive/refs/heads/jon/agents.tar.gz | \ + tar -xz --strip-components=2 -C ~/.cursor 'edge-conventions-jon-agents/.cursor' && \ + find ~/.cursor -type f -name "*.sh" -exec chmod +x {} + && \ + echo "✓ Installed into ~/.cursor/" +``` + +**3. 
Verify prerequisites:** +- `gh` CLI — `gh auth login` +- `jq` — `brew install jq` +- `ASANA_TOKEN` env var (Asana scripts only) + +--- + +## Table of Contents + +- [Architecture](#architecture) +- [Skills](#skills-slash-skills) +- [Companion Scripts](#companion-scripts) +- [Shared Module](#shared-module-edge-repojs) +- [Rules](#rules-mdc-files) +- [Author Skill](#author-skill) +- [Design Principles](#design-principles) + +--- + +## Architecture + +``` +.cursor/ +├── skills/ # Primary slash skills (*/SKILL.md) + skill scripts +├── scripts/ # Shared utility scripts (status dashboard, portability) +├── commands/ # Minimal legacy command wrappers (if present) +└── rules/ # Coding/review standards (.mdc) +``` + +**Separation of concerns:** +- **Commands** (`.md`) — Define agent workflows: steps, rules, edge cases. Invoked explicitly via `/command`. +- **Skills** (`SKILL.md`) — Primary workflow units invoked with `/skill-name` (or selected by context). +- **Companion scripts** (`.sh`, `.js`) — Handle deterministic operations: API calls, git ops, JSON processing. Skills call scripts; scripts never call skills. +- **Rules** (`.mdc`) — Persistent coding standards loaded on-demand by file type or command step. Two classes: **editing standards** (loaded when writing code) and **review standards** (loaded during PR review). + +All GitHub API operations use **`gh` CLI** (`gh api`, `gh api graphql`, `gh pr`). No raw `curl` + `$GITHUB_TOKEN`. + +**User-specific configuration** is driven by the `GIT_BRANCH_PREFIX` env var — set once in `.zshrc`, used by scripts for branch naming (`$GIT_BRANCH_PREFIX/feature-name`) and PR discovery. No hardcoded usernames. 
+ +--- + +## Skills (Slash Skills) + +### Core Implementation + +| Skill | Description | +|---------|-------------| +| [`/im`](.cursor/skills/im/SKILL.md) | Implement an Asana task or ad-hoc feature/fix with clean, structured commits | +| [`/one-shot`](.cursor/skills/one-shot/SKILL.md) | Legacy-style one-command flow: `/asana-plan` → `/im` → `/pr-create` with default Asana attach/assign | +| [`/pr-create`](.cursor/skills/pr-create/SKILL.md) | Create a PR from the current branch; optional Asana attach/assign flags | +| [`/dep-pr`](.cursor/skills/dep-pr/SKILL.md) | Create dependent Asana tasks and run downstream PR workflow | +| [`/changelog`](.cursor/skills/changelog/SKILL.md) | Update CHANGELOG.md following existing patterns | + +### Planning and Context + +| Skill | Description | +|---------|-------------| +| [`/asana-plan`](.cursor/skills/asana-plan/SKILL.md) | Build implementation plans from Asana tasks or text/file requirements | +| [`/task-review`](.cursor/skills/task-review/SKILL.md) | Fetch + analyze Asana task context | +| [`/q`](.cursor/skills/q/SKILL.md) | Answer questions before taking action | + +### Review and Landing + +| Skill | Description | +|---------|-------------| +| [`/pr-review`](.cursor/skills/pr-review/SKILL.md) | Review a PR against coding and review standards | +| [`/pr-address`](.cursor/skills/pr-address/SKILL.md) | Address PR feedback with fixup commits and replies | +| [`/pr-land`](.cursor/skills/pr-land/SKILL.md) | Land approved PRs: prepare, merge, publish, and Asana updates | + +### Asana and Utility + +| Skill | Description | +|---------|-------------| +| [`/asana-task-update`](.cursor/skills/asana-task-update/SKILL.md) | Generic Asana mutations (attach PR, assign, status/field updates) | +| [`/standup`](.cursor/skills/standup/SKILL.md) | Generate daily standup from Asana + GitHub activity | +| [`/chat-audit`](.cursor/skills/chat-audit/SKILL.md) | Audit chat sessions for workflow/rule issues | +| 
[`/convention-sync`](.cursor/skills/convention-sync/SKILL.md) | Sync `~/.cursor` changes with the `edge-conventions` repo and update PR description | +| [`/author`](.cursor/skills/author/SKILL.md) | Create/update/debug skills and related scripts/rules | + +--- + +## Companion Scripts + +### PR Operations + +| Script | What it does | API | +|--------|-------------|-----| +| [`pr-create.sh`](.cursor/skills/pr-create/scripts/pr-create.sh) | Create PR for current branch with repo-template-aligned title/body | `gh pr create` | +| [`pr-address.sh`](.cursor/skills/pr-address/scripts/pr-address.sh) | Fetch unresolved feedback, post replies, resolve threads, mark addressed | `gh api` REST + GraphQL | +| [`github-pr-review.sh`](.cursor/skills/pr-review/scripts/github-pr-review.sh) | Fetch PR context (metadata + patches) and submit reviews | `gh pr view` + `gh api` REST | +| [`github-pr-activity.sh`](.cursor/skills/standup/scripts/github-pr-activity.sh) | List PRs by activity (recent reviews, comments, CI status) | `gh api graphql` | + +### PR Status Dashboard + +| Script | What it does | API | +|--------|-------------|-----| +| [`pr-status-gql.sh`](.cursor/scripts/pr-status-gql.sh) | PR status with review state, CI checks, new comments (primary) | `gh api graphql` | +| [`pr-status.sh`](.cursor/scripts/pr-status.sh) | Same as above, REST fallback | `gh api` REST | +| [`pr-watch.sh`](.cursor/scripts/pr-watch.sh) | TUI wrapper — auto-refresh dashboard with rate limit awareness | Delegates to above | + +### PR Landing Pipeline (`/pr-land`) + +These scripts run sequentially. 
Each handles one phase of the landing workflow: + +| Script | Phase | What it does | API | +|--------|-------|-------------|-----| +| [`pr-land-discover.sh`](.cursor/skills/pr-land/scripts/pr-land-discover.sh) | 1: Discovery | Find all `$GIT_BRANCH_PREFIX/*` PRs with approval status | Single `gh api graphql` query | +| [`pr-land-comments.sh`](.cursor/skills/pr-land/scripts/pr-land-comments.sh) | 2: Comment check | Detect unaddressed feedback (inline threads, review bodies, top-level comments) | `gh api graphql` per PR | +| [`pr-land-prepare.sh`](.cursor/skills/pr-land/scripts/pr-land-prepare.sh) | 3: Prepare | Autosquash → rebase → conflict detection → verification | Git only | +| [`verify-repo.sh`](.cursor/skills/verify-repo.sh) | 3b: Verify | CHANGELOG validation + `prepare`/`tsc`/`lint`/`test` | Git + yarn | +| [`pr-land-merge.sh`](.cursor/skills/pr-land/scripts/pr-land-merge.sh) | 5: Merge | Sequential merge with auto-rebase, mandatory verification | `gh api` REST | +| [`pr-land-publish.sh`](.cursor/skills/pr-land/scripts/pr-land-publish.sh) | 6: Publish | Version bump, changelog update, commit + tag (no push) | Git + npm | + +**Conflict handling is fully scripted:** +- Code conflicts → skip PR, continue with remaining +- CHANGELOG-only (including staging) → agent resolves semantically, re-runs + +### Chat Analysis + +| Script | What it does | +|--------|-------------| +| [`cursor-chat-extract.js`](.cursor/skills/chat-audit/scripts/cursor-chat-extract.js) | Parse Cursor chat export JSON into compact structured summary (messages, tool calls, stats) | + +### Asana Integration + +| Script | What it does | API | +|--------|-------------|-----| +| [`asana-get-context.sh`](.cursor/skills/asana-get-context.sh) | Fetch task details, attachments, subtasks, custom fields | Asana REST | +| [`asana-task-update.sh`](.cursor/skills/asana-task-update/scripts/asana-task-update.sh) | Generic task updates (attach PR, assign, status, fields) | Asana REST | +| 
[`asana-create-dep-task.sh`](.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh) | Create dependent task in another repo's project | Asana REST | +| [`asana-whoami.sh`](.cursor/skills/asana-whoami.sh) | Get current Asana user info | Asana REST | + +### Build & Deps + +| Script | What it does | +|--------|-------------| +| [`lint-commit.sh`](.cursor/skills/lint-commit.sh) | ESLint `--fix`, localize, graduate warnings, and report effective commit scope before commit | +| [`lint-warnings.sh`](.cursor/skills/im/scripts/lint-warnings.sh) | Run `eslint --fix`, then summarize any remaining lint findings with matched fix patterns | +| [`install-deps.sh`](.cursor/skills/install-deps.sh) | Install dependencies and run prepare script | +| [`upgrade-dep.sh`](.cursor/skills/pr-land/scripts/upgrade-dep.sh) | Upgrade a dependency in the GUI repo | + +### Sync & Portability + +| Script | What it does | +|--------|-------------| +| [`convention-sync.sh`](.cursor/skills/convention-sync/scripts/convention-sync.sh) | Diff and sync `~/.cursor/` files with the edge-conventions repo | +| [`tool-sync.sh`](.cursor/scripts/tool-sync.sh) | Sync Cursor rules, skills, and scripts to OpenCode and Claude Code formats | +| [`port-to-opencode.sh`](.cursor/scripts/port-to-opencode.sh) | Convert Cursor `.mdc`/`.md` files to OpenCode-compatible JSON + MD mirrors | + +--- + +## Dependency Graph + +### Skill → Skill + +```mermaid +graph LR + asanaPlan["/asana-plan"] + taskReview["/task-review"] + im["/im"] + oneShot["/one-shot"] + depPr["/dep-pr"] + prCreate["/pr-create"] + asanaTaskUpdate["/asana-task-update"] + author["/author"] + conventionSync["/convention-sync"] + + oneShot --> asanaPlan + oneShot --> im + oneShot --> prCreate + asanaPlan --> taskReview + im --> asanaPlan + depPr --> prCreate + prCreate --> asanaTaskUpdate + author --> conventionSync +``` + +Skills with no skill dependencies: + +- `/asana-task-update` +- `/task-review` +- `/q` +- `/pr-review` +- `/pr-address` +- `/pr-land` +- 
`/standup` +- `/chat-audit` +- `/changelog` +- `/convention-sync` + +### Full Skill/Script Dependency Graph + +Top-to-bottom organization: skill layer, skill-specific scripts, shared scripts. + +```mermaid +graph TD + subgraph skillLayer [Skills] + im["/im"] + oneShot["/one-shot"] + asanaPlan["/asana-plan"] + taskReview["/task-review"] + depPr["/dep-pr"] + prCreate["/pr-create"] + asanaTaskUpdate["/asana-task-update"] + prLand["/pr-land"] + prReview["/pr-review"] + prAddress["/pr-address"] + standup["/standup"] + chatAudit["/chat-audit"] + conventionSync["/convention-sync"] + author["/author"] + q["/q"] + changelog["/changelog"] + end + + subgraph skillScripts [Skill Scripts] + prCreateSh["pr-create.sh"] + prAddressSh["pr-address.sh"] + prReviewSh["github-pr-review.sh"] + depTaskSh["asana-create-dep-task.sh"] + asanaTaskUpdateSh["asana-task-update.sh"] + prLandDisc["pr-land-discover.sh"] + prLandCmts["pr-land-comments.sh"] + prLandPrep["pr-land-prepare.sh"] + prLandMerge["pr-land-merge.sh"] + prLandPublish["pr-land-publish.sh"] + prLandExtract["pr-land-extract-asana-task.sh"] + standupAsana["asana-standup.sh"] + standupGh["github-pr-activity.sh"] + chatExtract["cursor-chat-extract.js"] + conventionSyncSh["convention-sync.sh"] + generateClaude["generate-claude-md.sh"] + lintWarn["lint-warnings.sh"] + end + + subgraph sharedScripts [Shared Scripts] + lintCommit["lint-commit.sh"] + verifyRepo["verify-repo.sh"] + asanaGetContext["asana-get-context.sh"] + asanaWhoAmI["asana-whoami.sh"] + installDeps["install-deps.sh"] + edgeRepo["edge-repo.js"] + end + + oneShot --> asanaPlan + oneShot --> im + oneShot --> prCreate + asanaPlan --> taskReview + im --> asanaPlan + depPr --> prCreate + prCreate --> asanaTaskUpdate + author --> conventionSync + + prCreate --> prCreateSh + prCreate --> verifyRepo + prCreate --> asanaTaskUpdateSh + im --> lintWarn + im --> lintCommit + im --> verifyRepo + im --> installDeps + depPr --> depTaskSh + depPr --> asanaGetContext + asanaTaskUpdate 
--> asanaTaskUpdateSh + taskReview --> asanaGetContext + prAddress --> prAddressSh + prAddress --> lintCommit + prReview --> prReviewSh + prLand --> prLandDisc + prLand --> prLandCmts + prLand --> prLandPrep + prLand --> prLandMerge + prLand --> prLandPublish + prLand --> prLandExtract + prLand --> asanaTaskUpdateSh + prLand --> verifyRepo + standup --> standupAsana + standup --> standupGh + chatAudit --> chatExtract + conventionSync --> conventionSyncSh + conventionSync --> generateClaude + + depTaskSh --> asanaWhoAmI + depTaskSh --> asanaTaskUpdateSh + asanaTaskUpdateSh --> asanaWhoAmI + standupAsana --> asanaWhoAmI + prLandPrep --> edgeRepo + prLandMerge --> edgeRepo + prLandPublish --> edgeRepo +``` + +--- + +## Shared Module: `edge-repo.js` + +[`edge-repo.js`](.cursor/skills/pr-land/scripts/edge-repo.js) eliminates duplication across the `pr-land-*` scripts. Exports: + +| Function | Purpose | +|----------|---------| +| `getRepoDir(repo)` | Resolve local checkout path (`~/git/`, `~/projects/`, `~/code/`) | +| `getUpstreamBranch(repo)` | `origin/develop` for GUI, `origin/master` for everything else | +| `runGit(args, cwd, opts)` | Safe `spawnSync` wrapper with `GIT_EDITOR=true` | +| `parseConflictFiles(output)` | Extract conflicting file paths from rebase output | +| `isChangelogOnly(files)` | Check if all conflicts are in CHANGELOG.md | +| `runVerification(repoDir, baseRef, opts)` | Run the full verify script with scoped lint (supports `{requireChangelog: true}`) | +| `ghApi(endpoint, opts)` | `gh api` wrapper with method, body, paginate, jq support | +| `ghGraphql(query, vars)` | `gh api graphql` wrapper with typed variable injection | + +--- + +## Rules (`.mdc` files) + +| Rule | Activation | Purpose | +|------|-----------|---------| +| [`typescript-standards.mdc`](.cursor/rules/typescript-standards.mdc) | Loaded before editing `.ts`/`.tsx` files | TypeScript + React coding standards for **editing** (includes `simple-selectors` rule, descriptive variable 
names, biggystring arithmetic) | +| [`review-standards.mdc`](.cursor/rules/review-standards.mdc) | Loaded by `/pr-review` command | ~50 review-specific diagnostic rules extracted from PR history | +| [`load-standards-by-filetype.mdc`](.cursor/rules/load-standards-by-filetype.mdc) | Always applied | Auto-loads language-specific standards before editing | +| [`fix-workflow-first.mdc`](.cursor/rules/fix-workflow-first.mdc) | Always applied | Fix command/skill definitions before patching downstream symptoms | +| [`answer-questions-first.mdc`](.cursor/rules/answer-questions-first.mdc) | Always applied | Detect `?` in user messages → answer before acting; loads active command context to evaluate workflow gaps | +| [`no-format-lint.mdc`](.cursor/rules/no-format-lint.mdc) | Always applied | Don't manually fix formatting — auto-format on agent finish handles it | +| [`eslint-warnings.mdc`](.cursor/rules/eslint-warnings.mdc) | `.ts`/`.tsx` files | ESLint warning handling patterns | + +**Editing vs. review separation**: `typescript-standards` contains rules for writing code (prefer `useHandler`, use `InteractionManager`, descriptive variable names, biggystring for numeric calculations). `review-standards` contains diagnostic patterns for catching bugs during review (null `tokenId` fallback, stack trace preservation, module-level cache bugs, etc.). Both are loaded together during `/pr-review`; only `typescript-standards` is loaded during editing. + +--- + +## Author Skill + +| Skill | Purpose | +|-------|---------| +| [`author/SKILL.md`](.cursor/skills/author/SKILL.md) | Meta-skill for creating/maintaining skills, scripts, and rules. Enforces XML format, `scripts-over-reasoning`, `gh-cli-over-curl`, dependency-audit requirements before script add/update/remove, and convention-sync/CLAUDE sync post-authoring behavior. | + +--- + +## Design Principles + +1. 
**Scripts over reasoning** — Deterministic operations (API calls, git, JSON) go in companion scripts, not inline in commands. +2. **`gh` CLI over `curl`** — All GitHub API calls use `gh api` / `gh api graphql`. Handles auth, pagination, API versioning automatically. +3. **GraphQL over REST** — Fetch only required fields in a single request where possible. Fall back to REST only when GraphQL doesn't expose the needed data (e.g., file patches). +4. **DRY shared modules** — Common utilities extracted into `edge-repo.js` rather than duplicated across scripts. +5. **XML format** — Skills use XML structure (``, ``, ``) for reliable LLM instruction-following. +6. **Standards-first** — Load coding standards before writing or reviewing any code. +7. **Fix workflow first** — When behavior is wrong, fix the command/skill definition, not the downstream symptom. +8. **No hardcoded usernames** — All user-specific values come from `GIT_BRANCH_PREFIX` env var, set once in `.zshrc`. +9. **Minimize context** — Script output must be compact and structured. Never return raw API responses. Every token costs context. +10. **Small-model conventions** — High-frequency skills that run on faster/cheaper models use verbatim bash, file-over-args, inline guardrails, and explicit parallel instructions for reliability. +11. **Knowledge base over crawling** — Maintain curated knowledge files (e.g., `eslint-warnings.mdc`) instead of having the agent crawl/grep for information repeatedly. Pre-indexed knowledge reduces tool calls and context consumption. +12. **Continuous improvement** — Workflows feed back into their own knowledge. PR review feedback updates `review-standards.mdc`, addressed warnings update `eslint-warnings.mdc`, and chat audits surface rule gaps. Each cycle reduces repetitive context gathering by the agent and repetitive review by humans. 
diff --git a/.cursor/commands/github-pr-hudl.sh b/.cursor/commands/github-pr-hudl.sh new file mode 100755 index 0000000..2ec3db7 --- /dev/null +++ b/.cursor/commands/github-pr-hudl.sh @@ -0,0 +1,389 @@ +#!/usr/bin/env bash +# github-pr-hudl.sh — Fetch comprehensive GitHub PR activity for a given day. +# Detects multiple action categories for HUDL standup generation. +# +# Categories: +# - created: PRs created by user on target date +# - committed: PRs where user pushed commits on target date +# - addressed: PRs with commits after receiving review comments +# - reviewed: PRs by others that user reviewed on target date +# - commented: PRs where user posted comments on target date +# - approved: PRs that have approval (for Goals Today) +# - blocked: PRs blocked by CI or changes requested (for Handoffs) +# - open_prs: All open PRs for debug section +# +# Usage: +# github-pr-hudl.sh [--date YYYY-MM-DD] +# +# Requires: gh CLI authenticated, ASANA_TOKEN for cross-referencing +# +# Output: JSON with date, username, day_label, and category arrays +set -euo pipefail + +TARGET_DATE="" +while [[ $# -gt 0 ]]; do + case "$1" in + --date) TARGET_DATE="$2"; shift 2 ;; + *) echo "Unknown: $1" >&2; exit 1 ;; + esac +done + +if ! command -v gh &>/dev/null; then + echo "Error: gh CLI not installed" >&2; exit 1 +fi +if ! 
gh auth status &>/dev/null 2>&1; then + echo "PROMPT_GH_AUTH" >&2; exit 2 +fi + +USERNAME=$(gh api user --jq '.login') +ASANA_TOKEN="${ASANA_TOKEN:-}" + +export TARGET_DATE USERNAME ASANA_TOKEN + +python3 - << 'PYEOF' +import json, os, re, subprocess, sys, urllib.request, urllib.error +from datetime import date, timedelta + +USERNAME = os.environ["USERNAME"] +TARGET_DATE_STR = os.environ.get("TARGET_DATE", "") +ASANA_TOKEN = os.environ.get("ASANA_TOKEN", "") + +today = date.today() + +if TARGET_DATE_STR: + # Explicit date: use single day + target_start = date.fromisoformat(TARGET_DATE_STR) + target_end = target_start + day_label = target_start.strftime("%A") +else: + # Default: from last workday until now + if today.weekday() == 0: # Monday + target_start = today - timedelta(days=3) # Friday + target_end = today + day_label = "since Friday" + else: + target_start = today - timedelta(days=1) # Yesterday + target_end = today + day_label = "since yesterday" + +TARGET_START_STR = target_start.isoformat() +TARGET_END_STR = target_end.isoformat() + + +def gh_graphql(query, variables): + args = ["gh", "api", "graphql", "-f", f"query={query}"] + for k, v in variables.items(): + args.extend(["-f", f"{k}={v}"]) + result = subprocess.run(args, capture_output=True, text=True) + if result.returncode != 0: + print(f"GH_ERROR: {result.stderr[:300]}", file=sys.stderr) + return {"data": {"search": {"nodes": []}}} + parsed = json.loads(result.stdout) + if "errors" in parsed: + print(f"GQL_ERROR: {json.dumps(parsed['errors'][:2])}", file=sys.stderr) + return parsed + + +def extract_asana_gid(body): + if not body: + return None + m = re.search(r'asana\.com/\S*/(\d{10,})', body) + return m.group(1) if m else None + + +def fetch_asana_status(gid): + """Fetch Asana task status via API.""" + if not ASANA_TOKEN or not gid: + return None + try: + req = urllib.request.Request( + f"https://app.asana.com/api/1.0/tasks/{gid}?opt_fields=custom_fields.gid,custom_fields.display_value", + 
headers={"Authorization": f"Bearer {ASANA_TOKEN}"} + ) + with urllib.request.urlopen(req, timeout=5) as resp: + data = json.loads(resp.read()) + for f in data.get("data", {}).get("custom_fields", []): + if f.get("gid") == "1190660107346181": # Status field + return f.get("display_value") + except Exception as e: + print(f"ASANA_ERROR: {e}", file=sys.stderr) + return None + + +# --- Main GraphQL query for user's activity --- +QUERY_USER_PRS = """ +query($search: String!) { + search(query: $search, type: ISSUE, first: 100) { + nodes { + ... on PullRequest { + number + title + url + body + state + createdAt + repository { nameWithOwner } + reviews(last: 50) { + nodes { + author { login } + state + submittedAt + } + } + commits(last: 50) { + nodes { + commit { + committedDate + author { user { login } } + } + } + } + comments(last: 50) { + nodes { + author { login } + createdAt + } + } + reviewThreads(first: 50) { + nodes { + comments(first: 10) { + nodes { + author { login } + createdAt + } + } + } + } + reviewDecision + statusCheckRollup { + state + } + } + } + } +} +""" + +# Search 1: User's own PRs (open or recently updated) +search_authored = f"is:pr author:{USERNAME} updated:>={TARGET_START_STR} sort:updated" +authored_raw = gh_graphql(QUERY_USER_PRS, {"search": search_authored}) + +# Search 2: PRs reviewed by user +search_reviewed = f"is:pr reviewed-by:{USERNAME} -author:{USERNAME} updated:>={TARGET_START_STR} sort:updated" +reviewed_raw = gh_graphql(QUERY_USER_PRS, {"search": search_reviewed}) + +# Search 3: PRs where user commented +search_commented = f"is:pr commenter:{USERNAME} -author:{USERNAME} updated:>={TARGET_START_STR} sort:updated" +commented_raw = gh_graphql(QUERY_USER_PRS, {"search": search_commented}) + +search_count = 0 +for raw in [authored_raw, reviewed_raw, commented_raw]: + search_count += len(raw.get("data", {}).get("search", {}).get("nodes", [])) + +print(f"Searched {search_count} PR candidates", file=sys.stderr) + +# --- Process authored 
PRs --- +created = [] +committed = [] +addressed = [] +approved = [] +blocked = [] +open_prs = [] + +seen_prs = set() + +for node in authored_raw.get("data", {}).get("search", {}).get("nodes", []): + if not node or "number" not in node: + continue + + pr_key = f"{node['repository']['nameWithOwner']}#{node['number']}" + if pr_key in seen_prs: + continue + seen_prs.add(pr_key) + + asana_gid = extract_asana_gid(node.get("body")) + asana_status = fetch_asana_status(asana_gid) if asana_gid else None + + pr_entry = { + "pr_number": node["number"], + "pr_title": node["title"], + "pr_url": node["url"], + "repo": node["repository"]["nameWithOwner"], + "asana_gid": asana_gid, + "asana_status": asana_status, + } + + # Check if created within target window + created_at = (node.get("createdAt") or "")[:10] + if TARGET_START_STR <= created_at <= TARGET_END_STR: + created.append(pr_entry) + + # Check for human reviews before target window + has_prior_review = False + for r in (node.get("reviews") or {}).get("nodes", []): + if not r or not r.get("author"): + continue + reviewer = r["author"].get("login", "") + if reviewer == USERNAME or "[bot]" in reviewer: + continue + submitted = (r.get("submittedAt") or "")[:10] + if submitted < TARGET_START_STR and r.get("state") in ("CHANGES_REQUESTED", "COMMENTED"): + has_prior_review = True + break + + # Check for commits within target window + commits_in_window = [] + for c in (node.get("commits") or {}).get("nodes", []): + commit = (c or {}).get("commit", {}) + committed_date = (commit.get("committedDate") or "")[:10] + commit_user = ((commit.get("author") or {}).get("user") or {}).get("login", "") + if TARGET_START_STR <= committed_date <= TARGET_END_STR and commit_user == USERNAME: + commits_in_window.append(commit) + + if commits_in_window: + entry_with_count = {**pr_entry, "commit_count": len(commits_in_window)} + # Only count as addressed/committed if PR wasn't created in window + if not (TARGET_START_STR <= created_at <= 
TARGET_END_STR): + if has_prior_review: + addressed.append(entry_with_count) + else: + committed.append(entry_with_count) + + # Track open PRs for debug and blocked/approved analysis + if node.get("state") == "OPEN": + review_decision = node.get("reviewDecision") + ci_state = (node.get("statusCheckRollup") or {}).get("state") + + # Determine status summary + status_parts = [] + if review_decision: + status_parts.append(review_decision.lower().replace("_", " ")) + if ci_state: + status_parts.append(f"CI: {ci_state.lower()}") + if asana_status: + status_parts.append(f"Asana: {asana_status}") + + open_prs.append({ + **pr_entry, + "review_decision": review_decision, + "ci_state": ci_state, + "status_summary": ", ".join(status_parts) if status_parts else "open" + }) + + # Check if approved (GitHub approved OR Asana Publish Needed) + if review_decision == "APPROVED" or asana_status == "Publish Needed": + approved.append(pr_entry) + + # Check if blocked + if ci_state == "FAILURE": + blocked.append({**pr_entry, "block_reason": "ci_failure", "detail": "CI failing"}) + elif review_decision == "CHANGES_REQUESTED": + # Find who requested changes + changers = [] + for r in (node.get("reviews") or {}).get("nodes", []): + if r and r.get("state") == "CHANGES_REQUESTED": + author = (r.get("author") or {}).get("login", "") + if author and author not in changers: + changers.append(author) + blocked.append({ + **pr_entry, + "block_reason": "changes_requested", + "detail": ", ".join(changers) if changers else "reviewer" + }) + +# --- Process reviewed PRs --- +reviewed = [] +for node in reviewed_raw.get("data", {}).get("search", {}).get("nodes", []): + if not node or "number" not in node: + continue + + pr_key = f"{node['repository']['nameWithOwner']}#{node['number']}" + if pr_key in seen_prs: + continue + seen_prs.add(pr_key) + + # Find user's review within target window + review_state = None + for r in (node.get("reviews") or {}).get("nodes", []): + if not r or not r.get("author"): + 
continue + if r["author"].get("login") != USERNAME: + continue + submitted = (r.get("submittedAt") or "")[:10] + if TARGET_START_STR <= submitted <= TARGET_END_STR: + review_state = r.get("state", "COMMENTED") + break + + if review_state: + reviewed.append({ + "pr_number": node["number"], + "pr_title": node["title"], + "pr_url": node["url"], + "repo": node["repository"]["nameWithOwner"], + "asana_gid": extract_asana_gid(node.get("body")), + "review_state": review_state, + }) + +# --- Process commented PRs --- +commented_list = [] +for node in commented_raw.get("data", {}).get("search", {}).get("nodes", []): + if not node or "number" not in node: + continue + + pr_key = f"{node['repository']['nameWithOwner']}#{node['number']}" + if pr_key in seen_prs: + continue + seen_prs.add(pr_key) + + # Check for comments by user on target date + has_comment = False + + # Issue comments + for c in (node.get("comments") or {}).get("nodes", []): + if not c: + continue + author = (c.get("author") or {}).get("login", "") + created = (c.get("createdAt") or "")[:10] + if author == USERNAME and TARGET_START_STR <= created <= TARGET_END_STR: + has_comment = True + break + + # Review thread comments + if not has_comment: + for thread in (node.get("reviewThreads") or {}).get("nodes", []): + for c in (thread.get("comments") or {}).get("nodes", []): + if not c: + continue + author = (c.get("author") or {}).get("login", "") + created = (c.get("createdAt") or "")[:10] + if author == USERNAME and TARGET_START_STR <= created <= TARGET_END_STR: + has_comment = True + break + if has_comment: + break + + if has_comment: + commented_list.append({ + "pr_number": node["number"], + "pr_title": node["title"], + "pr_url": node["url"], + "repo": node["repository"]["nameWithOwner"], + "asana_gid": extract_asana_gid(node.get("body")), + }) + +print(json.dumps({ + "date_start": TARGET_START_STR, + "date_end": TARGET_END_STR, + "day_label": day_label, + "username": USERNAME, + "search_count": search_count, + 
"created": created, + "committed": committed, + "addressed": addressed, + "reviewed": reviewed, + "commented": commented_list, + "approved": approved, + "blocked": blocked, + "open_prs": open_prs, +}, indent=2)) +PYEOF diff --git a/.cursor/commands/hudl.md b/.cursor/commands/hudl.md new file mode 100644 index 0000000..adf630a --- /dev/null +++ b/.cursor/commands/hudl.md @@ -0,0 +1,229 @@ +Generate a daily HUDL document from GitHub PR activity, upload to a single persistent private gist. + + +PR names are the clickable link: `[{title}]({url})`. Never add a separate URL. +All HUDL files go into ONE gist with description "HUDL Notes". Create on first run, add files on subsequent runs. Never overwrite — append a suffix (`-1`, `-2`, etc.) if the filename exists. +Delete the local file after successful gist upload. +Set `block_until_ms: 120000` for the companion script. +PRs with Asana GIDs in body should have their Asana status fetched to determine true workflow status. + + + +Run the companion script: + +```bash +~/.cursor/commands/github-pr-hudl.sh +``` + +If the user supplies a specific date, pass `--date YYYY-MM-DD`. + +Capture stdout (JSON) and stderr (diagnostics) separately. + + + +The JSON output has these fields: +- `date_start`, `date_end`: The time window (e.g., Friday to Monday for Monday HUDL) +- `day_label`: Display label (e.g., "since Friday" or "since yesterday") + +And these arrays: +- `created`: PRs created within window +- `committed`: PRs where user pushed commits within window +- `addressed`: PRs with commits after receiving review comments +- `reviewed`: PRs by others that user reviewed +- `commented`: PRs where user posted comments +- `approved`: PRs that have approval (for Goals Today) +- `blocked`: PRs blocked by CI failure or changes requested (for Handoffs) +- `open_prs`: All open PRs for debug section + +Each entry has: `pr_number`, `pr_title`, `pr_url`, `repo`, `asana_gid` (nullable), `asana_status` (nullable), plus action-specific fields. 
+ + + +Build the markdown file with EXACTLY the structure below. Every heading, bullet, and blank line matters. + +Line 1 of the file. Use `date_end` from the JSON for the header date. + +``` +# HUDL Notes — {full_weekday_name} {full_month_name} {day}, {year} +``` + +Example: `# HUDL Notes — Monday February 17, 2026` + + + +``` +## Accomplishments {day_label} +``` + +Use `day_label` from the JSON (either `"since yesterday"` or `"since Friday"`; a weekday name when an explicit `--date` was given). + +Categorize each PR into exactly ONE subsection based on its PRIMARY action. Determine the primary action using this priority (highest first): + +1. `created` → goes in **PR'd** +2. `addressed` → goes in **Addressed PR Comments** +3. `reviewed` → goes in **Reviewed PRs** +4. `committed` or `commented` → goes in **General** + +A PR appears in only ONE subsection — the highest-priority one that matches. + +**Subsection: PR'd** — include only if at least one PR qualifies. + +``` +### PR'd + +- [{pr_title}]({pr_url}) ({repo}) +``` + +One bullet per PR. No action text — the heading says it. + +**Subsection: Addressed PR Comments** — include only if at least one PR qualifies. + +``` +### Addressed PR Comments + +- [{pr_title}]({pr_url}) ({repo}) +``` + +**Subsection: Reviewed PRs** — include only if at least one PR qualifies. + +``` +### Reviewed PRs + +- [{pr_title}]({pr_url}) ({repo}) — approved +``` + +Append the review verdict in lowercase after ` — `. Map `review_state`: +- `APPROVED` → `approved` +- `CHANGES_REQUESTED` → `changes requested` +- `COMMENTED` → `commented` + +**Subsection: General** — include only if at least one PR qualifies. + +``` +### General + +- [{pr_title}]({pr_url}) ({repo}) — Committed: 3 commits +``` + +Format each action type: +- `committed` → `Committed: {commit_count} commits` +- `commented` → `Commented` + +If a PR has multiple actions in General, join with `; `.
+ +**Omit any subsection that would have zero bullets.** + + + +``` +## Goals Today +``` + +List PRs from the `approved` array (PRs that are approved and ready to merge/publish): + +``` +- Publish [{pr_title}]({pr_url}) +``` + +After all approved items (or immediately if there are none), add one blank bullet for the user to fill in: + +``` +- +``` + + + +``` +## Handoffs +``` + +Group entries from the `blocked` array by block reason. + +**CI Failures** — if any PR has `block_reason=ci_failure`: + +``` +### Blocked by CI + +- [{pr_title}]({pr_url}) — CI failing +``` + +**Changes Requested** — if any PR has `block_reason=changes_requested`: + +``` +### Changes Requested + +- [{pr_title}]({pr_url}) — {reviewer} requested changes +``` + +If the blocked array is completely empty, write: + +``` +None +``` + + + +Add a horizontal rule, then a collapsed details block. + +``` +--- + +
<details> +<summary>Debug: {N} open PRs</summary> + +``` + +Where `{N}` is the length of the `open_prs` array. + +For each entry in `open_prs`, write: + +``` +- [{pr_title}]({pr_url}) — {status_summary} +``` + +Where `status_summary` includes: review state, CI status, Asana status (if present). + +End with search stats and close the details tag: + +``` + +*Searched {search_count} PRs* + +
</details> +``` + +`search_count` comes from the JSON. +
+
+ + +1. Write the markdown to `hudl-{date}.md` in the current working directory. +2. Upload to gist using this exact bash logic: + +```bash +GIST_ID=$(gh gist list --limit 100 --filter "HUDL Notes" | head -1 | awk '{print $1}') +FILENAME="hudl-{date}.md" + +if [ -n "$GIST_ID" ]; then + FILES=$(gh gist view "$GIST_ID" --files) + N=1 + BASE="hudl-{date}" + while echo "$FILES" | grep -q "$FILENAME"; do + N=$((N + 1)) + FILENAME="${BASE}-${N}.md" + done + [ "$FILENAME" != "hudl-{date}.md" ] && mv "hudl-{date}.md" "$FILENAME" + gh gist edit "$GIST_ID" --add "$FILENAME" +else + gh gist create --desc "HUDL Notes" "$FILENAME" + GIST_ID=$(gh gist list --limit 1 --filter "HUDL Notes" | awk '{print $1}') +fi + +rm "$FILENAME" +``` + +3. Present a brief summary to the user: + - Number of accomplishment items + - Number of handoffs + - Gist URL: `https://gist.github.com/{username}/{GIST_ID}` + diff --git a/.cursor/rules/after_each_chat.mdc b/.cursor/rules/after_each_chat.mdc new file mode 100644 index 0000000..79b7294 --- /dev/null +++ b/.cursor/rules/after_each_chat.mdc @@ -0,0 +1,24 @@ +--- +description: Send full chat response to Telegram at end of each turn +alwaysApply: false +--- + +Send the full response of every chat turn to Telegram via chunked messages. + + +Execute at the end of every chat turn without exception. +Send the complete response content, not an abbreviated summary. +Do not ask for confirmation or print extra explanation. +Send in a single Shell command. Do not write intermediate files, call date, or use multiple steps. +The script takes NO arguments. Pipe text via stdin. Do NOT pass file paths, workspace names, or any arguments. + + + +Pipe your full response to the send script via a heredoc in a single Shell command: +```bash +python3 ~/.cursor-autopilot/telegram-send.py << 'ENDOFMSG' + +ENDOFMSG +``` +The script reads stdin, splits the message into chunks that fit Telegram's 4096-character limit, and sends each sequentially. 
Credentials are resolved automatically. Do NOT write intermediate files, call date, or pass any arguments. + diff --git a/.cursor/rules/answer-questions-first.mdc b/.cursor/rules/answer-questions-first.mdc new file mode 100644 index 0000000..0cde491 --- /dev/null +++ b/.cursor/rules/answer-questions-first.mdc @@ -0,0 +1,18 @@ +--- +description: Detect questions in prompts and answer them before making changes +alwaysApply: true +--- + +# Answer Questions Before Acting + +Before using any code editing tools, scan the user's message for `?` characters and determine if it's a question. + +- **Ignore** `?` inside code, URLs or query parameters (e.g. `?param=x`, `?key=value` , `const x = ifTrue ? 'yes' : 'no'`) +- **Treat all other `?`** as question statements, if they appear to be questions. + +If questions are detected: + +1. Read `~/.cursor/skills/q/SKILL.md` and follow its workflow to answer every question. +2. **Workflow context**: If a skill was invoked earlier in this conversation, note which one. When a question or critique references agent behavior from that execution, load the skill definition before answering and evaluate whether the skill should have governed that behavior. If it should have but didn't, that's a workflow gap — treat it as the primary concern per `fix-workflow-first.mdc`. +3. Do **not** edit files, create files, or run mutating commands until the user responds. +4. Only proceed with implementation after the user permits it in a follow-up message. diff --git a/.cursor/rules/eslint-warnings.mdc b/.cursor/rules/eslint-warnings.mdc new file mode 100644 index 0000000..bb30cc9 --- /dev/null +++ b/.cursor/rules/eslint-warnings.mdc @@ -0,0 +1,10 @@ +--- +description: Guidance for addressing ESLint warnings in the codebase +globs: ["**/*.ts", "**/*.tsx"] +alwaysApply: false +--- + +# ESLint Warning Fixes + +- Skip deprecation warnings (`@typescript-eslint/no-deprecated`) unless explicitly asked to address them. 
+- After addressing warnings, run `yarn update-eslint-warnings` to update the baseline. diff --git a/.cursor/rules/load-standards-by-filetype.mdc b/.cursor/rules/load-standards-by-filetype.mdc new file mode 100644 index 0000000..d272c04 --- /dev/null +++ b/.cursor/rules/load-standards-by-filetype.mdc @@ -0,0 +1,19 @@ +--- +description: +alwaysApply: true +--- + +Load language-specific coding standards before editing or investigating lint/type errors in files, without redundant reads. + + +Before using any code editing tool on a file OR investigating lint/type errors in that file type, check if the matching standards rule is already present in `cursor_rules_context`. Only read the rule file if it is NOT already in context. +If the rule is not in context, read it using the Read tool and follow its contents BEFORE making the edit or investigating the error. + + + + +| File glob | Standards file | +|---|----| +| `**/*.ts`,`**/*.tsx` | `~/.cursor/rules/typescript-standards.mdc` | + + diff --git a/.cursor/rules/no-format-lint.mdc b/.cursor/rules/no-format-lint.mdc new file mode 100644 index 0000000..76cd248 --- /dev/null +++ b/.cursor/rules/no-format-lint.mdc @@ -0,0 +1,11 @@ +--- +description: Prevent agent from spending tokens on formatting and lint fixing +alwaysApply: true +--- + +# No Manual Formatting or Lint Fixing + +- Do NOT run `yarn lint`, `yarn fix`, `yarn verify`, or any lint/format shell commands unless explicitly asked. +- Do NOT manually fix formatting issues (whitespace, quotes, semicolons, trailing commas, line length). The `lint-commit.sh` script runs `eslint --fix` (including Prettier) before each commit. +- Only use `ReadLints` to check for logical or type errors, not formatting. If the only lint errors are formatting-related, ignore them. +- Focus tokens on correctness and logic, not style. 
diff --git a/.cursor/rules/review-standards.mdc b/.cursor/rules/review-standards.mdc new file mode 100644 index 0000000..f888efa --- /dev/null +++ b/.cursor/rules/review-standards.mdc @@ -0,0 +1,199 @@ +--- +description: Review-specific coding conventions for Edge codebase PR reviews. Load alongside typescript-standards.mdc during code review. +globs: [] +alwaysApply: false +--- + +Provide project-specific review patterns to detect in PR code — anti-patterns and conventions that go beyond the editing standards in typescript-standards.mdc. + +
+ +Don't use shorthand `.catch(showError)` — it loses the calling file from stack traces. +❌ `doSomething().catch(showError)` +✅ `doSomething().catch((error: unknown) => showError(error))` + + +Don't double down on `@ts-expect-error` when trivial fixes exist. Use `?? []`, `?? {}`, or explicit type annotations instead of suppressing type errors. + +Use `!== undefined` when `null` has semantic meaning (like "delete this field"). `!= null` treats both the same. +❌ `const changed = value != null` (when null means "delete") +✅ `const changed = value !== undefined` + + +Always `await` async operations for proper spinners, double-click prevention, and race condition avoidance. +❌ `wallet.saveTxMetadata(params).catch(showError)` +✅ `await wallet.saveTxMetadata(params)` + + +When the whole function is async and the caller handles errors, don't add a separate `.catch()`. +❌ `const handle = async () => { await op().catch(err => showError(err)) }` +✅ `const handle = async () => { await op() }` + + +When `tokenId` is a non-null string, any dereference using it must succeed or throw. Never fall back to `null` — it silently changes the intended asset from "this specific token" to "native currency." + +When a global error handler (e.g., `withExtendedTouchable`) already catches and displays errors, don't add local `.catch(showError)` — it causes errors to display twice. Only add explicit handling when you need specific error types, cleanup, or there's no global handler. + +User cancellations (closing modals, pressing back) should exit silently, not show a generic error. +❌ `try { await modal() } catch (error) { showError(error) }` +✅ `if (error instanceof UserCancelledError) return; showError(error)` + + +Catch blocks should not always throw the same generic error. Only throw specific messages for expected errors (e.g., API 400); re-throw the original for unexpected ones so users see accurate messages. + +Verify arrays have elements before indexing. 
`vin.addresses[0]` is `undefined` when the array is empty — check before passing to functions that can't handle undefined. + +Don't compare tokenIds with currency codes — they are different identifier types that will never match. Use `request.fromTokenId` when checking against a list of tokenIds, not `request.fromCurrencyCode`. + +Use optional chaining on lookup tables with dynamic keys. +❌ `TABLE[pluginId].includes(tokenId)` (TypeError if key missing) +✅ `TABLE[pluginId]?.includes(tokenId) ?? false` + + +If a validation applies to all code paths, perform it once at function entry rather than repeating in each branch. + +
+ +
+ +Prefer `useHandler` (from `hooks/useHandler`) over `useCallback` for event handlers and async functions. Provides better TypeScript inference and handles async more gracefully. + +If two `useEffect` hooks update related state from related dependencies, combine them into one effect to avoid redundant renders. + +Extract complex display logic to helper functions with early returns instead of nested ternaries or inline conditional chains. + +Use `StyleSheet.compose(baseStyle, customStyle)` for style composition. Handles null automatically — no manual array handling needed. + +iOS number-pad keyboards don't support certain `returnKeyType` values ("Can't find keyplane" warning). Conditionally set: `returnKeyType={Platform.OS === 'ios' ? undefined : 'done'}` + +When replacing one component with another, ensure all props (color, size, style) are carried over. Check the original component's props before replacing — missing visual props change appearance. + +When switching icon libraries, wrap replacement icons in a `View` with the original margin/padding styles if the new component doesn't accept the same style props. + +Wrap navigation calls (push, pop, replace) after complex gestures (slider completion, swipe) in `InteractionManager.runAfterInteractions()`. Navigating while the gesture system is active causes crashes on physical devices. + +Disable interactive elements during async operations to prevent double-taps and race conditions. Use a `pending` state and pass it to the component for visual feedback. + +
+ +
+ +Don't track Redux state locally with `useState(reduxValue)` — it becomes stale when Redux updates. Read from `useSelector` directly. + +Module-level cached state that doesn't reset on logout/login leaks data between users. Export a clear function and call it on logout. This is a recurring bug pattern. + +Local account settings belong in Redux, not separate module-level caches. Redux is the right place for globally-available account information. + +Use `account.dataStore.setItem/getItem` instead of `account.localDisklet` directly. Disklet filenames are stored in plaintext, leaking information the server shouldn't see. DataStore encrypts filenames. + +When changing storage formats, always include migration code: read old format, convert, write new format, delete old. Users have existing data on disk. + +When updating nested state objects in storage, merge with existing state to avoid overwriting concurrent updates from other parts of the app. +❌ `notifState: newNotifState` (overwrites sibling keys) +✅ `notifState: { ...settings.notifState, ...newNotifState }` + + +
+ +
+ +Always use `makePeriodicTask` instead of `setInterval`, especially for async work. Provides proper start/stop lifecycle and handles overlapping invocations. + +Background services go in `components/services/` as React components. Component-based mounting ensures clean lifecycle tied to login/logout. Avoid excessive background work — trigger only when needed. + +Use a `runOnce` helper or `pending` flag to prevent duplicate parallel calls when functions can be triggered multiple times (button presses, retries). + +When implementing cancellable polling, check the cancel flag after every `await`, not just at loop start. The flag can change during any async gap. + +In `setTimeout`/interval callbacks, read state fresh inside the callback. Closures capture stale values — especially problematic for callbacks that fire much later. + +Track `setTimeout` IDs in services/engines with a `Set` and clear them all in the shutdown method. Stale timeouts fire on cleared/deallocated state. + +When async event handlers operate on shared resources (files, git repos, databases), serialize operations per resource using a pending-operation map or queue. Fire-and-forget `.catch()` patterns cause race conditions on rapid events. + +
+ +
+ +All network responses and disk reads must be cleaned with the cleaners library before use. Access cleaned values, not raw data. + +Derive types from cleaners with `ReturnType`. Don't duplicate type definitions alongside cleaner definitions. + +`asOptional` accepts both `undefined` AND `null` despite the name. To preserve the null/undefined distinction, use `asOptional(asEither(asNull, asString), null)` with a default. + +New fields added to cleaners for persisted data MUST use `asOptional` unless migration code is included. Existing data on disk won't have the new field — non-optional fields cause load failures. + +Remove or comment out unused fields in cleaners. Dead cleaner fields add noise and can mislead. + +
+ +
+ +Don't leave dead or unused code "just in case." Git history preserves it. This includes unused variables, unreachable branches, and commented-out blocks. + +Don't declare variables just to pass them to a function — inline the parameters. Exception: typed constants for functions with untyped/`any` parameters, where the constant provides compile-time checking. + +Before creating a new utility, check for existing helpers: `getTokenId`/`getTokenIdForced` instead of `getWalletTokenId`, `getExchangeDenom` instead of custom multiplier lookups. + +Use existing mock data from `src/util/fake/` or consolidate new mocks there. Duplicated half-baked mock data breaks on core changes. + +Never commit hardcoded sandbox URLs or debug flags. Use environment configuration (`envConfig.*`, `__DEV__`). + +Don't use local file paths (`file:../my-package`) in package.json dependencies. Breaks builds for other developers and CI. + +No unguarded `console.log` in production code. Guard with `ENV.DEBUG_VERBOSE_LOGGING` or remove entirely. + +Use a single validation function for both real-time and submit-time checks. Duplicated validation with different thresholds lets users submit invalid forms. + +Use local synchronous helpers (`div` from biggystring + `getExchangeDenom`) for amount conversions instead of async wallet API calls that cross an expensive bridge. Always specify decimal precision to avoid integer truncation: `div(native, multiplier, 18)` not `div(native, multiplier)`. + +Use established libraries (e.g., `rfc4648` for base64) instead of hand-rolling standard algorithms. Hand-rolled implementations miss edge cases and add maintenance burden. + +When a value appears in multiple configuration locations, ensure they match. Extract shared constants to prevent silent drift. + +Delete style properties from `StyleSheet.create` that aren't referenced by any component. Unused styles add noise. + +
+ +
+ +Search the localization file (`en_US.json`) before adding new keys. Don't create duplicates of existing strings. + +String keys describe semantic meaning, not UI location. +❌ `signup_screen_get_started` +✅ `get_started_button` + + +Prompts describe the action, not the gesture. Doesn't translate well across platforms. +❌ `"Tap to select a country"` +✅ `"Select a country"` + + +Error messages and user-facing strings are localized in the GUI layer, not in API/plugin code. API layers throw structured errors (e.g., `NetworkError('CONNECTION_FAILED')`) that the GUI translates for display. + +
+ +
+ +Document constraints that aren't obvious from the code: `// EVM-only: assumes EVM contract address format` + +Remove comments when the context they describe has changed. Stale comments mislead more than missing comments. + +Good comments explain reasoning, not mechanics. +❌ `// Loop through items and filter by status` +✅ `// Only active items can be edited; archived items are read-only` + + +
+ +
+ +Place all dependencies in `devDependencies` except cleaner packages (which may be exported as types to NPM consumers). + +Server and client configuration in separate files (`serverConfig.json`, `clientConfig.json`), both validated with cleaners via `cleaner-config`. Prevents accidentally exposing server secrets to clients. + +Server processes use PM2 with `pm2.json` at repo root. API processes in cluster mode (`"instances": "max"`); engine processes as single instances to avoid duplicate background work. + +When a server repo has both backend and frontend, the `build` script must build both. Use `npm-run-all -p build.*` to run in parallel. + +
diff --git a/.cursor/rules/typescript-standards.mdc b/.cursor/rules/typescript-standards.mdc new file mode 100644 index 0000000..4700608 --- /dev/null +++ b/.cursor/rules/typescript-standards.mdc @@ -0,0 +1,269 @@ +--- +description: TypeScript/React coding standards for error handling, types, and patterns +globs: ["**/*.ts","**/*.tsx"] +alwaysApply: false +--- + +Enforce TypeScript and React coding standards in all `.ts`/`.tsx` file edits. + + + +NEVER use hard-coded user-facing strings. All display text MUST come from localized string resources (`lstrings.*`). This includes error messages, labels, placeholders, and any text visible to users. +❌ `setError('Something went wrong')` +❌ `Loading...` +✅ `setError(lstrings.generic_error)` +✅ `{lstrings.loading}` + + +Localized strings with placeholders MUST use numbered suffixes (`_1s`, `_2s`, etc.) and positional `sprintf` args (`%1$s`, `%2$s`). +❌ `warning_message: 'Amount %s exceeds limit of %s'` +✅ `warning_message_2s: 'Amount %1$s exceeds limit of %2$s'` +❌ `sprintf(lstrings.warning_header, 'this item')` +✅ `sprintf(lstrings.warning_header_1s, itemName)` + + +NEVER use `any` types. Define an interface, type, or cleaner. If truly unavoidable, add a comment explaining why. + +NEVER use optional chaining results directly in conditions. +❌ `if (obj?.prop)` → ✅ `if (obj?.prop != null)` +❌ `if (obj?.arr?.length > 0)` → ✅ `if (obj?.arr != null && obj.arr.length > 0)` + + +NEVER use empty rejection handlers that silently swallow errors. +❌ `.catch(() => {})` +✅ `.catch((err: unknown) => { showError(err) })` +Exception: Empty handlers are acceptable ONLY when the rejection is an expected user action (e.g., user cancelled a modal) AND there's nothing to clean up. + + +Catch blocks MUST use `(error: unknown) => {...}` format. + +Do not use the `void` operator to silence Promise returns. Create a non-async handler wrapping the async call with explicit error handling. 
+❌ `onSwipe={() => { void doAsync() }}` +✅ `const onSwipe = useHandler(() => { doAsync().catch((err: unknown) => { showError(err) }) })` + + +Do not use inline styles in JSX. Use `getStyles`/`cacheStyles` (static) and memoized (derived) for style definitions. + +JSX event handler props MUST NOT use inline arrow functions. Create named handlers. + + + + + +Use `??` instead of `||` for default values. `??` only treats `null`/`undefined` as missing; `||` treats all falsy values as missing. +❌ `config.timeout || 5000` → ✅ `config.timeout ?? 5000` (preserves `0`) +❌ `user.name || 'Anonymous'` → ✅ `user.name ?? 'Anonymous'` (preserves `''`) + + +Prefer flat boolean expressions over nested if/return in filter/predicate functions. +❌ `if (x != null) { if (f(x).match(y)) { return true } }; return otherResult` +✅ `return (x != null && f(x).match(y)) || otherResult` + + +Do not add branches that return the same value as the final return. +❌ `if (node.type === 'TSNullKeyword') { return false }; return false` +✅ `return false` + + +When a handler only forwards to another function with no additional logic, pass the function directly. +❌ `const handleComplete = useHandler(() => { onComplete?.() })` +✅ `onPress: onComplete` + + +Extract reusable helpers for common boilerplate patterns (e.g., "run at most once in parallel"). + +Avoid calling expensive transformation functions (like `normalizeForSearch`, `toLowerCase`) inside loops when the input doesn't change per iteration. Pre-compute outside the loop. +❌ `items.filter(item => searchTerms.every(term => normalize(item.name).includes(term)))` +✅ `items.filter(item => { const n = normalize(item.name); return searchTerms.every(term => n.includes(term)) })` + + +Use `asJSON` cleaner instead of manual `JSON.parse`. +❌ `const data = asMyCleaner(JSON.parse(text))` +✅ `const data = asJSON(asMyCleaner)(text)` + + +Use TanStack Query (`useQuery`) for async data fetching instead of `useEffect`/`useState` patterns. 
+❌ `const [data, setData] = useState(null); useEffect(() => { fetchData().then(setData) }, [])` +✅ `const { data } = useQuery({ queryKey: ['myData', deps], queryFn: fetchData, enabled: deps != null })` + + +Use specific Redux selectors to avoid unnecessary re-renders. +❌ `const { countryCode } = useSelector(state => state.ui.settings)` +✅ `const countryCode = useSelector(state => state.ui.settings.countryCode)` + + +Keep `useSelector` callbacks simple — only access state, never derive. Derivation logic belongs in `useMemo` (or inline) after all referenced variables are declared. Selector callbacks run on every store update and can reference hoisted-but-uninitialized variables, causing silent bugs. +❌ `const result = useSelector(state => { const x = expensiveFn(someVar, state.foo); return x })` +✅ `const foo = useSelector(state => state.foo)` then `const result = useMemo(() => expensiveFn(someVar, foo), [someVar, foo])` + + +Use `React.FC` for component exports. Use `React.ReactElement` for non-component render functions. +❌ `const Component = (props: Props): React.JSX.Element => {` +✅ `const Component: React.FC<Props> = props => {` + + +Use descriptive variable names that clearly indicate their purpose. Avoid single/few-letter variables except in trivial cases (loop counters, mathematical formulas). +❌ `const s = asMaybePrivateNetworkingSetting(cfg.userSettings)` +❌ `const ds = asMaybePrivateNetworkingSetting(cfg.currencyInfo.defaultSettings)` +❌ `return (s ?? ds)?.networkPrivacy === 'nym'` +✅ `const userSettings = asMaybePrivateNetworkingSetting(currencyConfig.userSettings)` +✅ `return userSettings?.networkPrivacy === 'nym'` + + +Always include cleanup functions in `useEffect` hooks that create timers, intervals, subscriptions, or other side effects. + +Code comments and READMEs document the current state of the code, not the history of changes. + +Use `biggystring` for all numeric calculations involving crypto amounts, fiat values, or exchange rates.
Native JS floating-point math loses precision. Values from `convertCurrency`, `convertNativeToExchange`, and similar helpers are already biggystring-compatible strings. +❌ `const impact = (parseFloat(from) - parseFloat(to)) / parseFloat(from)` +✅ `const impact = div(sub(from, to), from, 8)` + + +When deriving arrays or objects from props/state (e.g. `Object.values()`, `Object.keys()`, `.filter()`, `.map()`), wrap in `React.useMemo` if the result is used in a dependency array or passed as a prop. Bare derivations create new references every render. +❌ `const wallets = Object.values(currencyWallets)` (used in effect deps) +✅ `const wallets = React.useMemo(() => Object.values(currencyWallets), [currencyWallets])` + + +When guarding against re-fetching with nullable map lookups, check for the success payload specifically — not just entry existence. Storing error results as non-null entries permanently blocks retry if the guard only checks `== null`. +❌ `if (resultMap[id] == null) fetchData(id)` (error entries block retry) +✅ `if (resultMap[id]?.data == null) fetchData(id)` (only skip when data is present) +Exception: Auto-load effects where infinite retry on persistent failure is undesirable — keep `== null` there and allow retry only via explicit user action. + + +Component files (`.tsx`) and utility files (`.ts`) follow a consistent section ordering. + +**File-level ordering:** +1. Imports +2. Types / Interfaces — exported types first, then internal `Props` +3. Constants +4. Main component (`export const Scene: React.FC`) +5. Sub-components (internal, non-exported) +6. Styles (`getStyles` / `cacheStyles`) +7. Helpers / utility functions — pure functions at the very end of the file + +**Component body ordering:** +1. Props destructuring +2. Theme / styles (`useTheme`, `getStyles`) +3. State (`useState`) +4. Refs (`useRef`) +5. Selectors (`useSelector`, `useWatch`) +6. Derived values / `useMemo` +7. Handlers (`useHandler`) +8. Effects (`useEffect`, `useBackEvent`) +9. 
Return JSX + + + + + + +`@typescript-eslint/strict-boolean-expressions` on `any`-typed value. +Cause: Variable is `any` because it comes from an untyped method or third-party code. +Fix: Type-annotate the variable to remove `any`. Do NOT use explicit comparisons — they don't help when the value itself is `any`. +❌ `if (!result.ok)` where `result` is `any` +✅ `const results: Array> = await wallet.split(items)` +Known untyped methods: `EdgeCurrencyWallet.split()` returns `Array>`. + + +Strict boolean on nullable/optional values. +Cause: Using truthy check on value that could be `null`, `undefined`, `0`, or `''`. +Fix: Use explicit nullish comparison (`!= null`, `!== ''`, `> 0`). +❌ `if (value)` where `value` is `string | undefined` +✅ `if (value != null && value !== '')` +❌ `if (array.length)` → ✅ `if (array.length > 0)` + + +Type-only imports MUST use `import type` at the top level, not inline `type` keyword within a value import. +❌ `import { type Foo, type Bar } from 'module'` (when importing ONLY types) +✅ `import type { Foo, Bar } from 'module'` +OK: `import { someValue, type Foo } from 'module'` (mixed value + type import) + + +Imports are auto-sorted by `simple-import-sort/imports`. When adding new imports, place them roughly in alphabetical order — the formatter will fix the exact order. If the pre-commit hook fails with "Run autofix to sort these imports!", the imports just need reordering. + + +Floating promises must have `.catch()` handlers. +✅ `.catch((err: unknown) => { showError(err) })` — standard for unexpected errors +✅ `.catch(() => {})` — ONLY for expected rejections (user cancelled modal, expected race condition) +The `(err: unknown)` typing is required by `@typescript-eslint/use-unknown-in-catch-callback-variable`. + + +Catch callbacks must type error as `unknown`. 
+❌ `.catch(err => ...)` or `.catch((err: any) => ...)` +✅ `.catch((err: unknown) => { showError(err) })` +For try/catch blocks, use `catch (e: unknown)` and narrow with type guards or assertions. + + +Functions must have explicit return types. +Fix: Add return type annotation. Common types: +- `void` for side-effect-only functions +- `React.ReactElement` or `React.ReactElement | null` for render helpers +- `Promise<void>` for async functions with no return +- Specific type for functions that return values +❌ `function foo() { return 1 }` +✅ `function foo(): number { return 1 }` + + +Using deprecated API. +Fix: Check the deprecation message for the replacement API. Common replacements: +- `NavigationBase` → Read `/fix-eslint` skill `navigation-base` pattern for category-based fix guidance +- `uniqueIdentifier` → `EdgeSpendInfo.memos` +- `memo` → `EdgeSpendInfo.memos` +- `networkFee` / `parentNetworkFee` → `networkFees` +- `currencyCode` → `tokenId` +If no clear replacement exists, flag to user for guidance. + + +Event handler props must follow naming convention. +Fix: Rename handler to match the prop pattern. +- Props starting with `on` expect handlers starting with `handle` +❌ `onPress={openModal}` → ✅ `onPress={handleOpenModal}` +❌ `onChange={updateValue}` → ✅ `onChange={handleUpdateValue}` + + +Components should use `React.FC` pattern. +❌ `const Component = (props: Props): React.ReactElement => {` +✅ `export const Component: React.FC<Props> = props => {` + + +Generic components cannot use `React.FC` because it does not support type parameters. +If the generic is not essential (type param only used internally, can be collapsed into a concrete type), remove the generic and convert to `React.FC`. +If the generic is essential (callers rely on type inference, e.g. `<MyList<ItemT> ...>`), keep the function declaration form with an explicit return type. The warning is accepted.
+✅ `export function MyComponent<T>(props: Props<T>): React.ReactElement {` +❌ Converting an essential generic to `React.FC` — this loses type safety for callers. + + +Avoid `styled()` wrapper components. +Fix: Convert to regular component using `useTheme()` and `cacheStyles()`. +❌ `const StyledView = styled(View)(theme => ({ ... }))` +✅ Create a regular component: +```tsx +const MyView: React.FC<Props> = props => { + const theme = useTheme() + const styles = getStyles(theme) + return <View style={styles.container}>{props.children}</View> +} +const getStyles = cacheStyles((theme: Theme) => ({ + container: { ... } +})) +``` +Note: This is an architectural change. If the file has many `styled()` usages, flag to user rather than refactoring inline. + + +When catching unknown errors that need property inspection, use `cleaners` instead of type assertions. +❌ `const err = e as { code?: string; message?: string }` +✅ Define a cleaner and use `asMaybe`: +```ts +const asFooError = asObject({ + code: asValue(FOO_CODE), + message: asOptional(asString, '') +}) +const fooError = asMaybe(asFooError)(e) +if (fooError != null) { ... } +``` +For generic error message extraction: +❌ `err.message ?? ''` (unsafe on `unknown`) +✅ `e instanceof Error ? e.message : String(e)` + + + diff --git a/.cursor/rules/workflow-halt-on-error.mdc b/.cursor/rules/workflow-halt-on-error.mdc new file mode 100644 index 0000000..d5424d3 --- /dev/null +++ b/.cursor/rules/workflow-halt-on-error.mdc @@ -0,0 +1,84 @@ +--- +description: Halt on workflow errors and detect slash-command invocations in user messages +alwaysApply: true +--- + + + +All workflow-related skill definitions (`*.md` / `SKILL.md`) and workflow companion scripts (`*.sh`) are sourced from `~/.cursor/`. When executing skills, prefer explicit `~/.cursor/...` paths and do not assume repo-local workflow files unless the skill explicitly points to one.
+ +When a skill mentions a script path, resolve it under `~/.cursor/skills/<skill>/scripts/` unless the skill explicitly specifies an absolute path elsewhere. Do not assume repo-relative `scripts/` paths without verifying the skill directory contents. + +When ANY shell command fails (non-zero exit code) while executing an active skill workflow, a delegated subskill from that workflow, or a companion-script step required by that workflow (except where explicitly allowed by `auto-fix-verification-failures` or `companion-script-nonzero-contracts`): +1. **STOP** — do not retry, work around, substitute, or continue the workflow. +2. **Report** — show the user the exact command, exit code, and error output. +3. **Diagnose** — classify the failure: missing tool (`command not found`), wrong path, permissions, or logic error. +4. **Evaluate workflow** — if the failure reveals a gap in a skill definition, follow the fix-workflow-first rules below. +5. **Wait** — do not resume until the user responds. + + +When a workflow gap is discovered in an active skill definition: +1. **Stop immediately** — do not continue the current task or apply any workaround. +2. **Identify the root cause** in the skill (`.cursor/skills/*/SKILL.md`) definition. +3. **Propose the fix** to the user and wait for approval before proceeding. +4. **Fix the skill** using `/author` after approval. +5. **Resume the original task** only after the skill is updated.
+ + +These workflow halt rules are for skill-driven execution, especially hands-off/orchestrated skills and their dependencies. They do not automatically apply to ad hoc exploration, incidental verification, or low-risk authoring work unless that command is part of an active skill contract. + +Exception to `halt-on-error`: For verification/code-quality failures where diagnostics are explicit and local, continue automatically with bounded remediation. + +Allowed auto-fix scope: +- TypeScript/compiler failures (`tsc`) with clear file/line diagnostics +- Lint failures (`eslint`) with clear file/line diagnostics +- Test failures (`jest`/`yarn test`) when stack traces or assertion output identify failing test files +- `verify-repo.sh` code-step failures that resolve to one of the above + +Required behavior: +1. Briefly log rationale: failure type, affected files, and why scope is unambiguous. +2. Apply the minimal fix in the failing repo. +3. Re-run the failing verification step. +4. Limit to 2 remediation attempts; if still failing or scope expands, fall back to `halt-on-error`. + +Never auto-fix: +- Missing tools/auth (`command not found`, `PROMPT_GH_AUTH`) +- Wrong path/permissions +- Companion script contract/usage failures +- Unexpected exit codes from orchestrator scripts +- Any failure requiring destructive operations or workflow bypasses + + +Respect documented companion script exit-code contracts. Non-zero does NOT always mean fatal. + +For `~/.cursor/skills/im/scripts/lint-warnings.sh`: +- `0` = no remaining lint findings after auto-fix +- `1` = remaining lint findings after auto-fix (expected actionable state) +- `2` = execution error (fatal) + +Required behavior: +1. If exit `1`, continue workflow by fixing the remaining lint findings before implementation. +2. If the script auto-fixes pre-existing lint issues, commit those changes in a separate lint-fix commit immediately before feature commits, even if no findings remain. +3. 
If exit `2`, apply `halt-on-error`. + + +Do NOT silently substitute an alternative tool or approach when a command fails. If `rg` is not found, do not fall back to `grep`. If a script exits non-zero, do not manually replicate what the script does. The failure is the signal — report it. + + + + + +Scan the user's message for `/word` tokens. A token is a **command invocation** when ALL of: +- `/word` is preceded by whitespace, a newline, or is at the start of the message +- `word` contains only lowercase letters and hyphens (e.g., `/im`, `/pr-create`, `/author`) +- `/word` is NOT inside a file path, URL, or code block + +When detected: +1. Read `~/.cursor/skills/<word>/SKILL.md` and follow it immediately. +2. If the file does not exist, inform the user: "Skill `/<word>` not found in `~/.cursor/skills/`." + +**Ignore `/word`** in: file paths (`/Users/...`, `~/...`), URLs (`https://...`), mid-word (`and/or`), backticks/code blocks. + + diff --git a/.cursor/scripts/port-to-opencode.sh b/.cursor/scripts/port-to-opencode.sh new file mode 100755 index 0000000..8c7599b --- /dev/null +++ b/.cursor/scripts/port-to-opencode.sh @@ -0,0 +1,224 @@ +#!/usr/bin/env bash +# port-to-opencode.sh — Convert Cursor .mdc/.md files to OpenCode-compatible JSON + MD mirrors. +# Single self-contained script (bash + inline node). No Python dependency.
+# +# Usage: +# port-to-opencode.sh # Convert all rules and skills +# port-to-opencode.sh --dry-run # Show what would be done +# port-to-opencode.sh --validate # Validate existing JSON mirrors +# port-to-opencode.sh file1.mdc file2.md # Convert specific files +set -euo pipefail + +DRY_RUN=false +VALIDATE=false +FILES=() + +while [[ $# -gt 0 ]]; do + case "$1" in + --dry-run) DRY_RUN=true; shift ;; + --validate) VALIDATE=true; shift ;; + --sync) shift ;; # accepted for compat, no-op + *) FILES+=("$1"); shift ;; + esac +done + +exec node -e ' +const fs = require("fs") +const pathMod = require("path") +const os = require("os") + +const CURSOR_DIR = pathMod.join(os.homedir(), ".cursor") +const OPENCODE_DIR = pathMod.join(os.homedir(), ".config", "opencode") +const DRY_RUN = process.argv[1] === "true" +const VALIDATE = process.argv[2] === "true" +const inputFiles = process.argv.slice(3).filter(f => f) + +function parseYamlFrontmatter(content) { + const match = content.match(/^---\s*\n([\s\S]*?)\n---\s*\n/) + if (!match) return {} + const fm = {} + for (const line of match[1].split("\n")) { + const idx = line.indexOf(":") + if (idx === -1) continue + const key = line.substring(0, idx).trim() + let value = line.substring(idx + 1).trim() + if (value.startsWith("[") && value.endsWith("]")) { + try { value = JSON.parse(value.replace(/\x27/g, "\x22")) } catch {} + } else if (value === "true" || value === "false") { + value = value === "true" + } + fm[key] = value + } + return fm +} + +function extractTagContent(content, tag) { + const re = new RegExp("<" + tag + "[^>]*>([\\s\\S]*?)") + const m = content.match(re) + return m ? 
m[1].trim() : "" +} + +function extractGoal(content) { return extractTagContent(content, "goal") } + +function extractRules(content) { + const section = extractTagContent(content, "rules") + if (!section) return [] + const rules = [] + const re = /]*>([\s\S]*?)<\/rule>/g + let m + while ((m = re.exec(section)) !== null) { + let instruction = m[2].trim().replace(/\*\*/g, "").replace(/\s+/g, " ") + rules.push({ id: m[1], instruction }) + } + return rules +} + +function extractSteps(content) { + const steps = [] + const re = /]*>([\s\S]*?)<\/step>/g + let m + while ((m = re.exec(content)) !== null) { + steps.push({ id: m[1], name: m[2], instruction: m[3].trim() }) + } + return steps +} + +function extractScriptRefs(content) { + const refs = new Set() + const re = /[~]?\/[\w/\-.]+\.(sh|js)/g + let m + while ((m = re.exec(content)) !== null) refs.add(m[0]) + return [...refs].sort() +} + +function convertMdcToJson(filePath) { + const content = fs.readFileSync(filePath, "utf8") + const fm = parseYamlFrontmatter(content) + const basename = pathMod.basename(filePath, ".mdc") + return { + id: basename, title: basename, + description: fm.description || extractGoal(content), + globs: fm.globs || [], alwaysApply: fm.alwaysApply || false, + goal: extractGoal(content), rules: extractRules(content), + steps: extractSteps(content), scripts: extractScriptRefs(content) + } +} + +function convertCommandToJson(filePath) { + const content = fs.readFileSync(filePath, "utf8") + const basename = pathMod.basename(filePath, ".md") + const goal = extractGoal(content) + return { + id: basename, title: basename, description: goal, goal, + rules: extractRules(content), steps: extractSteps(content), + scripts: extractScriptRefs(content) + } +} + +function convertSkillToJson(filePath) { + const content = fs.readFileSync(filePath, "utf8") + const fm = parseYamlFrontmatter(content) + const basename = pathMod.basename(pathMod.dirname(filePath)) + return { + id: basename, title: fm.name || basename, 
name: fm.name || basename, + description: fm.description || extractGoal(content), + goal: extractGoal(content), rules: extractRules(content), + steps: extractSteps(content), scripts: extractScriptRefs(content) + } +} + +function convertToMd(content) { + let r = content + r = r.replace(/([\s\S]*?)<\/goal>/g, "## Goal\n\n$1\n") + r = r.replace(/]*>/g, "## Rules\n\n") + r = r.replace(/<\/rules>/g, "") + r = r.replace(//g, "- **$1**: ") + r = r.replace(/<\/rule>/g, "") + r = r.replace(//g, "### Step $1: $2\n\n") + r = r.replace(/<\/step>/g, "") + r = r.replace(//g, "#### $1\n\n") + r = r.replace(/<\/sub-step>/g, "") + r = r.replace(//g, "## Edge Cases\n\n") + r = r.replace(/<\/edge-cases>/g, "") + r = r.replace(//g, "### $1\n\n") + r = r.replace(/<\/case>/g, "") + r = r.replace(//g, "## Sequence: $1\n\n") + r = r.replace(/<\/sequence>/g, "") + r = r.replace(//g, "## Scope\n\n") + r = r.replace(/<\/scope>/g, "") + r = r.replace(/]*>/g, "## Standards\n\n") + r = r.replace(/<\/standards>/g, "") + r = r.replace(//g, "- **$1**: ") + r = r.replace(/<\/standard>/g, "") + while (r.includes("\n\n\n")) r = r.replace(/\n\n\n/g, "\n\n") + return r +} + +function processFile(filePath) { + let outputDir, outputBase, converter + if (filePath.includes("/rules/") && filePath.endsWith(".mdc")) { + outputDir = pathMod.join(OPENCODE_DIR, "rules") + outputBase = pathMod.basename(filePath, ".mdc") + converter = convertMdcToJson + } else if (filePath.includes("/skills/") && pathMod.basename(filePath) === "SKILL.md") { + outputDir = pathMod.join(OPENCODE_DIR, "skills", pathMod.basename(pathMod.dirname(filePath))) + outputBase = "SKILL" + converter = convertSkillToJson + } else { + return "Skipping: " + filePath + " (unknown type)" + } + + const jsonPath = pathMod.join(outputDir, outputBase + ".json") + const mdPath = pathMod.join(outputDir, outputBase + ".md") + + if (DRY_RUN) return "Would create: " + jsonPath + "\n Would create: " + mdPath + + fs.mkdirSync(outputDir, { recursive: true }) + 
const jsonData = converter(filePath) + const content = fs.readFileSync(filePath, "utf8") + fs.writeFileSync(jsonPath, JSON.stringify(jsonData, null, 2) + "\n") + fs.writeFileSync(mdPath, convertToMd(content)) + return "Converted: " + filePath + " -> " + jsonPath +} + +function validateJson(jsonPath) { + try { + const data = JSON.parse(fs.readFileSync(jsonPath, "utf8")) + const missing = ["id", "title", "description"].filter(f => !(f in data)) + if (missing.length) return "INVALID: " + jsonPath + " (missing: " + missing.join(", ") + ")" + return "VALID: " + jsonPath + } catch (e) { + return "INVALID: " + jsonPath + " (not valid JSON: " + e.message + ")" + } +} + +function walkDir(dir, predicate) { + const results = [] + try { + for (const entry of fs.readdirSync(dir, { withFileTypes: true })) { + const full = pathMod.join(dir, entry.name) + if (entry.isDirectory()) results.push(...walkDir(full, predicate)) + else if (predicate(full, entry.name)) results.push(full) + } + } catch {} + return results +} + +if (VALIDATE) { + console.log("Validating JSON mirrors...") + for (const f of walkDir(OPENCODE_DIR, (fp, n) => n.endsWith(".json"))) console.log(validateJson(f)) + process.exit(0) +} + +const files = inputFiles.length > 0 + ? inputFiles.map(f => f.startsWith("~") ? f.replace("~", os.homedir()) : f) + : [ + ...walkDir(pathMod.join(CURSOR_DIR, "rules"), (fp, n) => n.endsWith(".mdc")), + ...walkDir(pathMod.join(CURSOR_DIR, "skills"), (fp, n) => n === "SKILL.md") + ] + +console.log("Found " + files.length + " files to process") +for (const f of files) console.log(processFile(f)) +console.log("\nDone. 
Processed " + files.length + " files.") +if (DRY_RUN) console.log("Run without --dry-run to write files.") +' "$DRY_RUN" "$VALIDATE" ${FILES[@]+"${FILES[@]}"} diff --git a/.cursor/scripts/pr-status-gql.sh b/.cursor/scripts/pr-status-gql.sh new file mode 100755 index 0000000..b21c3ca --- /dev/null +++ b/.cursor/scripts/pr-status-gql.sh @@ -0,0 +1,429 @@ +#!/usr/bin/env bash +# pr-status-gql.sh — Fetch status of open PRs for a user (GraphQL API). +# Single run, no TUI. "New" comments = posted after the PR's last commit. +# +# Uses a single GraphQL query per poll. Separate rate limit budget from REST. +# +# Usage: +# pr-status-gql.sh --repo edge-react-gui [--owner EdgeApp] [--user Jon-edge] [--format text|json] +# pr-status-gql.sh # All repos for user in EdgeApp org +# pr-status-gql.sh --budget 0.5 # Reserve 50% of rate limit for other tools +# +# Requires: gh CLI (authenticated). +set -euo pipefail + +OWNER="EdgeApp" REPO="" USER="" FORMAT="text" BUDGET="0.67" +while [[ $# -gt 0 ]]; do + case "$1" in + --owner) OWNER="$2"; shift 2 ;; + --repo) REPO="$2"; shift 2 ;; + --user) USER="$2"; shift 2 ;; + --format) FORMAT="$2"; shift 2 ;; + --budget) BUDGET="$2"; shift 2 ;; + *) echo "Unknown arg: $1" >&2; exit 1 ;; + esac +done + +STATE_DIR="${TMPDIR:-/tmp}/pr-watch-gql-${OWNER}-${REPO:-all}" +mkdir -p "$STATE_DIR" +export STATE_DIR + +# Build the GraphQL query based on mode (single repo vs all repos) +PR_FIELDS=' + number title isDraft url headRefName updatedAt + repository { name nameWithOwner } + headRefOid + reviewDecision + reviews(last: 30) { + nodes { author { login } state submittedAt } + } + comments(last: 100) { + totalCount + nodes { author { login } createdAt bodyText } + } + reviewThreads(first: 100) { + nodes { + isResolved + comments(first: 5) { + nodes { author { login } createdAt bodyText path line } + } + } + } + commits(last: 1) { + nodes { + commit { + committedDate + oid + statusCheckRollup { + contexts(first: 20) { + nodes { + ... 
on CheckRun { + __typename name status conclusion + } + ... on StatusContext { + __typename context state + } + } + } + } + } + } + } +' + +if [[ -n "$REPO" ]]; then + QUERY=" + { + viewer { login } + repository(owner: \"${OWNER}\", name: \"${REPO}\") { + pullRequests(first: 50, states: OPEN, orderBy: {field: UPDATED_AT, direction: DESC}) { + nodes { + author { login } + ${PR_FIELDS} + } + } + } + rateLimit { cost remaining resetAt limit } + }" +else + QUERY=" + { + viewer { + login + pullRequests(first: 50, states: OPEN, orderBy: {field: UPDATED_AT, direction: DESC}) { + nodes { + ${PR_FIELDS} + } + } + } + rateLimit { cost remaining resetAt limit } + }" +fi + +# Execute query via gh CLI +GQL_RESULT=$(gh api graphql -f query="$QUERY" 2>&1) + +# Process the result with Node.js +exec node -e ' +const fs = require("fs") +const { OWNER, REPO, USER_ARG, FORMAT, BUDGET, STATE_DIR } = { + OWNER: process.argv[1], + REPO: process.argv[2] || "", + USER_ARG: process.argv[3], + FORMAT: process.argv[4], + BUDGET: parseFloat(process.argv[5]) || 0.67, + STATE_DIR: process.argv[6] +} +const gqlResult = JSON.parse(process.argv[7]) + +if (gqlResult.errors) { + process.stderr.write("GraphQL errors: " + JSON.stringify(gqlResult.errors) + "\n") + process.exit(1) +} + +const data = gqlResult.data + +// --- Determine user and extract raw PR nodes --- +let user +let rawNodes + +if (REPO) { + // Single-repo mode: repository.pullRequests, filtered by viewer login + user = USER_ARG || data.viewer?.login || "unknown" + rawNodes = (data.repository?.pullRequests?.nodes || []) + .filter(n => n.author?.login === user) +} else { + // All-repo mode: viewer.pullRequests (already scoped to authenticated user) + user = data.viewer?.login || USER_ARG || "unknown" + rawNodes = data.viewer?.pullRequests?.nodes || [] +} + +// --- Rate limit --- +const rateLimit = data.rateLimit || {} +const rlCost = rateLimit.cost || 1 +const rlRemaining = rateLimit.remaining +const rlLimit = rateLimit.limit +const 
rlResetAt = rateLimit.resetAt + +// --- NEW PR tracking --- +function loadPreviousPrNumbers() { + try { return JSON.parse(fs.readFileSync(`${STATE_DIR}/known-prs.json`, "utf8")) } catch { return [] } +} +function savePrNumbers(numbers) { + fs.writeFileSync(`${STATE_DIR}/known-prs.json`, JSON.stringify(numbers)) +} + +const previousPrNumbers = loadPreviousPrNumbers() +const currentPrNumbers = rawNodes.map(n => n.number) +const newPrNumbers = new Set(currentPrNumbers.filter(n => !previousPrNumbers.includes(n))) +savePrNumbers(currentPrNumbers) + +// --- Transform GQL nodes to result format --- +function checkInfo(contexts, name) { + const run = (contexts || []).find(c => c.__typename === "CheckRun" && c.name === name) + if (!run) return { status: "none", conclusion: null } + return { status: run.status?.toLowerCase() || "none", conclusion: run.conclusion?.toLowerCase() || null } +} + +function relTime(iso) { + if (!iso) return "-" + const ms = Date.now() - new Date(iso).getTime() + const m = Math.floor(ms / 60000) + if (m < 60) return m + "m ago" + const h = Math.floor(m / 60) + if (h < 24) return h + "h ago" + return Math.floor(h / 24) + "d ago" +} + +const results = rawNodes.map(pr => { + const repo = pr.repository?.name || REPO + const n = pr.number + const sha = pr.headRefOid?.substring(0, 7) || "?" 
+ const lastCommitNode = pr.commits?.nodes?.[0]?.commit + const lastCommitDate = lastCommitNode?.committedDate || null + const contexts = lastCommitNode?.statusCheckRollup?.contexts?.nodes || [] + + // Collect review thread comments (inline review comments) + const reviewThreadComments = [] + for (const thread of (pr.reviewThreads?.nodes || [])) { + for (const c of (thread.comments?.nodes || [])) { + if (c.author?.login !== user) { + reviewThreadComments.push({ + user: c.author?.login, + body: c.bodyText?.substring(0, 120), + at: c.createdAt, + path: c.path, + line: c.line, + type: "review" + }) + } + } + } + + // Issue comments + const issueComments = (pr.comments?.nodes || []) + .filter(c => c.author?.login !== user) + .map(c => ({ + user: c.author?.login, + body: c.bodyText?.substring(0, 120), + at: c.createdAt, + type: "issue" + })) + + const allComments = [...reviewThreadComments, ...issueComments] + .sort((a, b) => b.at.localeCompare(a.at)) + + // Split into new (after last commit) and old + const newComments = lastCommitDate + ? allComments.filter(c => c.at > lastCommitDate) + : [] + const oldComments = lastCommitDate + ? 
allComments.filter(c => c.at <= lastCommitDate) + : allComments + + // Review approval status — dedupe to latest review per human user + const latestByUser = {} + for (const r of (pr.reviews?.nodes || [])) { + const login = r.author?.login + if (!login || login.endsWith("[bot]")) continue + if (login === user) continue + if (!latestByUser[login] || r.submittedAt > latestByUser[login].submittedAt) { + latestByUser[login] = r + } + } + const approvals = Object.values(latestByUser).filter(r => r.state === "APPROVED").map(r => r.author.login) + const changesRequested = Object.values(latestByUser).filter(r => r.state === "CHANGES_REQUESTED").map(r => r.author.login) + const reviewerCount = Object.keys(latestByUser).length + + return { + number: n, + repo, + title: pr.title, + branch: pr.headRefName, + draft: pr.isDraft, + isNew: newPrNumbers.has(n), + lastCommitSha: sha, + lastCommitDate, + comments: { + total: allComments.length, + new: newComments.length, + old: oldComments.length, + newComments: newComments.map(c => ({ user: c.user, at: c.at, path: c.path, line: c.line, body: c.body })), + latest: allComments[0] ? { user: allComments[0].user, at: allComments[0].at } : null + }, + reviews: { + approvals, + changesRequested, + reviewerCount + }, + checks: { + bugbot: checkInfo(contexts, "Cursor Bugbot"), + ci: checkInfo(contexts, "Travis CI - Pull Request"), + codeql: checkInfo(contexts, "Analyze (javascript-typescript)") + } + } +}) + +// Calculate recommended interval +const secsUntilReset = rlResetAt ? Math.max(1, Math.floor((new Date(rlResetAt).getTime() - Date.now()) / 1000)) : 3600 +const budgetCalls = rlRemaining != null ? Math.floor(rlRemaining * BUDGET) : 2500 +const pollsAvailable = budgetCalls > 0 ? 
Math.floor(budgetCalls / rlCost) : 1 +const recommendedInterval = Math.max(30, Math.ceil(secsUntilReset / pollsAvailable)) + +const meta = { + backend: "graphql", + queryCost: rlCost, + rateLimitRemaining: rlRemaining, + rateLimitLimit: rlLimit, + rateLimitResetAt: rlResetAt, + recommendedInterval +} + +if (FORMAT === "json") { + console.log(JSON.stringify({ user, owner: OWNER, repo: REPO || null, timestamp: new Date().toISOString(), meta, prs: results }, null, 2)) + process.exit(0) +} + +// Text output — FORCE_COLOR env var overrides TTY detection (for pr-watch subshell) +const IS_TTY = process.env.FORCE_COLOR === "1" || process.stdout.isTTY +const B = IS_TTY ? "\x1b[1m" : "" +const D = IS_TTY ? "\x1b[2m" : "" +const R = IS_TTY ? "\x1b[0m" : "" +const GR = IS_TTY ? "\x1b[32m" : "" +const YL = IS_TTY ? "\x1b[33m" : "" +const RD = IS_TTY ? "\x1b[31m" : "" +const CY = IS_TTY ? "\x1b[36m" : "" +const MG = IS_TTY ? "\x1b[35m" : "" +const LINE = "─".repeat(72) +const multiRepo = !REPO + +function fmtCheck(label, c) { + if (c.status === "none") return D + label + " —" + R + if (c.status !== "completed") return YL + "⏳ " + label + R + if (c.conclusion === "success") return GR + "✅ " + label + R + if (c.conclusion === "neutral") return YL + "⚠️ " + label + R + if (c.conclusion === "failure") return RD + "❌ " + label + R + return label + " " + (c.conclusion || "?") +} + +function fmtReview(pr) { + const { approvals, changesRequested, reviewerCount } = pr.reviews + if (changesRequested.length > 0) + return `${RD}❌ Changes requested${R} ${D}(${changesRequested.join(", ")})${R}` + if (approvals.length > 0 && approvals.length >= reviewerCount && reviewerCount > 0) + return `${GR}✅ Approved${R} ${D}(${approvals.join(", ")})${R}` + if (approvals.length > 0) + return `${GR}👍 ${approvals.length}/${reviewerCount} approved${R} ${D}(${approvals.join(", ")})${R}` + if (reviewerCount > 0) + return `${YL}👀 Awaiting review${R}` + return `${D}No reviews${R}` +} + +function prState(pr) { + 
const hasApproval = pr.reviews.approvals.length > 0 + const hasChangesRequested = pr.reviews.changesRequested.length > 0 + const hasNew = pr.comments.new > 0 + const bugbotOk = pr.checks.bugbot.conclusion === "success" || pr.checks.bugbot.status === "none" + const ciOk = pr.checks.ci.conclusion === "success" || pr.checks.ci.status === "none" + const ciFail = pr.checks.ci.conclusion === "failure" + const ciPending = pr.checks.ci.status !== "completed" && pr.checks.ci.status !== "none" + const bugbotPending = pr.checks.bugbot.status !== "completed" && pr.checks.bugbot.status !== "none" + const bugbotIssues = pr.checks.bugbot.conclusion === "neutral" + const checksGreen = bugbotOk && ciOk + + if (ciFail || hasChangesRequested) + return { tier: 5, tag: `${RD}${B}BLOCKED${R}`, emoji: "🔴" } + if (hasNew || bugbotIssues) + return { tier: 4, tag: `${YL}${B}ATTENTION${R}`, emoji: "🟡" } + if (ciPending || bugbotPending) + return { tier: 3, tag: `${YL}PENDING${R}`, emoji: "⏳" } + if (hasApproval && checksGreen) + return { tier: 0, tag: `${GR}${B}READY${R}`, emoji: "🚀" } + if (hasApproval) + return { tier: 1, tag: `${GR}APPROVED${R}`, emoji: "👍" } + if (checksGreen) + return { tier: 2, tag: `${GR}CLEAR${R}`, emoji: "🟢" } + return { tier: 3, tag: `${D}OPEN${R}`, emoji: "⚪" } +} + +function sortedPRs(list) { + return [...list].sort((a, b) => { + const ta = prState(a).tier, tb = prState(b).tier + if (ta !== tb) return ta - tb + const da = a.comments.latest?.at || a.lastCommitDate || "" + const db = b.comments.latest?.at || b.lastCommitDate || "" + return db.localeCompare(da) + }) +} + +function renderPR(pr, indent) { + const state = prState(pr) + const draft = pr.draft ? ` ${D}[draft]${R}` : "" + const newPrTag = pr.isNew ? ` ${MG}${B}NEW${R}` : "" + const title = pr.title.length > 45 ? pr.title.substring(0, 42) + "..." : pr.title + const newTag = pr.comments.new > 0 + ? ` ${RD}${B}🔔 +${pr.comments.new} new${R}` + : "" + const latestInfo = pr.comments.latest + ? 
`${D}${pr.comments.latest.user} ${relTime(pr.comments.latest.at)}${R}` + : `${D}none${R}` + const pad = " ".repeat(indent) + const prUrl = `https://github.com/${OWNER}/${pr.repo}/pull/${pr.number}` + + const lines = [] + lines.push(`${pad}${state.emoji} ${state.tag} ${B}#${pr.number}${R}${draft}${newPrTag} ${CY}${title}${R}`) + lines.push(`${pad} ${D}↳${R} ${MG}${pr.branch}${R} ${D}${prUrl}${R}`) + lines.push(`${pad} ${fmtReview(pr)}`) + lines.push(`${pad} 💬 ${pr.comments.total}${newTag} ${D}latest:${R} ${latestInfo}`) + lines.push(`${pad} ${fmtCheck("Bugbot", pr.checks.bugbot)} ${fmtCheck("CI", pr.checks.ci)} ${fmtCheck("CodeQL", pr.checks.codeql)}`) + return lines +} + +const scope = REPO ? `${OWNER}/${REPO}` : `${OWNER}/*` +const out = [] +out.push(`${B}${scope}${R} ${D}— ${user} — ${results.length} open PR(s)${R}`) +out.push(`${D}${LINE}${R}`) + +if (!results.length) { + out.push(`${D}No open PRs by ${user}${R}`) +} else if (multiRepo) { + const byRepo = {} + for (const pr of results) { + if (!byRepo[pr.repo]) byRepo[pr.repo] = [] + byRepo[pr.repo].push(pr) + } + const repoOrder = Object.keys(byRepo).sort((a, b) => { + const latestA = sortedPRs(byRepo[a])[0] + const latestB = sortedPRs(byRepo[b])[0] + const da = latestA.comments.latest?.at || latestA.lastCommitDate || "" + const db = latestB.comments.latest?.at || latestB.lastCommitDate || "" + return db.localeCompare(da) + }) + for (const repo of repoOrder) { + out.push(``) + out.push(`${B}${repo}${R} ${D}(${byRepo[repo].length})${R}`) + for (const pr of sortedPRs(byRepo[repo])) { + out.push("") + out.push(...renderPR(pr, 2)) + } + } +} else { + for (const pr of sortedPRs(results)) { + out.push("") + out.push(...renderPR(pr, 0)) + } +} + +// Footer with rate limit info +out.push("") +const rlInfo = rlRemaining != null + ? 
`GQL: ${rlRemaining}/${rlLimit} remaining (cost ${rlCost})` + : "GQL: unknown" +out.push(`${D}${LINE}${R}`) +out.push(`${D}${rlInfo} | next: ${recommendedInterval}s${R}`) + +// Machine-readable line for pr-watch.sh to parse +out.push(`# interval:${recommendedInterval}`) + +console.log(out.join("\n")) +' "$OWNER" "$REPO" "$USER" "$FORMAT" "$BUDGET" "$STATE_DIR" "$GQL_RESULT" diff --git a/.cursor/scripts/pr-status.sh b/.cursor/scripts/pr-status.sh new file mode 100755 index 0000000..44519c7 --- /dev/null +++ b/.cursor/scripts/pr-status.sh @@ -0,0 +1,407 @@ +#!/usr/bin/env bash +# pr-status.sh — Fetch status of open PRs for a user via gh CLI. +# Single run, no TUI. "New" comments = posted after the PR's last commit. +# +# Uses gh CLI for all API access (no GITHUB_TOKEN needed). +# Per-PR updated_at caching to skip detail fetches for unchanged PRs. +# +# Usage: +# pr-status.sh --repo edge-react-gui [--owner EdgeApp] [--user Jon-edge] [--format text|json] +# pr-status.sh # All repos for user in EdgeApp org +# pr-status.sh --user Jon-edge # All repos for specific user in EdgeApp org +# +# Requires: gh CLI (authenticated), node. +set -euo pipefail + +OWNER="EdgeApp" REPO="" USER="" FORMAT="text" +while [[ $# -gt 0 ]]; do + case "$1" in + --owner) OWNER="$2"; shift 2 ;; + --repo) REPO="$2"; shift 2 ;; + --user) USER="$2"; shift 2 ;; + --format) FORMAT="$2"; shift 2 ;; + *) echo "Unknown arg: $1" >&2; exit 1 ;; + esac +done + +command -v gh &>/dev/null || { echo "Error: gh CLI not found. Install: https://cli.github.com" >&2; exit 2; } +gh auth status &>/dev/null 2>&1 || { echo "Error: gh not authenticated. 
Run: gh auth login" >&2; exit 2; } + +STATE_DIR="${TMPDIR:-/tmp}/pr-watch-${OWNER}-${REPO:-all}" +mkdir -p "$STATE_DIR" +export STATE_DIR + +exec node -e ' +const { execFile } = require("child_process") +const fs = require("fs") +const { OWNER, REPO, USER, FORMAT } = { + OWNER: process.argv[1], + REPO: process.argv[2] || "", + USER: process.argv[3], + FORMAT: process.argv[4] +} +const STATE_DIR = process.env.STATE_DIR + +let apiCallCount = 0 + +function ghFetch(path, extraArgs) { + return new Promise((resolve) => { + apiCallCount++ + const args = ["api", path] + if (extraArgs) args.push(...extraArgs) + execFile("gh", args, { encoding: "utf8", maxBuffer: 10 * 1024 * 1024 }, (err, stdout) => { + if (err) { resolve(null); return } + try { resolve(JSON.parse(stdout)) } catch { resolve(null) } + }) + }) +} + +// --- Per-PR updated_at caching --- +function loadPrCache(number) { + try { return JSON.parse(fs.readFileSync(`${STATE_DIR}/pr-${number}.json`, "utf8")) } catch { return null } +} + +function savePrCache(number, result, updatedAt) { + fs.writeFileSync(`${STATE_DIR}/pr-${number}.json`, JSON.stringify({ updatedAt, result })) +} + +function loadPreviousPrNumbers() { + try { return JSON.parse(fs.readFileSync(`${STATE_DIR}/known-prs.json`, "utf8")) } catch { return [] } +} + +function savePrNumbers(numbers) { + fs.writeFileSync(`${STATE_DIR}/known-prs.json`, JSON.stringify(numbers)) +} + +// --- Concurrency limiter --- +async function pool(items, concurrency, fn) { + const results = new Array(items.length) + let next = 0 + async function worker() { + while (next < items.length) { + const i = next++ + results[i] = await fn(items[i], i) + } + } + await Promise.all(Array.from({ length: Math.min(concurrency, items.length) }, () => worker())) + return results +} + +// --- Utilities --- +function relTime(iso) { + if (!iso) return "-" + const ms = Date.now() - new Date(iso).getTime() + const m = Math.floor(ms / 60000) + if (m < 60) return m + "m ago" + const h = Math.floor(m 
/ 60) + if (h < 24) return h + "h ago" + return Math.floor(h / 24) + "d ago" +} + +function checkInfo(runs, name) { + const run = (runs || []).find(c => c.name === name) + if (!run) return { status: "none", conclusion: null } + return { status: run.status, conclusion: run.conclusion } +} + +async function main() { + let user = USER + if (!user) { + const me = await ghFetch("/user") + user = me?.login || "unknown" + } + + const previousPrNumbers = loadPreviousPrNumbers() + + let prs + if (REPO) { + const allPRs = await ghFetch(`/repos/${OWNER}/${REPO}/pulls?state=open&per_page=30`) + if (!Array.isArray(allPRs)) { + process.stderr.write("API error fetching PRs\n") + process.exit(1) + } + prs = allPRs + .filter(p => p.user.login === user) + .map(p => ({ ...p, _repo: REPO })) + } else { + const q = encodeURIComponent(`type:pr state:open author:${user} org:${OWNER}`) + const search = await ghFetch(`/search/issues?q=${q}&per_page=50&sort=updated&order=desc`) + if (!search?.items) { + process.stderr.write("API error searching PRs\n") + process.exit(1) + } + prs = await pool(search.items, 4, async item => { + const repo = item.repository_url.split("/").pop() + const full = await ghFetch(`/repos/${OWNER}/${repo}/pulls/${item.number}`) + return { ...full, _repo: repo } + }) + } + + const currentPrNumbers = prs.map(p => p.number) + const newPrNumbers = new Set(currentPrNumbers.filter(n => !previousPrNumbers.includes(n))) + savePrNumbers(currentPrNumbers) + + let changedPrCount = 0 + + const results = await pool(prs, 4, async pr => { + const repo = pr._repo + const n = pr.number + const sha = pr.head.sha + const updatedAt = pr.updated_at + + const cached = loadPrCache(n) + if (cached && cached.updatedAt === updatedAt && !newPrNumbers.has(n)) { + return { ...cached.result, isNew: false } + } + + changedPrCount++ + + const [inline, issue, checks, commits, reviews] = await Promise.all([ + ghFetch(`/repos/${OWNER}/${repo}/pulls/${n}/comments?per_page=100`), + 
ghFetch(`/repos/${OWNER}/${repo}/issues/${n}/comments?per_page=100`), + ghFetch(`/repos/${OWNER}/${repo}/commits/${sha}/check-runs`), + ghFetch(`/repos/${OWNER}/${repo}/pulls/${n}/commits?per_page=100`), + ghFetch(`/repos/${OWNER}/${repo}/pulls/${n}/reviews?per_page=100`) + ]) + + const commitList = Array.isArray(commits) ? commits : [] + const lastCommit = commitList.length > 0 ? commitList[commitList.length - 1] : null + const lastCommitDate = lastCommit?.commit?.committer?.date + || lastCommit?.commit?.author?.date + || null + + const allComments = [ + ...(Array.isArray(inline) ? inline : []) + .filter(c => c.user?.login !== user) + .map(c => ({ id: c.id, user: c.user?.login, body: c.body?.substring(0, 120), at: c.created_at, path: c.path, line: c.line, type: "review" })), + ...(Array.isArray(issue) ? issue : []) + .filter(c => c.user?.login !== user) + .map(c => ({ id: c.id, user: c.user?.login, body: c.body?.substring(0, 120), at: c.created_at, type: "issue" })) + ].sort((a, b) => b.at.localeCompare(a.at)) + + const newComments = lastCommitDate + ? allComments.filter(c => c.at > lastCommitDate) + : [] + const oldComments = lastCommitDate + ? allComments.filter(c => c.at <= lastCommitDate) + : allComments + + const checkRuns = checks?.check_runs || [] + + const reviewList = Array.isArray(reviews) ? 
reviews : [] + const latestByUser = {} + for (const r of reviewList) { + const login = r.user?.login + if (!login || login.endsWith("[bot]")) continue + if (login === user) continue + if (!latestByUser[login] || r.submitted_at > latestByUser[login].submitted_at) { + latestByUser[login] = r + } + } + const approvals = Object.values(latestByUser).filter(r => r.state === "APPROVED").map(r => r.user.login) + const changesRequested = Object.values(latestByUser).filter(r => r.state === "CHANGES_REQUESTED").map(r => r.user.login) + const reviewerCount = Object.keys(latestByUser).length + + const result = { + number: n, + repo, + title: pr.title, + branch: pr.head.ref, + draft: pr.draft, + isNew: newPrNumbers.has(n), + lastCommitSha: sha.substring(0, 7), + lastCommitDate, + comments: { + total: allComments.length, + new: newComments.length, + old: oldComments.length, + newComments: newComments.map(c => ({ user: c.user, at: c.at, path: c.path, line: c.line, body: c.body })), + latest: allComments[0] ? { user: allComments[0].user, at: allComments[0].at } : null + }, + reviews: { + approvals, + changesRequested, + reviewerCount + }, + checks: { + bugbot: checkInfo(checkRuns, "Cursor Bugbot"), + ci: checkInfo(checkRuns, "Travis CI - Pull Request"), + codeql: checkInfo(checkRuns, "Analyze (javascript-typescript)") + } + } + + savePrCache(n, result, updatedAt) + return result + }) + + // Fetch rate limit info + const rateLimit = await ghFetch("/rate_limit") + const rateLimitRemaining = rateLimit?.resources?.core?.remaining ?? null + const rateLimitLimit = rateLimit?.resources?.core?.limit ?? null + const rateLimitReset = rateLimit?.resources?.core?.reset ?? null + + const callsPerPoll = apiCallCount + const secsUntilReset = rateLimitReset ? Math.max(1, rateLimitReset - Math.floor(Date.now() / 1000)) : 3600 + const budgetCalls = rateLimitRemaining != null ? Math.floor(rateLimitRemaining * 0.67) : 2500 + const recommendedInterval = budgetCalls > 0 ? 
Math.max(30, Math.ceil(secsUntilReset / (budgetCalls / callsPerPoll))) : 300 + + const meta = { + apiCalls: apiCallCount, + changedPrs: changedPrCount, + rateLimitRemaining, + rateLimitLimit, + rateLimitReset, + recommendedInterval + } + + if (FORMAT === "json") { + console.log(JSON.stringify({ user, owner: OWNER, repo: REPO || null, timestamp: new Date().toISOString(), meta, prs: results }, null, 2)) + return + } + + // Text output — FORCE_COLOR env var overrides TTY detection (for pr-watch subshell) + const IS_TTY = process.env.FORCE_COLOR === "1" || process.stdout.isTTY + const B = IS_TTY ? "\x1b[1m" : "" + const D = IS_TTY ? "\x1b[2m" : "" + const R = IS_TTY ? "\x1b[0m" : "" + const GR = IS_TTY ? "\x1b[32m" : "" + const YL = IS_TTY ? "\x1b[33m" : "" + const RD = IS_TTY ? "\x1b[31m" : "" + const CY = IS_TTY ? "\x1b[36m" : "" + const MG = IS_TTY ? "\x1b[35m" : "" + const LINE = "─".repeat(72) + const multiRepo = !REPO + + function fmtCheck(label, c) { + if (c.status === "none") return D + label + " —" + R + if (c.status !== "completed") return YL + "⏳ " + label + R + if (c.conclusion === "success") return GR + "✅ " + label + R + if (c.conclusion === "neutral") return YL + "⚠️ " + label + R + if (c.conclusion === "failure") return RD + "❌ " + label + R + return label + " " + (c.conclusion || "?") + } + + function fmtReview(pr) { + const { approvals, changesRequested, reviewerCount } = pr.reviews + if (changesRequested.length > 0) + return `${RD}❌ Changes requested${R} ${D}(${changesRequested.join(", ")})${R}` + if (approvals.length > 0 && approvals.length >= reviewerCount && reviewerCount > 0) + return `${GR}✅ Approved${R} ${D}(${approvals.join(", ")})${R}` + if (approvals.length > 0) + return `${GR}👍 ${approvals.length}/${reviewerCount} approved${R} ${D}(${approvals.join(", ")})${R}` + if (reviewerCount > 0) + return `${YL}👀 Awaiting review${R}` + return `${D}No reviews${R}` + } + + function prState(pr) { + const hasApproval = pr.reviews.approvals.length > 0 + 
const hasChangesRequested = pr.reviews.changesRequested.length > 0 + const hasNew = pr.comments.new > 0 + const bugbotOk = pr.checks.bugbot.conclusion === "success" || pr.checks.bugbot.status === "none" + const ciOk = pr.checks.ci.conclusion === "success" || pr.checks.ci.status === "none" + const ciFail = pr.checks.ci.conclusion === "failure" + const ciPending = pr.checks.ci.status !== "completed" && pr.checks.ci.status !== "none" + const bugbotPending = pr.checks.bugbot.status !== "completed" && pr.checks.bugbot.status !== "none" + const bugbotIssues = pr.checks.bugbot.conclusion === "neutral" + const checksGreen = bugbotOk && ciOk + + if (ciFail || hasChangesRequested) + return { tier: 5, tag: `${RD}${B}BLOCKED${R}`, emoji: "🔴" } + if (hasNew || bugbotIssues) + return { tier: 4, tag: `${YL}${B}ATTENTION${R}`, emoji: "🟡" } + if (ciPending || bugbotPending) + return { tier: 3, tag: `${YL}PENDING${R}`, emoji: "⏳" } + if (hasApproval && checksGreen) + return { tier: 0, tag: `${GR}${B}READY${R}`, emoji: "🚀" } + if (hasApproval) + return { tier: 1, tag: `${GR}APPROVED${R}`, emoji: "👍" } + if (checksGreen) + return { tier: 2, tag: `${GR}CLEAR${R}`, emoji: "🟢" } + return { tier: 3, tag: `${D}OPEN${R}`, emoji: "⚪" } + } + + function sortedPRs(list) { + return [...list].sort((a, b) => { + const ta = prState(a).tier, tb = prState(b).tier + if (ta !== tb) return ta - tb + const da = a.comments.latest?.at || a.lastCommitDate || "" + const db = b.comments.latest?.at || b.lastCommitDate || "" + return db.localeCompare(da) + }) + } + + function renderPR(pr, indent) { + const state = prState(pr) + const draft = pr.draft ? ` ${D}[draft]${R}` : "" + const newPrTag = pr.isNew ? ` ${MG}${B}NEW${R}` : "" + const title = pr.title.length > 45 ? pr.title.substring(0, 42) + "..." : pr.title + const newTag = pr.comments.new > 0 + ? ` ${RD}${B}🔔 +${pr.comments.new} new${R}` + : "" + const latestInfo = pr.comments.latest + ? 
`${D}${pr.comments.latest.user} ${relTime(pr.comments.latest.at)}${R}` + : `${D}none${R}` + const pad = " ".repeat(indent) + const prUrl = `https://github.com/${OWNER}/${pr.repo}/pull/${pr.number}` + + const lines = [] + lines.push(`${pad}${state.emoji} ${state.tag} ${B}#${pr.number}${R}${draft}${newPrTag} ${CY}${title}${R}`) + lines.push(`${pad} ${D}↳${R} ${MG}${pr.branch}${R} ${D}${prUrl}${R}`) + lines.push(`${pad} ${fmtReview(pr)}`) + lines.push(`${pad} 💬 ${pr.comments.total}${newTag} ${D}latest:${R} ${latestInfo}`) + lines.push(`${pad} ${fmtCheck("Bugbot", pr.checks.bugbot)} ${fmtCheck("CI", pr.checks.ci)} ${fmtCheck("CodeQL", pr.checks.codeql)}`) + return lines + } + + const scope = REPO ? `${OWNER}/${REPO}` : `${OWNER}/*` + const out = [] + out.push(`${B}${scope}${R} ${D}— ${user} — ${results.length} open PR(s)${R}`) + out.push(`${D}${LINE}${R}`) + + if (!results.length) { + out.push(`${D}No open PRs by ${user}${R}`) + } else if (multiRepo) { + const byRepo = {} + for (const pr of results) { + if (!byRepo[pr.repo]) byRepo[pr.repo] = [] + byRepo[pr.repo].push(pr) + } + const repoOrder = Object.keys(byRepo).sort((a, b) => { + const latestA = sortedPRs(byRepo[a])[0] + const latestB = sortedPRs(byRepo[b])[0] + const da = latestA.comments.latest?.at || latestA.lastCommitDate || "" + const db = latestB.comments.latest?.at || latestB.lastCommitDate || "" + return db.localeCompare(da) + }) + for (const repo of repoOrder) { + out.push(``) + out.push(`${B}${repo}${R} ${D}(${byRepo[repo].length})${R}`) + for (const pr of sortedPRs(byRepo[repo])) { + out.push("") + out.push(...renderPR(pr, 2)) + } + } + } else { + for (const pr of sortedPRs(results)) { + out.push("") + out.push(...renderPR(pr, 0)) + } + } + + // Footer with rate limit info + out.push("") + const rlInfo = rateLimitRemaining != null + ? 
`API: ${rateLimitRemaining}/${rateLimitLimit} remaining` + : "API: unknown" + out.push(`${D}${LINE}${R}`) + out.push(`${D}${rlInfo} | ${apiCallCount} calls | next: ${recommendedInterval}s${R}`) + + // Machine-readable line for pr-watch.sh to parse + out.push(`# interval:${recommendedInterval}`) + + console.log(out.join("\n")) +} + +main().catch(e => { process.stderr.write("Error: " + e.message + "\n"); process.exit(1) }) +' "$OWNER" "$REPO" "$USER" "$FORMAT" diff --git a/.cursor/scripts/pr-watch.sh b/.cursor/scripts/pr-watch.sh new file mode 100755 index 0000000..e257d5b --- /dev/null +++ b/.cursor/scripts/pr-watch.sh @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +# pr-watch.sh — TUI wrapper around pr-status scripts. +# Redraws in-place on each poll. Ctrl+C to stop. +# +# Usage: +# pr-watch.sh --repo edge-react-gui [--owner EdgeApp] [--user Jon-edge] +# pr-watch.sh # All repos, auto interval, GQL backend +# pr-watch.sh --backend rest # Force REST backend +# pr-watch.sh --interval 60 # Override interval (clamped to safe minimum) +# pr-watch.sh --budget 0.5 # Reserve 50% of rate limit budget +# pr-watch.sh --once [...] # Single poll, no clear, no loop. For agent/script use. +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ARGS=() INTERVAL="" ONCE=false BACKEND="" BUDGET="" +while [[ $# -gt 0 ]]; do + case "$1" in + --interval) INTERVAL="$2"; shift 2 ;; + --once) ONCE=true; shift ;; + --backend) BACKEND="$2"; shift 2 ;; + --budget) BUDGET="$2"; shift 2 ;; + *) ARGS+=("$1"); shift ;; + esac +done + +# Inject --owner default if not already in ARGS +if [[ ${#ARGS[@]} -eq 0 ]] || ! 
printf '%s\n' "${ARGS[@]}" | grep -q -- '--owner'; then + ARGS+=(--owner EdgeApp) +fi + +# Auto-detect backend: prefer gql if gh CLI is available +if [[ -z "$BACKEND" ]]; then + if command -v gh &>/dev/null && gh auth status &>/dev/null; then + BACKEND="gql" + else + BACKEND="rest" + fi +fi + +# Select the status script +if [[ "$BACKEND" == "gql" ]]; then + STATUS_SCRIPT="$SCRIPT_DIR/pr-status-gql.sh" +else + STATUS_SCRIPT="$SCRIPT_DIR/pr-status.sh" +fi + +# Pass budget through if specified +if [[ -n "$BUDGET" ]]; then + ARGS+=(--budget "$BUDGET") +fi + +if $ONCE; then + NOW=$(date '+%H:%M:%S') + printf '%s\n' "PR Watch — ${NOW} (${BACKEND})" + "$STATUS_SCRIPT" "${ARGS[@]}" --format text + exit $? +fi + +# TUI loop +CURRENT_INTERVAL="${INTERVAL:-60}" + +while true; do + OUTPUT=$(FORCE_COLOR=1 "$STATUS_SCRIPT" "${ARGS[@]}" --format text 2>&1) || true + NOW=$(date '+%H:%M:%S') + + # Parse recommended interval from script output + RECOMMENDED=$(echo "$OUTPUT" | grep -oP '(?<=^# interval:)\d+' || echo "") + + # Determine actual sleep interval + if [[ -n "$INTERVAL" ]]; then + # User-specified interval: clamp to at least the recommended minimum + if [[ -n "$RECOMMENDED" ]] && [[ "$INTERVAL" -lt "$RECOMMENDED" ]]; then + CURRENT_INTERVAL="$RECOMMENDED" + else + CURRENT_INTERVAL="$INTERVAL" + fi + elif [[ -n "$RECOMMENDED" ]]; then + CURRENT_INTERVAL="$RECOMMENDED" + fi + + # Strip the machine-readable line from display output + DISPLAY_OUTPUT=$(echo "$OUTPUT" | grep -v '^# interval:') + + printf '\033[H\033[2J' + printf '%s\n' "PR Watch — ${NOW} (${BACKEND}, next in ${CURRENT_INTERVAL}s, Ctrl+C to stop)" + printf '%s\n' "$DISPLAY_OUTPUT" + sleep "$CURRENT_INTERVAL" +done diff --git a/.cursor/scripts/push-env-key.sh b/.cursor/scripts/push-env-key.sh new file mode 100755 index 0000000..fceb8d5 --- /dev/null +++ b/.cursor/scripts/push-env-key.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +# push-env-key.sh — Update a single key in the server's env.json and push +# +# Usage: 
push-env-key.sh [-m "commit message"] +# +# Examples: +# push-env-key.sh EDGE_API_KEY abc123 +# push-env-key.sh EDGE_API_KEY abc123 -m "Rotate Edge API key" + +set -euo pipefail + +SERVER="jack" +REMOTE_REPO="/home/jon/jenkins-files/master" + +KEY="" +VALUE="" +COMMIT_MSG="" + +while [[ $# -gt 0 ]]; do + case "$1" in + -m) COMMIT_MSG="$2"; shift 2 ;; + *) + if [[ -z "$KEY" ]]; then KEY="$1" + elif [[ -z "$VALUE" ]]; then VALUE="$1" + else echo "Unexpected argument: $1" >&2; exit 1 + fi + shift ;; + esac +done + +if [[ -z "$KEY" || -z "$VALUE" ]]; then + echo "Usage: push-env-key.sh [-m \"commit message\"]" >&2 + exit 1 +fi + +if [[ -z "$COMMIT_MSG" ]]; then + COMMIT_MSG="Update $KEY in env.json" +fi + +ssh "$SERVER" bash -s -- "$KEY" "$VALUE" "$COMMIT_MSG" "$REMOTE_REPO" <<'REMOTE' + set -euo pipefail + KEY="$1" + VALUE="$2" + MSG="$3" + REPO="$4" + + cd "$REPO" + git pull --ff-only + + CURRENT=$(jq -r --arg k "$KEY" '.[$k] // empty' env.json) + if [[ "$CURRENT" == "$VALUE" ]]; then + echo "No change: $KEY is already set to that value." + exit 0 + fi + + jq --arg k "$KEY" --arg v "$VALUE" '.[$k] = $v' env.json > env.json.tmp + mv env.json.tmp env.json + + git add env.json + git commit -m "$MSG" + git push + echo "Done: $KEY updated and pushed." +REMOTE diff --git a/.cursor/scripts/tool-sync.sh b/.cursor/scripts/tool-sync.sh new file mode 100755 index 0000000..b8c3d71 --- /dev/null +++ b/.cursor/scripts/tool-sync.sh @@ -0,0 +1,406 @@ +#!/usr/bin/env bash +# tool-sync.sh — Sync Cursor rules, skills, and scripts to OpenCode and Claude Code. 
+# Source of truth: ~/.cursor/ +# Targets: ~/.config/opencode/, ~/.claude/ +# +# Usage: tool-sync.sh [--dry-run] [--target opencode|claude|all] +# --dry-run Show what would change without writing files +# --target Sync to a specific target (default: all) + +set -euo pipefail + +CURSOR_DIR="$HOME/.cursor" +OPENCODE_DIR="$HOME/.config/opencode" +CLAUDE_DIR="$HOME/.claude" +DRY_RUN=false +TARGET="all" + +while [[ $# -gt 0 ]]; do + case "$1" in + --dry-run) DRY_RUN=true; shift ;; + --target) TARGET="$2"; shift 2 ;; + *) echo "Unknown option: $1" >&2; exit 1 ;; + esac +done + +# Counters +created=0 +updated=0 +removed=0 +skipped=0 + +log() { echo " $1"; } +log_action() { + local action="$1" file="$2" + if [[ "$DRY_RUN" == true ]]; then + echo " [DRY-RUN] $action: $file" + else + echo " $action: $file" + fi +} + +# ─── Helpers ────────────────────────────────────────────────────────────────── + +# Convert .mdc to .md: strip Cursor-specific XML tags, keep content +mdc_to_md() { + local src="$1" + # .mdc files are already valid markdown with YAML frontmatter. + # Some use , , , XML tags — convert to markdown. 
+ sed \ + -e 's|^\(.*\)|## Goal\n\n\1|' \ + -e 's|^|## Goal\n|' \ + -e 's|^||' \ + -e 's|^|## Rules\n|' \ + -e 's|^||' \ + -e 's|^\(.*\)|- **\1**: \2|' \ + -e 's|^|- **\1**:|' \ + -e 's|^||' \ + -e 's|^|### Step \1: \2\n|' \ + -e 's|^||' \ + -e '/^$/N;/^\n$/d' \ + "$src" +} + +# Generate OpenCode JSON metadata from a .mdc rule file +generate_rule_json() { + local src="$1" name="$2" + local description="" always_apply="false" globs="[]" + + # Parse YAML frontmatter + local in_frontmatter=false + while IFS= read -r line; do + if [[ "$line" == "---" ]]; then + if [[ "$in_frontmatter" == true ]]; then break; fi + in_frontmatter=true + continue + fi + if [[ "$in_frontmatter" == true ]]; then + case "$line" in + description:*) description="${line#description: }" ;; + alwaysApply:*) always_apply="${line#alwaysApply: }" ;; + globs:*) globs="${line#globs: }" ;; + esac + fi + done < "$src" + + jq -n \ + --arg id "$name" \ + --arg title "$name" \ + --arg description "$description" \ + --argjson globs "$globs" \ + --argjson alwaysApply "$always_apply" \ + '{id: $id, title: $title, description: $description, globs: $globs, alwaysApply: $alwaysApply}' +} + +# Generate OpenCode JSON metadata from a command .md file +generate_command_json() { + local src="$1" name="$2" + + # Extract goal line (first paragraph after ## Goal) + local goal="" + goal=$(awk '/^## Goal/{getline; getline; print; exit}' "$src") + + # Extract rules as JSON array + local rules="[]" + rules=$(awk ' + /^## Rules/,/^## |^### Step/ { + if (/^- \*\*([^*]+)\*\*: (.+)/) { + match($0, /\*\*([^*]+)\*\*: (.+)/, m) + if (m[1] != "") { + printf "{\"id\":\"%s\",\"instruction\":\"%s\"}\n", m[1], m[2] + } + } + } + ' "$src" | jq -s '.' 
2>/dev/null || echo "[]") + + # Extract steps as JSON array + local steps="[]" + steps=$(awk ' + /^### Step [0-9]+:/ { + match($0, /^### Step ([0-9]+): (.+)/, m) + if (m[1] != "") { + if (step_id != "") { printf "{\"id\":\"%s\",\"name\":\"%s\",\"instruction\":\"%s\"}\n", step_id, step_name, instruction } + step_id = m[1]; step_name = m[2]; instruction = "" + } + next + } + /^## / { if (step_id != "") { printf "{\"id\":\"%s\",\"name\":\"%s\",\"instruction\":\"%s\"}\n", step_id, step_name, instruction; step_id="" } next } + step_id != "" { gsub(/"/, "\\\""); instruction = instruction ($0 != "" ? (instruction != "" ? "\\n" : "") $0 : "") } + END { if (step_id != "") printf "{\"id\":\"%s\",\"name\":\"%s\",\"instruction\":\"%s\"}\n", step_id, step_name, instruction } + ' "$src" | jq -s '.' 2>/dev/null || echo "[]") + + jq -n \ + --arg id "$name" \ + --arg title "$name" \ + --arg description "$goal" \ + --arg goal "$goal" \ + --argjson rules "$rules" \ + --argjson steps "$steps" \ + '{id: $id, title: $title, description: $description, goal: $goal, rules: $rules, steps: $steps, scripts: ["sh"]}' +} + +# Copy file only if changed, respecting --dry-run +sync_file() { + local src="$1" dest="$2" + if [[ ! -f "$dest" ]]; then + log_action "CREATE" "$dest" + if [[ "$DRY_RUN" == false ]]; then + mkdir -p "$(dirname "$dest")" + cp "$src" "$dest" + fi + ((created++)) || true + elif ! diff -q "$src" "$dest" >/dev/null 2>&1; then + log_action "UPDATE" "$dest" + if [[ "$DRY_RUN" == false ]]; then + cp "$src" "$dest" + fi + ((updated++)) || true + else + ((skipped++)) || true + fi +} + +# Write content to file only if changed +sync_content() { + local content="$1" dest="$2" + local tmp + tmp=$(mktemp) + cat <<< "$content" > "$tmp" + if [[ ! -f "$dest" ]]; then + log_action "CREATE" "$dest" + if [[ "$DRY_RUN" == false ]]; then + mkdir -p "$(dirname "$dest")" + mv "$tmp" "$dest" + else + rm "$tmp" + fi + ((created++)) || true + elif ! 
diff -q "$tmp" "$dest" >/dev/null 2>&1; then + log_action "UPDATE" "$dest" + if [[ "$DRY_RUN" == false ]]; then + mv "$tmp" "$dest" + else + rm "$tmp" + fi + ((updated++)) || true + else + rm "$tmp" + ((skipped++)) || true + fi +} + +# Create symlink, replacing if target changed +sync_symlink() { + local src="$1" dest="$2" + if [[ -L "$dest" ]]; then + local current + current=$(readlink "$dest") + if [[ "$current" == "$src" ]]; then + ((skipped++)) || true + return + fi + log_action "UPDATE" "$dest -> $src" + if [[ "$DRY_RUN" == false ]]; then + ln -sf "$src" "$dest" + fi + ((updated++)) || true + elif [[ -f "$dest" ]]; then + log_action "REPLACE" "$dest (file -> symlink)" + if [[ "$DRY_RUN" == false ]]; then + rm "$dest" + ln -s "$src" "$dest" + fi + ((updated++)) || true + else + log_action "CREATE" "$dest -> $src" + if [[ "$DRY_RUN" == false ]]; then + mkdir -p "$(dirname "$dest")" + ln -s "$src" "$dest" + fi + ((created++)) || true + fi +} + +# ─── OpenCode Sync ──────────────────────────────────────────────────────────── + +sync_opencode() { + echo "━━━ Syncing to OpenCode ━━━" + + # Rules: .mdc → .md + .json + echo " Rules:" + for mdc in "$CURSOR_DIR"/rules/*.mdc; do + [[ -f "$mdc" ]] || continue + local name + name=$(basename "$mdc" .mdc) + + # Convert .mdc to .md + local tmp_md + tmp_md=$(mktemp) + mdc_to_md "$mdc" > "$tmp_md" + sync_file "$tmp_md" "$OPENCODE_DIR/rules/$name.md" + rm -f "$tmp_md" + + # Generate .json + local json + json=$(generate_rule_json "$mdc" "$name") + sync_content "$json" "$OPENCODE_DIR/rules/$name.json" + done + + # Skills: SKILL.md + scripts/ subdirs + echo " Skills:" + if [[ -d "$CURSOR_DIR/skills" ]]; then + # Shared scripts at skills/ top level + for shared in "$CURSOR_DIR"/skills/*.sh; do + [[ -f "$shared" ]] || continue + local name + name=$(basename "$shared") + sync_file "$shared" "$OPENCODE_DIR/skills/$name" + done + # Skill dirs with SKILL.md + scripts/ + for skill_dir in "$CURSOR_DIR"/skills/*/; do + [[ -d "$skill_dir" ]] 
|| continue + local name + name=$(basename "$skill_dir") + if [[ -f "$skill_dir/SKILL.md" ]]; then + sync_file "$skill_dir/SKILL.md" "$OPENCODE_DIR/skills/$name/SKILL.md" + fi + if [[ -d "$skill_dir/scripts" ]]; then + for script in "$skill_dir"/scripts/*; do + [[ -f "$script" ]] || continue + local fname + fname=$(basename "$script") + sync_file "$script" "$OPENCODE_DIR/skills/$name/scripts/$fname" + done + fi + done + fi + + # Standalone scripts + echo " Scripts:" + for script in "$CURSOR_DIR"/scripts/*.sh "$CURSOR_DIR"/scripts/*.js; do + [[ -f "$script" ]] || continue + local name + name=$(basename "$script") + sync_file "$script" "$OPENCODE_DIR/scripts/$name" + done + + # Clean up stale files in OpenCode that no longer exist in Cursor + echo " Cleanup:" + for oc_rule in "$OPENCODE_DIR"/rules/*.md; do + [[ -f "$oc_rule" ]] || continue + local name + name=$(basename "$oc_rule" .md) + if [[ ! -f "$CURSOR_DIR/rules/$name.mdc" ]]; then + log_action "REMOVE" "$oc_rule" + if [[ "$DRY_RUN" == false ]]; then + rm -f "$oc_rule" "$OPENCODE_DIR/rules/$name.json" + fi + ((removed++)) || true + fi + done + + for oc_skill_dir in "$OPENCODE_DIR"/skills/*/; do + [[ -d "$oc_skill_dir" ]] || continue + local name + name=$(basename "$oc_skill_dir") + if [[ ! 
-d "$CURSOR_DIR/skills/$name" ]]; then + log_action "REMOVE" "$oc_skill_dir" + if [[ "$DRY_RUN" == false ]]; then + rm -rf "$oc_skill_dir" + fi + ((removed++)) || true + fi + done +} + +# ─── Claude Code Sync ───────────────────────────────────────────────────────── + +sync_claude() { + echo "━━━ Syncing to Claude Code ━━━" + + # Skills: symlink SKILL.md files + echo " Skills (symlinks):" + if [[ -d "$CURSOR_DIR/skills" ]]; then + for skill_dir in "$CURSOR_DIR"/skills/*/; do + [[ -d "$skill_dir" ]] || continue + local name + name=$(basename "$skill_dir") + if [[ -f "$skill_dir/SKILL.md" ]]; then + sync_symlink "$skill_dir/SKILL.md" "$CLAUDE_DIR/skills/$name/SKILL.md" + fi + done + fi + + # Clean up stale symlinks + if [[ -d "$CLAUDE_DIR/skills" ]]; then + for link in "$CLAUDE_DIR"/skills/*/SKILL.md; do + [[ -e "$link" ]] || continue + if [[ -L "$link" ]]; then + local target + target=$(readlink "$link") + if [[ ! -f "$target" ]]; then + log_action "REMOVE" "$link (dead symlink)" + if [[ "$DRY_RUN" == false ]]; then rm "$link"; fi + ((removed++)) || true + fi + fi + done + fi + + # CLAUDE.md: generate with @import for each rule + echo " CLAUDE.md:" + local dest="$CLAUDE_DIR/CLAUDE.md" + local tmp + tmp=$(mktemp) + + { + echo "# Rules" + echo "" + echo "# Imported from ~/.cursor/rules/ — do not edit manually." + echo "# Re-generate with: ~/.cursor/scripts/tool-sync.sh" + echo "" + for mdc in "$CURSOR_DIR"/rules/*.mdc; do + [[ -f "$mdc" ]] || continue + echo "@$mdc" + done + } > "$tmp" + + if [[ ! -f "$dest" ]]; then + log_action "CREATE" "$dest" + if [[ "$DRY_RUN" == false ]]; then + mv "$tmp" "$dest" + else + rm "$tmp" + fi + ((created++)) || true + elif ! 
diff -q "$tmp" "$dest" >/dev/null 2>&1; then + log_action "UPDATE" "$dest" + if [[ "$DRY_RUN" == false ]]; then + mv "$tmp" "$dest" + else + rm "$tmp" + fi + ((updated++)) || true + else + rm "$tmp" + ((skipped++)) || true + fi +} + +# ─── Main ───────────────────────────────────────────────────────────────────── + +echo "tool-sync: Cursor → ${TARGET}" +if [[ "$DRY_RUN" == true ]]; then + echo "(dry run — no files will be modified)" +fi +echo "" + +case "$TARGET" in + opencode) sync_opencode ;; + claude) sync_claude ;; + all) sync_opencode; echo ""; sync_claude ;; + *) echo "Unknown target: $TARGET" >&2; exit 1 ;; +esac + +echo "" +echo "Done: $created created, $updated updated, $removed removed, $skipped unchanged" diff --git a/.cursor/skills/asana-get-context.sh b/.cursor/skills/asana-get-context.sh new file mode 100755 index 0000000..87f3792 --- /dev/null +++ b/.cursor/skills/asana-get-context.sh @@ -0,0 +1,232 @@ +#!/usr/bin/env bash +# asana-get-context.sh +# Fetch concise context from an Asana task for implementation or PR creation. +# +# Usage: +# asana-get-context.sh +# asana-get-context.sh --task-url +# asana-get-context.sh --task +# +# Accepts a raw task GID or a full Asana URL. 
URL formats supported: +# https://app.asana.com/0//[/f] +# https://app.asana.com/1//task/[/f] +# +# Requires env var: ASANA_TOKEN +# +# Output (compact, agent-friendly): +# TASK_NAME: +# TASK_DESCRIPTION: +# PRIORITY: +# STATUS: +# IMPLEMENTOR: +# REVIEWER: +# COMMENTS: (most recent 5, one per block) +# ATTACHMENTS: files +# DOWNLOADED: files to +# UNPACKED: -> ( files) [if ZIPs present] +# PDF_TEXT: (from , chars) [if PDF has text] +# PDF_PAGES: ( pages from ) [if PDF is image-based] +set -euo pipefail + +# Parse arguments: accept positional, --task, or --task-url +RAW_INPUT="" +while [[ $# -gt 0 ]]; do + case "$1" in + --task-url|--task) + RAW_INPUT="${2:-}" + shift 2 + ;; + -*) + echo "Unknown flag: $1" >&2 + exit 1 + ;; + *) + RAW_INPUT="$1" + shift + ;; + esac +done + +if [[ -z "$RAW_INPUT" ]]; then + echo "Usage: asana-get-context.sh " >&2 + exit 1 +fi + +# Extract task GID: accept a raw numeric GID or any Asana URL containing one. +# Strips trailing path segments (/f, /subtask/…) and query strings. +if [[ "$RAW_INPUT" =~ /task/([0-9]+) ]]; then + TASK_GID="${BASH_REMATCH[1]}" +elif [[ "$RAW_INPUT" =~ /([0-9]+)(/f)?([?#].*)?$ ]]; then + TASK_GID="${BASH_REMATCH[1]}" +elif [[ "$RAW_INPUT" =~ ^[0-9]+$ ]]; then + TASK_GID="$RAW_INPUT" +else + echo "Error: could not extract task GID from: $RAW_INPUT" >&2 + exit 1 +fi +if [[ -z "${ASANA_TOKEN:-}" ]]; then + echo "Error: ASANA_TOKEN not set" >&2 + exit 1 +fi + +API="https://app.asana.com/api/1.0" +AUTH="Authorization: Bearer $ASANA_TOKEN" + +# Fetch task + custom fields +curl -s "$API/tasks/$TASK_GID?opt_fields=name,notes,custom_fields.gid,custom_fields.name,custom_fields.display_value" \ + -H "$AUTH" | python3 -c " +import sys, json +data = json.load(sys.stdin)['data'] + +print(f\"TASK_NAME: {data['name']}\") + +notes = (data.get('notes') or '').strip() +if len(notes) > 500: + notes = notes[:500] + '...' 
+print(f\"TASK_DESCRIPTION: {notes or '(empty)'}\") + +FIELDS = { + '795866930204488': 'PRIORITY', + '1190660107346181': 'STATUS', + '1203334386796983': 'IMPLEMENTOR', + '1203334388004673': 'REVIEWER', +} +for f in data.get('custom_fields', []): + label = FIELDS.get(f['gid']) + if label: + val = f.get('display_value') or '(not set)' + print(f'{label}: {val}') +" + +# Fetch project memberships — look for version project (e.g. "4.44.0") +curl -s "$API/tasks/$TASK_GID?opt_fields=memberships.project.name" \ + -H "$AUTH" | python3 -c " +import sys, json, re +data = json.load(sys.stdin)['data'] +for m in data.get('memberships', []): + name = m.get('project', {}).get('name', '') + if re.match(r'^\d+\.\d+\.\d+$', name): + print(f'VERSION_PROJECT: {name}') + break +else: + print('VERSION_PROJECT: (not set)') +" + +# Fetch recent comments (last 5) +curl -s "$API/tasks/$TASK_GID/stories?opt_fields=resource_subtype,text,created_by.name,created_at&limit=100" \ + -H "$AUTH" | python3 -c " +import sys, json +data = json.load(sys.stdin)['data'] +comments = [s for s in data if s.get('resource_subtype') == 'comment_added'][-5:] +if not comments: + print('COMMENTS: (none)') +else: + print('COMMENTS:') + for c in comments: + author = c.get('created_by', {}).get('name', 'unknown') + text = (c.get('text') or '').strip().replace('\n', ' ') + if len(text) > 200: + text = text[:200] + '...' 
+ date = c.get('created_at', '')[:10] + print(f' [{date}] {author}: {text}') +" + +# Fetch attachments — download all supported types, then post-process +DOWNLOAD_DIR="/tmp/asana-task-$TASK_GID" + +# Phase 1: Download all supported attachments +curl -s "$API/tasks/$TASK_GID/attachments?opt_fields=name,resource_subtype,download_url" \ + -H "$AUTH" | python3 -c " +import sys, json, os, urllib.request + +data = json.load(sys.stdin)['data'] +if not data: + print('ATTACHMENTS: (none)') + sys.exit(0) + +DOWNLOAD_EXTS = { + '.md', '.txt', '.json', '.csv', '.log', '.yaml', '.yml', + '.pdf', + '.zip', + '.png', '.jpg', '.jpeg', '.gif', '.webp', +} +download_dir = '$DOWNLOAD_DIR' +downloaded = [] + +print(f'ATTACHMENTS: {len(data)} files') +for a in data: + name = a.get('name', 'unnamed') + url = a.get('download_url') + ext = os.path.splitext(name)[1].lower() + if ext in DOWNLOAD_EXTS and url: + os.makedirs(download_dir, exist_ok=True) + dest = os.path.join(download_dir, name) + try: + urllib.request.urlretrieve(url, dest) + downloaded.append(dest) + print(f' - {name} (downloaded)') + except Exception as e: + print(f' - {name} (download failed: {e})') + else: + print(f' - {name}') + +if downloaded: + print(f'DOWNLOADED: {len(downloaded)} files to {download_dir}') + for d in downloaded: + print(f' {d}') +" + +# Phase 2: Unpack ZIP archives (may produce more files to process) +shopt -s nullglob +for zip_file in "$DOWNLOAD_DIR"/*.zip; do + subdir="$DOWNLOAD_DIR/$(basename "$zip_file" .zip)" + if unzip -o -q "$zip_file" -d "$subdir" 2>/dev/null; then + file_count=$(find "$subdir" -type f 2>/dev/null | wc -l | tr -d ' ') + echo "UNPACKED: $(basename "$zip_file") -> $subdir ($file_count files)" + rm "$zip_file" + else + echo "UNPACK_FAILED: $(basename "$zip_file")" + fi +done +shopt -u nullglob + +# Phase 3: Process PDFs (text extraction first, image fallback) +process_pdf() { + local pdf="$1" + local base="${pdf%.pdf}" + local fname + fname="$(basename "$pdf")" + + if command -v 
pdftotext &>/dev/null; then + local text + text=$(pdftotext "$pdf" - 2>/dev/null || true) + local char_count + char_count=$(printf '%s' "$text" | tr -d '[:space:]' | wc -c | tr -d ' ') + if [[ "$char_count" -gt 100 ]]; then + printf '%s' "$text" > "${base}.txt" + echo "PDF_TEXT: ${base}.txt (from $fname, ${char_count} chars)" + return + fi + fi + + if command -v pdftoppm &>/dev/null; then + local pages_dir="${base}_pages" + mkdir -p "$pages_dir" + pdftoppm -png -r 150 "$pdf" "$pages_dir/page" 2>/dev/null + local page_count + page_count=$(find "$pages_dir" -name 'page-*.png' 2>/dev/null | wc -l | tr -d ' ') + if [[ "$page_count" -gt 0 ]]; then + echo "PDF_PAGES: $pages_dir ($page_count pages from $fname)" + else + echo "PDF_CONVERT_FAILED: $fname" + fi + else + echo "PDF_SKIPPED: $fname (install poppler-utils for text/image extraction)" + fi +} + +if [[ -d "$DOWNLOAD_DIR" ]]; then + while IFS= read -r pdf; do + process_pdf "$pdf" + done < <(find "$DOWNLOAD_DIR" -name '*.pdf' -type f 2>/dev/null) +fi diff --git a/.cursor/skills/asana-plan/SKILL.md b/.cursor/skills/asana-plan/SKILL.md new file mode 100644 index 0000000..2f8a519 --- /dev/null +++ b/.cursor/skills/asana-plan/SKILL.md @@ -0,0 +1,60 @@ +--- +name: asana-plan +description: Create an implementation plan from either an Asana task URL or ad-hoc text/file requirements, then wait for user confirmation before implementation. +compatibility: Requires jq. ASANA_TOKEN for Asana context when task URLs are provided. +metadata: + author: j0ntz +--- + +Produce a plan document via Cursor planning flow from Asana or text requirements, and hand off approved context to implementation skills. + + +If input is an Asana task URL, read and follow `~/.cursor/skills/task-review/SKILL.md` steps 1-3 before planning. +Do not start implementation while in this skill. End by asking for confirmation. +Use Cursor's plan tool to output the plan document to the normal planning location. + + + +Accept two input forms: + +1. 
**Asana URL mode**: Task URL is provided +2. **Text/file mode**: Ad-hoc text requirement or file reference is provided + +If input is ambiguous, ask the user to clarify which mode applies. + + + + +Read `~/.cursor/skills/task-review/SKILL.md` and run its steps 1-3 to fetch and summarize task context. + + + +Read the provided description and any referenced file(s), then summarize scope, target areas, and assumptions. + + + + +Create a concise actionable implementation plan using Cursor's plan flow. Include: + +- Summary +- Goal / Definition of Done +- Likely relevant files +- Findings so far +- Numbered implementation steps +- Constraints + + + +Return: + +1. Plan file path +2. Short execution summary (what will be changed) + +Then ask for confirmation before implementation: + +> Does this match your understanding? Any adjustments before I start? + + + +`/im` consumes this output and starts only after user confirmation. `/im` should not re-run a second independent confirmation flow for the same plan. + diff --git a/.cursor/skills/asana-task-update/SKILL.md b/.cursor/skills/asana-task-update/SKILL.md new file mode 100644 index 0000000..91126f1 --- /dev/null +++ b/.cursor/skills/asana-task-update/SKILL.md @@ -0,0 +1,89 @@ +--- +name: asana-task-update +description: Update Asana tasks via one reusable workflow (attach PRs, assign/unassign, set status, and update task fields). Use when any skill needs to modify Asana task state. +compatibility: Requires jq. ASANA_TOKEN for Asana API updates. ASANA_GITHUB_SECRET for PR attach operations. +metadata: + author: j0ntz +--- + +Perform Asana task mutations through one shared command and one shared script, so all callers use the same field mappings and prompts. + + +Use `~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh` for all Asana task mutations. Do not call raw Asana APIs directly from skills that can delegate here. +Every operation requires `--task `. +`--attach-pr` requires `ASANA_GITHUB_SECRET`. 
Other operations require `ASANA_TOKEN`. +If the script exits code 2 with `PROMPT_REVIEWER` or `PROMPT_IMPLEMENTOR`, ask the user and re-run with explicit `--reviewer` or `--implementor`. Hands-off callers may instead pass `--skip-assign-if-missing` to convert missing-reviewer assignment into a non-blocking skip. +Asana updates can take time. Use `block_until_ms: 120000` for script calls. + + + +```bash +# Attach only +~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \ + --task \ + --attach-pr --pr-url --pr-title "" --pr-number <num> + +# Attach + assign reviewer + set review-needed status + estimate review hours +~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \ + --task <task_gid> \ + --attach-pr --pr-url <url> --pr-title "<title>" --pr-number <num> \ + --assign --set-status "Review Needed" --auto-est-review-hrs + +# Hands-off attach + best-effort assign (skip if reviewer missing) +~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \ + --task <task_gid> \ + --attach-pr --pr-url <url> --pr-title "<title>" --pr-number <num> \ + --assign --skip-assign-if-missing --set-status "Review Needed" --auto-est-review-hrs + +# Publish Needed -> Verification Needed (and unassign) +~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \ + --task <task_gid> \ + --set-status "Verification Needed" --unassign +``` +</usage> + +<step id="1" name="Build operation flags"> +Determine which updates are needed by the caller and build one command with all flags: + +- `--attach-pr --pr-url --pr-title --pr-number` +- `--assign` or `--assign <user_gid>` +- `--skip-assign-if-missing` +- `--unassign` +- `--set-status "Review Needed|Publish Needed|Verification Needed"` +- `--set-reviewer <user_gid>` +- `--set-implementor <user_gid>` +- `--set-priority <enum_gid>` +- `--set-planned <enum_gid>` +- `--auto-est-review-hrs` +</step> + +<step id="2" name="Run update script"> +Run `asana-task-update.sh` with the built flags. 
Prefer one call with combined operations over multiple calls. +</step> + +<step id="3" name="Handle prompts"> +If exit code is 2: + +- `PROMPT_REVIEWER`: ask who to assign, then re-run with `--reviewer <gid>` and `--assign` +- `PROMPT_IMPLEMENTOR`: ask who to set as implementor, then re-run with `--implementor <gid>` + +If the caller used `--skip-assign-if-missing`, do not ask about `PROMPT_REVIEWER` because the script will not emit it for missing-reviewer cases. +</step> + +<step id="4" name="Report result"> +Summarize one line per action from script output (attach result, assignment, status change, field updates). +</step> + +<team-roster description="Asana user GIDs. Use numbered lists when prompting users."> +1. Jon Tzeng — `1200972350160586` +2. William Swanson — `10128869002320` +3. Paul Puey — `9976421903322` +4. Sam Holmes — `1198904591136142` +5. Matthew Piche — `522823585857811` +</team-roster> + +<exit-codes> +- `0`: success +- `1`: error +- `2`: needs user input (`PROMPT_REVIEWER`, `PROMPT_IMPLEMENTOR`) +</exit-codes> diff --git a/.cursor/skills/asana-task-update/scripts/asana-task-update.sh b/.cursor/skills/asana-task-update/scripts/asana-task-update.sh new file mode 100755 index 0000000..7e87d3d --- /dev/null +++ b/.cursor/skills/asana-task-update/scripts/asana-task-update.sh @@ -0,0 +1,268 @@ +#!/usr/bin/env bash +# asana-task-update.sh +# Unified Asana task mutation script. 
+# +# Exit codes: +# 0 = success +# 1 = error +# 2 = needs user input (PROMPT_REVIEWER, PROMPT_IMPLEMENTOR) +set -euo pipefail + +TASK_GID="" +DO_ATTACH=false +PR_URL="" +PR_TITLE="" +PR_NUMBER="" + +DO_ASSIGN=false +ASSIGN_GID="" +SKIP_ASSIGN_IF_MISSING=false +DO_UNASSIGN=false + +SET_STATUS="" +SET_REVIEWER_GID="" +SET_IMPLEMENTOR_GID="" +SET_PRIORITY_GID="" +SET_PLANNED_GID="" +AUTO_EST_REVIEW=false + +while [[ $# -gt 0 ]]; do + case "$1" in + --task) TASK_GID="$2"; shift 2 ;; + --attach-pr) DO_ATTACH=true; shift ;; + --pr-url) PR_URL="$2"; shift 2 ;; + --pr-title) PR_TITLE="$2"; shift 2 ;; + --pr-number) PR_NUMBER="$2"; shift 2 ;; + --assign) + DO_ASSIGN=true + if [[ $# -ge 2 && "${2:0:2}" != "--" ]]; then + ASSIGN_GID="$2" + shift 2 + else + shift + fi + ;; + --skip-assign-if-missing) SKIP_ASSIGN_IF_MISSING=true; shift ;; + --unassign) DO_UNASSIGN=true; shift ;; + --set-status) SET_STATUS="$2"; shift 2 ;; + --set-reviewer|--reviewer) SET_REVIEWER_GID="$2"; shift 2 ;; + --set-implementor|--implementor) SET_IMPLEMENTOR_GID="$2"; shift 2 ;; + --set-priority) SET_PRIORITY_GID="$2"; shift 2 ;; + --set-planned) SET_PLANNED_GID="$2"; shift 2 ;; + --auto-est-review-hrs) AUTO_EST_REVIEW=true; shift ;; + *) echo "Unknown flag: $1" >&2; exit 1 ;; + esac +done + +if [[ -z "$TASK_GID" ]]; then + echo "Error: --task <task_gid> is required" >&2 + exit 1 +fi + +if ! $DO_ATTACH && ! $DO_ASSIGN && ! $DO_UNASSIGN && [[ -z "$SET_STATUS" ]] && [[ -z "$SET_REVIEWER_GID" ]] && [[ -z "$SET_IMPLEMENTOR_GID" ]] && [[ -z "$SET_PRIORITY_GID" ]] && [[ -z "$SET_PLANNED_GID" ]] && ! 
$AUTO_EST_REVIEW; then + echo "Error: No operations specified" >&2 + exit 1 +fi + +if [[ -z "${ASANA_TOKEN:-}" ]]; then + echo "Error: ASANA_TOKEN not set" >&2 + exit 1 +fi + +if $DO_ATTACH && [[ -z "${ASANA_GITHUB_SECRET:-}" ]]; then + echo "Error: ASANA_GITHUB_SECRET not set (required for --attach-pr)" >&2 + exit 1 +fi + +ASANA_API="https://app.asana.com/api/1.0" +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +# Airbitz.co workspace field GIDs +STATUS_FIELD="1190660107346181" +REVIEW_NEEDED_OPTION="1190660107348334" +PUBLISH_NEEDED_OPTION="1191304757575656" +VERIFICATION_NEEDED_OPTION="1190660107348340" +REVIEWER_FIELD="1203334388004673" +IMPLEMENTOR_FIELD="1203334386796983" +SPENT_DEV_HRS_FIELD="1202996660964169" +EST_REVIEW_HRS_FIELD="1203002792997295" + +status_to_gid() { + case "$1" in + "Review Needed") echo "$REVIEW_NEEDED_OPTION" ;; + "Publish Needed") echo "$PUBLISH_NEEDED_OPTION" ;; + "Verification Needed") echo "$VERIFICATION_NEEDED_OPTION" ;; + *) echo "$1" ;; + esac +} + +TASK_FIELDS="" +load_task_fields() { + if [[ -n "$TASK_FIELDS" ]]; then + return 0 + fi + TASK_FIELDS=$(curl -sf "$ASANA_API/tasks/$TASK_GID?opt_fields=name,assignee.name,custom_fields.gid,custom_fields.name,custom_fields.people_value.gid,custom_fields.people_value.name,custom_fields.number_value,custom_fields.enum_value.gid,custom_fields.enum_value.name" \ + -H "Authorization: Bearer $ASANA_TOKEN") +} + +read_people_field() { + local field_gid="$1" + echo "$TASK_FIELDS" | jq -r --arg gid "$field_gid" ' + .data.custom_fields[] + | select(.gid == $gid) + | (.people_value[0].gid // "") + ' | head -n 1 +} + +if $DO_ATTACH; then + if [[ -z "$PR_URL" || -z "$PR_TITLE" || -z "$PR_NUMBER" ]]; then + echo "Error: --attach-pr requires --pr-url, --pr-title, and --pr-number" >&2 + exit 1 + fi + + ATTACH_RESULT=$(curl -s -X POST "https://github.integrations.asana.plus/custom/v1/actions/widget" \ + -H "Authorization: Bearer $ASANA_GITHUB_SECRET" \ + -H "Content-Type: application/json" \ + -d "{ + 
\"allowedProjects\": [], + \"blockedProjects\": [], + \"pullRequestDescription\": \"https://app.asana.com/0/0/$TASK_GID\", + \"pullRequestName\": $(jq -Rn --arg v "$PR_TITLE" '$v'), + \"pullRequestNumber\": $PR_NUMBER, + \"pullRequestURL\": \"$PR_URL\" + }" 2>&1) + + ATTACH_STATUS=$(echo "$ATTACH_RESULT" | python3 -c "import sys,json; r=json.load(sys.stdin); print(r[0].get('result','unknown'))" 2>/dev/null || echo "error: $ATTACH_RESULT") + echo ">> PR attach: $ATTACH_STATUS" +fi + +if $DO_ASSIGN || [[ -n "$SET_REVIEWER_GID" ]] || [[ -n "$SET_IMPLEMENTOR_GID" ]] || $AUTO_EST_REVIEW || [[ -n "$SET_PRIORITY_GID" ]] || [[ -n "$SET_PLANNED_GID" ]]; then + load_task_fields +fi + +if $DO_ASSIGN; then + if [[ -z "$ASSIGN_GID" ]]; then + ASSIGN_GID="${SET_REVIEWER_GID:-$(read_people_field "$REVIEWER_FIELD")}" + fi + if [[ -z "$ASSIGN_GID" ]]; then + if $SKIP_ASSIGN_IF_MISSING; then + echo ">> Assignee: skipped (no reviewer provided or found on task)" + DO_ASSIGN=false + else + echo ">> PROMPT_REVIEWER" + exit 2 + fi + fi + + if $DO_ASSIGN; then + if [[ -z "$SET_REVIEWER_GID" ]]; then + SET_REVIEWER_GID="$ASSIGN_GID" + fi + + if [[ -z "$SET_IMPLEMENTOR_GID" ]]; then + SET_IMPLEMENTOR_GID="$(read_people_field "$IMPLEMENTOR_FIELD")" + fi + if [[ -z "$SET_IMPLEMENTOR_GID" ]]; then + SET_IMPLEMENTOR_GID="$("$SCRIPT_DIR/../../asana-whoami.sh" 2>/dev/null || true)" + if [[ -n "$SET_IMPLEMENTOR_GID" ]]; then + echo ">> Implementor: auto-resolved to current user ($SET_IMPLEMENTOR_GID)" + fi + fi + if [[ -z "$SET_IMPLEMENTOR_GID" ]]; then + echo ">> PROMPT_IMPLEMENTOR" + exit 2 + fi + fi +fi + +CUSTOM_FIELDS_PATCH='{}' + +if [[ -n "$SET_STATUS" ]]; then + STATUS_GID="$(status_to_gid "$SET_STATUS")" + CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$STATUS_FIELD" --arg v "$STATUS_GID" '. + {($k): $v}') +fi +if [[ -n "$SET_REVIEWER_GID" ]]; then + CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$REVIEWER_FIELD" --arg v "$SET_REVIEWER_GID" '. 
+ {($k): [$v]}') +fi +if [[ -n "$SET_IMPLEMENTOR_GID" ]]; then + CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$IMPLEMENTOR_FIELD" --arg v "$SET_IMPLEMENTOR_GID" '. + {($k): [$v]}') +fi +if [[ -n "$SET_PRIORITY_GID" ]]; then + PRIORITY_FIELD_GID=$(echo "$TASK_FIELDS" | jq -r '.data.custom_fields[] | select(.name == "Priority") | .gid' | head -n 1) + if [[ -n "$PRIORITY_FIELD_GID" ]]; then + CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$PRIORITY_FIELD_GID" --arg v "$SET_PRIORITY_GID" '. + {($k): $v}') + fi +fi +if [[ -n "$SET_PLANNED_GID" ]]; then + PLANNED_FIELD_GID=$(echo "$TASK_FIELDS" | jq -r '.data.custom_fields[] | select(.name == "Planned") | .gid' | head -n 1) + if [[ -n "$PLANNED_FIELD_GID" ]]; then + CUSTOM_FIELDS_PATCH=$(echo "$CUSTOM_FIELDS_PATCH" | jq --arg k "$PLANNED_FIELD_GID" --arg v "$SET_PLANNED_GID" '. + {($k): $v}') + fi +fi + +UPDATE_BODY='{"data":{}}' +HAS_UPDATE=false + +if [[ "$CUSTOM_FIELDS_PATCH" != "{}" ]]; then + UPDATE_BODY=$(echo "$UPDATE_BODY" | jq --argjson cf "$CUSTOM_FIELDS_PATCH" '.data.custom_fields = $cf') + HAS_UPDATE=true +fi + +if $DO_UNASSIGN; then + UPDATE_BODY=$(echo "$UPDATE_BODY" | jq '.data.assignee = null') + HAS_UPDATE=true +elif $DO_ASSIGN; then + UPDATE_BODY=$(echo "$UPDATE_BODY" | jq --arg a "$ASSIGN_GID" '.data.assignee = $a') + HAS_UPDATE=true +fi + +if $HAS_UPDATE; then + curl -sf -X PUT "$ASANA_API/tasks/$TASK_GID" \ + -H "Authorization: Bearer $ASANA_TOKEN" \ + -H "Content-Type: application/json" \ + -d "$UPDATE_BODY" > /dev/null + echo ">> Task fields: updated" +fi + +if $DO_ASSIGN; then + echo ">> Assigned to reviewer: $ASSIGN_GID" +fi +if $DO_UNASSIGN; then + echo ">> Assignee: unset" +fi +if [[ -n "$SET_STATUS" ]]; then + echo ">> Status: $SET_STATUS" +fi +if [[ -n "$SET_REVIEWER_GID" ]]; then + echo ">> Reviewer field: set" +fi +if [[ -n "$SET_IMPLEMENTOR_GID" ]]; then + echo ">> Implementor field: set" +fi +if [[ -n "$SET_PRIORITY_GID" ]]; then + echo ">> Priority 
field: set" +fi +if [[ -n "$SET_PLANNED_GID" ]]; then + echo ">> Planned field: set" +fi + +if $AUTO_EST_REVIEW; then + load_task_fields + EST_REVIEW=$(echo "$TASK_FIELDS" | jq -r --arg gid "$EST_REVIEW_HRS_FIELD" '.data.custom_fields[] | select(.gid == $gid) | (.number_value // empty)' | head -n 1) + if [[ -n "$EST_REVIEW" ]]; then + echo ">> Est. Review Hrs: already set ($EST_REVIEW)" + else + SPENT_DEV=$(echo "$TASK_FIELDS" | jq -r --arg gid "$SPENT_DEV_HRS_FIELD" '.data.custom_fields[] | select(.gid == $gid) | (.number_value // empty)' | head -n 1) + if [[ -z "$SPENT_DEV" ]]; then + echo ">> Est. Review Hrs: skipped (no Spent Dev Hrs)" + else + EST_VAL=$(python3 -c "v=float('$SPENT_DEV'); x=round(v*0.1,1); print(x if x >= 0.1 else 0.1)") + REVIEW_PATCH=$(jq -n --arg f "$EST_REVIEW_HRS_FIELD" --argjson v "$EST_VAL" '{data:{custom_fields:{($f):$v}}}') + curl -sf -X PUT "$ASANA_API/tasks/$TASK_GID" \ + -H "Authorization: Bearer $ASANA_TOKEN" \ + -H "Content-Type: application/json" \ + -d "$REVIEW_PATCH" > /dev/null + echo ">> Est. Review Hrs: set to $EST_VAL (10% of Spent Dev Hrs)" + fi + fi +fi diff --git a/.cursor/skills/asana-whoami.sh b/.cursor/skills/asana-whoami.sh new file mode 100755 index 0000000..62b73ff --- /dev/null +++ b/.cursor/skills/asana-whoami.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# asana-whoami.sh +# Resolve the current Asana user's GID from $ASANA_TOKEN. +# Caches the result in /tmp for the duration of the session. 
+# +# Usage: +# asana-whoami.sh # prints GID +# asana-whoami.sh --name # prints "GID NAME" +# +# Requires env var: ASANA_TOKEN +# +# Output: +# <gid> (default) +# <gid> <name> (with --name) +set -euo pipefail + +SHOW_NAME=false +if [[ "${1:-}" == "--name" ]]; then + SHOW_NAME=true +fi + +if [[ -z "${ASANA_TOKEN:-}" ]]; then + echo "Error: ASANA_TOKEN not set" >&2 + exit 1 +fi + +CACHE_FILE="/tmp/asana-whoami-$(echo "$ASANA_TOKEN" | shasum -a 256 | cut -c1-16).json" + +if [[ -f "$CACHE_FILE" ]]; then + cached=$(cat "$CACHE_FILE") +else + cached=$(curl -s "https://app.asana.com/api/1.0/users/me?opt_fields=gid,name" \ + -H "Authorization: Bearer $ASANA_TOKEN") + echo "$cached" > "$CACHE_FILE" +fi + +if [[ "$SHOW_NAME" == "true" ]]; then + echo "$cached" | python3 -c " +import sys, json +d = json.load(sys.stdin)['data'] +print(f\"{d['gid']} {d['name']}\") +" +else + echo "$cached" | python3 -c " +import sys, json +print(json.load(sys.stdin)['data']['gid']) +" +fi diff --git a/.cursor/skills/author/SKILL.md b/.cursor/skills/author/SKILL.md new file mode 100644 index 0000000..2b6ae0f --- /dev/null +++ b/.cursor/skills/author/SKILL.md @@ -0,0 +1,145 @@ +--- +name: author +description: Create, edit, revise, or debug Cursor skills (~/.cursor/skills/*/SKILL.md). Use when the user wants to make a new skill, update an existing skill, fix a skill, or asks about .cursor/skills/ files. Also use when the user says "new command", "create command", "create skill", "edit command", "new skill", "update skill", "update command", or references SKILL.md. NOT for general markdown editing (READMEs, CHANGELOGs, docs, AGENTS.md). +--- + +<goal>Write or revise Cursor commands and skills with maximum agent compliance.</goal> + +<commands-vs-skills> +Skills (`~/.cursor/skills/*/SKILL.md`): The standard unit. Can be invoked explicitly via `/skill-name` or agent-triggered based on task matching against the description. Companion scripts live in `<skill>/scripts/`. 
Shared scripts live at `~/.cursor/skills/` top-level. +</commands-vs-skills> + +<authoring-principles> +<principle id="prescriptive">Be prescriptive, not descriptive. Commands tell the agent what to DO, not what things ARE.</principle> +<principle id="brief-examples">Examples must be brief and hypothetical. Never use real data from conversations. Keep examples to 3-5 lines max.</principle> +<principle id="dry">DRY across commands. If two commands share logic, extract it into a shared file and have both reference it.</principle> +<principle id="ordering">Order of operations matters. The agent reads top-to-bottom. Put context-setting steps before action steps.</principle> +<principle id="rules-first">Hard rules at the top. Non-negotiable constraints go right after the Goal so they're read before any steps.</principle> +<principle id="escape-hatches">Escape hatches over assumptions. When ambiguity exists, tell the agent to ask — don't let it guess.</principle> +<principle id="scripts-over-reasoning">Offload all deterministic logic to companion scripts. If an operation has a known, repeatable sequence of steps (API calls, git commands, file parsing, linting, data fetching), it belongs in a `.sh` script — not inline in the `.md` as shell blocks the agent must reason about. The `.md` file should only handle semantic decisions, user interaction, and interpreting script output. This eliminates context bloat and prevents the agent from re-deriving logic it doesn't need to understand.</principle> +<principle id="batch-tool-calls">Minimize round-trips. When a step requires multiple independent pieces of information (e.g., git status + git log + git diff), instruct the agent to gather them all in parallel tool calls within a single message/script — not sequentially. Group independent reads, searches, and shell commands together. 
Only sequence calls when one depends on the output of another.</principle> +<principle id="no-duplicate-automation">Don't duplicate in semantic rules what companion scripts already automate. If a script handles linting, formatting, localization, or other post-processing, the command should reference the script — not also instruct the agent to perform those steps. Duplication risks the agent running a step twice or conflicting with the script's output.</principle> +<principle id="gh-cli-over-curl">For GitHub API operations in companion scripts, use `gh api` and `gh api graphql` over raw `curl` + `$GITHUB_TOKEN`. `gh` handles authentication, pagination (`--paginate`), and API versioning automatically. Use GraphQL (`gh api graphql -f query="..."`) to fetch only required fields in a single request, reducing API calls and context size. Fall back to REST (`gh api repos/...`) only when GraphQL doesn't expose the needed data (e.g., file patches).</principle> +<principle id="node-over-python">When companion scripts need capabilities beyond bash (JSON manipulation, complex regex, structured data processing, async I/O), embed Node.js inline via `exec node -e '...'` rather than depending on Python. Node is already a required dependency for other scripts; adding Python creates an unnecessary second runtime dependency. This keeps scripts as single `.sh` files while unlocking full-featured processing. Avoid single quotes inside the inline node code (bash single-quoted string boundary); use `\x27` in regex to match literal single quotes.</principle> +<principle id="minimize-context">Companion scripts must minimize context consumption. Return structured, filtered summaries — never raw API responses or full file contents. When a script processes large inputs (logs, exports, API payloads), extract only the fields the command needs and discard the rest. Commands should instruct the agent to use targeted reads (grep, line ranges) over full file reads for large files. 
Every token of script output that the agent reads costs context — design outputs to be as compact as possible while remaining parseable.</principle> +</authoring-principles> + +<formatting> +Use XML tags to structure commands and skills. XML outperforms markdown for LLM instruction-following: + +- Anthropic, OpenAI, and Google all recommend XML tags for structuring prompts. +- Claude is specifically tuned to attend to XML tag boundaries. +- Empirical tests show up to 40% performance variance based on prompt format alone, with XML consistently outperforming markdown. + +Source: https://docs.claude.com/en/docs/use-xml-tags + +<rules> +- Use semantic tag names that describe their content (e.g., `<rules>`, `<step>`, `<edge-cases>`). +- Use attributes for metadata: `id`, `name`, `description`. +- Nest tags for hierarchy: `<step><sub-step>...</sub-step></step>`. +- Be consistent — use the same tag names throughout a command. +- Markdown is still fine for inline formatting within XML tags (bold, code, lists). +</rules> + +<template> +```xml +<goal>One sentence. What does this command accomplish?</goal> + +<rules description="Non-negotiable constraints."> +<rule id="constraint-1">...</rule> +<rule id="constraint-2">...</rule> +</rules> + +<step id="1" name="Step name"> +Instructions for this step. +</step> + +<step id="2" name="Step name"> +Instructions for this step. +</step> + +<edge-cases> +<case name="Case name">How to handle it.</case> +</edge-cases> +``` +</template> +</formatting> + +<small-model-conventions description="Apply these when the command will run on smaller/faster models (e.g., the user says 'for smaller models', 'optimize for lite/fast', or the command is high-frequency and must be cheap). These patterns compensate for weaker instruction-following and shorter reasoning chains."> + +<convention id="verbatim-bash">Give exact shell commands to copy-paste, not descriptions of what to run. 
Smaller models copy verbatim; they struggle to construct commands from prose. Include placeholders like `<upstream-ref>` only where the agent must substitute a value.</convention> + +<convention id="file-over-args">Pass multi-line content (PR bodies, commit messages, JSON payloads) via temp files, not shell arguments. Write content using the Write tool, then pass `--body-file /tmp/foo.md` to the script. This avoids shell escaping failures that smaller models cannot debug.</convention> + +<convention id="exact-output-templates">When the command produces formatted output (markdown, JSON, reports), show the exact template line-by-line with placeholders. Include blank lines and heading levels explicitly. Example: show `## Accomplishments {day_label}` not "add a heading for accomplishments."</convention> + +<convention id="explicit-parallel">Spell out parallel tool calls: "Run both scripts **in parallel** (two Shell tool calls in one message)." Smaller models default to sequential unless explicitly told otherwise.</convention> + +<convention id="priority-ordered-decisions">When the agent must categorize or choose between options, use a numbered priority list — not prose. Example: "1. If X → do A. 2. If Y → do B. 3. Otherwise → do C." Smaller models follow numbered sequences reliably; they lose track of nested if/else prose.</convention> + +<convention id="inline-guardrails">Duplicate critical rules from cross-referenced files as top-level `<rule>` tags. Smaller models skip "Read file X now" instructions despite explicit language. One-liner guardrails (e.g., `commit-script`, `changelog-required`) catch the failure mode where the cross-read is skipped entirely.</convention> + +<convention id="no-implicit-steps">Every action needs an explicit instruction. Never rely on "follow best practices" or "use appropriate patterns." 
If the agent should run `git push -u origin HEAD`, write that exact command — don't say "push the branch."</convention> + +<convention id="single-tool-per-step">Where possible, design steps so each step is ONE tool call. Smaller models lose track of multi-tool steps. If a step requires multiple calls, break it into sub-steps with explicit sequencing ("After step 2a completes, run step 2b").</convention> +</small-model-conventions> + +<revision-checklist> +When revising an existing command, **every item below is mandatory** — not a suggestion. Older commands may predate current best practices; touching a command is an opportunity to bring it up to spec. + +1. Read the full file before making changes +2. Check for duplicated logic across other commands — consolidate if found +3. **Check behavioral dependencies**: Search for other commands, skills, and rules that perform similar operations or share domain overlap with the one being edited. If command A has a step that is a lightweight version of command B's core behavior (e.g., `/pr-land` addressing comments vs `/pr-address`), verify that A's step is consistent with B's rules — missing rules in A are likely bugs. + - Extract domain-specific verbs and nouns from the step being edited (e.g., a step about handling PR comments yields: `comment`, `reply`, `resolve`, `address`, `fixup`, `thread`) + - Search each term across commands, skills, and rules: + ```bash + rg -l "<term>" ~/.cursor/skills/*/SKILL.md ~/.cursor/rules/*.mdc + ``` + - Read any hits that share domain overlap and check for consistency + - If overlap is found, evaluate whether to consolidate per the `dry` principle: can A reference B's rules or a shared file instead of reimplementing? Propose consolidation to the user when the shared logic is non-trivial. +4. 
**Check dependent callers before any script/command change**: Before adding, updating, renaming, or removing any command, skill, script, step ID, flag, or output contract, search for direct callers/references and update them in the same change. + - Search by skill name, script filename, flag names, and any removed/renamed identifiers: + ```bash + rg -n "<identifier>" ~/.cursor/skills ~/.cursor/rules + ``` + - Do not add/update/remove script behavior until caller impacts are audited and required updates are planned. + - Do not delete or rename a referenced target until all callers are updated. + - In the final response, list which callers were updated. +5. Verify step ordering matches the agent's decision flow +6. Ensure examples are brief and generic (no real repo names, PR numbers, or user data) +7. Check that escape hatches exist for ambiguous cases +8. Confirm companion scripts match the `.md` expectations +9. Convert markdown-structured commands to XML format (this is the most commonly skipped item — `##` headers and bullet lists must become `<goal>`, `<rules>`, `<step>` tags) +10. Apply all current authoring principles (rules-first, scripts-over-reasoning, batch-tool-calls, etc.) even if the original command predates them +11. If the command may run on smaller/faster models, apply `<small-model-conventions>` — especially `file-over-args`, `inline-guardrails`, and `verbatim-bash` +</revision-checklist> + +<post-authoring-actions> +After any authoring change (skills/scripts/rules), ask: + +> Run `/convention-sync` to sync files and update PR conventions/description? + +When `.cursor/rules/*.mdc` files changed, run: + +```bash +~/.cursor/skills/convention-sync/scripts/generate-claude-md.sh +``` + +This keeps `~/.claude/CLAUDE.md` aligned with always-apply rules via the existing convention-sync flow. +</post-authoring-actions> + +<companion-scripts> +Skill-specific scripts go in `<skill>/scripts/`. Shared scripts go in `~/.cursor/skills/` top-level. 
Conventions: + +- `set -euo pipefail` at the top +- Parse args with a `while/case` loop +- Output structured, one-line-per-action summaries the agent can parse +- Exit code 0 = success, 1 = error, 2 = needs user input +- **Naming**: Name scripts by what they DO, not which command they serve. Scripts will likely be reused by multiple commands. Prefer descriptive, domain-scoped names over command-coupled names: + - `lint-commit.sh` — good (describes the operation) + - `asana-task-update.sh` — good (describes the operation) + - `github-pr-comments.sh` — good (describes the domain + operation) + - `pr-address.sh` — bad (coupled to the `/pr-address` command name) +- Before creating a new script, check if an existing script already covers the operation. Extend it with a new subcommand rather than creating a duplicate. +- **GitHub API**: Default to `gh api` and `gh api graphql` — never raw `curl`. See `gh-cli-over-curl` principle. +</companion-scripts> diff --git a/.cursor/skills/changelog/SKILL.md b/.cursor/skills/changelog/SKILL.md new file mode 100644 index 0000000..9a48ec3 --- /dev/null +++ b/.cursor/skills/changelog/SKILL.md @@ -0,0 +1,10 @@ +--- +name: changelog +description: Update CHANGELOG.md(s) with new entries describing changes made in the repo(s). Use when the user wants to update changelogs. +metadata: + author: j0ntz +--- + +# changelog + +Update the CHANGELOG.md(s) with at most a few new entries describing the changes made in the repo(s). Documented changes should ONLY describe the final state of all the current changes, not the journey, and follow the existing patterns (being sure to parse only a hundred lines to minimize context) for length and formatting, including no word wrapping. 
\ No newline at end of file diff --git a/.cursor/skills/chat-audit/SKILL.md b/.cursor/skills/chat-audit/SKILL.md new file mode 100644 index 0000000..89411e3 --- /dev/null +++ b/.cursor/skills/chat-audit/SKILL.md @@ -0,0 +1,102 @@ +--- +name: chat-audit +description: Analyze a Cursor chat export to identify inefficiencies, rule violations, and wasted tool calls. Use when the user wants to audit a chat session. +compatibility: Requires node. +metadata: + author: j0ntz +--- + +<goal>Analyze current chat or provided Cursor chat export to identify inefficiencies, rule violations, and wasted tool calls against the invoked command's workflow.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="use-companion-script">Use `scripts/cursor-chat-extract.js` to parse the export. Do NOT parse the raw JSON inline — it is deeply nested and will consume excessive context.</rule> +<rule id="tools-only-default">Default to `--tools-only` mode. Only omit the flag if the user asks for full assistant message analysis.</rule> +<rule id="no-raw-json">Do NOT read the export JSON file directly. All data comes from the script output.</rule> +<rule id="concise-output">Keep the final report under 50 lines. Use a numbered list for findings, not verbose paragraphs.</rule> +</rules> + +<step id="1" name="Extract conversation data"> +If no chat export file is provided, assume the user is asking for a chat audit of the current chat session. + +If chat export file is provided, run the companion script on the user-provided export file: + +```bash +scripts/cursor-chat-extract.js <export-file> --tools-only +``` + +Parse the JSON output. Note the `invokedCommand`, `stats`, and `sequence` fields. + +If `invokedCommand` is null, check the first user message for a command reference and ask the user which command was intended. 
+</step> + +<step id="2" name="Load the invoked command"> +If `invokedCommand` is identified, read the command file: + +```bash +Read ~/.cursor/skills/<invokedCommand>/SKILL.md +``` + +Extract the command's: +- **Rules** (the `<rule>` tags) +- **Steps** (the `<step>` tags — just names and key instructions, not full content) +- **Companion scripts** referenced (filenames only) +</step> + +<step id="3" name="Analyze tool call sequence"> +Walk through the `sequence` array and check each tool call against the command's prescribed workflow: + +<sub-step name="Rule violations"> +For each rule in the command, check if the tool sequence violates it: +- `commit-script`: Did the agent use raw `git add` + `git commit` instead of `lint-commit.sh`? +- `use-companion-script`: Did the agent call `gh`, `curl`, or API tools directly instead of the prescribed script? +- `no-script-bypass`: Did the agent fall back to raw tools after a script error? +- Cross-reference rules: Did the agent read files referenced with "Read ... now (do NOT skip)"? 
+</sub-step> + +<sub-step name="Wasted tool calls"> +Flag calls that consumed context without contributing to the workflow: +- **Errors followed by retries** — the error was avoidable (e.g., reading a directory as a file) +- **Redundant reads** — same information gathered multiple times (e.g., `git status` called twice) +- **Unnecessary exploration** — reading code files when the user said the change was already done +- **Sleep-based polling** — `sleep N && tail` instead of using `block_until_ms` +- **Sequential calls that could be parallel** — independent operations run one at a time +</sub-step> + +<sub-step name="Skipped steps"> +For each step in the command, check if the tool sequence includes the corresponding action: +- Missing verification step +- Missing CHANGELOG entry +- Missing Asana linking +- Skipped cross-file reads (e.g., never read `im.md` when step 3 requires it) +</sub-step> +</step> + +<step id="4" name="Generate report"> +Output a structured report: + +``` +## Chat Audit: /<command> + +**Stats:** N tool calls (M errors, K cancelled) across L user messages + +### Rule Violations +1. [rule-id] Description of what happened + +### Wasted Tool Calls +1. [#N] tool_name — why it was wasteful + +### Skipped Steps +1. [step N] What was skipped + +### Recommendations +1. Specific change to the command file that would prevent this +``` + +If the user hasn't asked for command file changes, stop here. If they ask, apply the recommendations using the `/author` skill. +</step> + +<edge-cases> +<case name="No command detected">Ask the user which command was being executed, or analyze without a reference command (just flag errors and wasted calls).</case> +<case name="Multiple user messages">The conversation may span multiple turns. The first user message typically invokes the command; subsequent ones are follow-ups. 
Analyze the full sequence but weight findings toward the initial command execution.</case> +<case name="Non-command conversation">If no `/command` was invoked, still analyze for general inefficiencies (redundant reads, errors, unnecessary exploration) but skip the rule/step compliance checks.</case> +</edge-cases> diff --git a/.cursor/skills/chat-audit/scripts/cursor-chat-extract.js b/.cursor/skills/chat-audit/scripts/cursor-chat-extract.js new file mode 100755 index 0000000..6908d20 --- /dev/null +++ b/.cursor/skills/chat-audit/scripts/cursor-chat-extract.js @@ -0,0 +1,142 @@ +#!/usr/bin/env node +// cursor-chat-extract.js — Extract structured conversation data from Cursor chat export JSON. +// Usage: ./cursor-chat-extract.js <export.json> [--tools-only] +// Output: Compact JSON summary of messages and tool calls for agent analysis. + +const fs = require("fs"); +const path = require("path"); + +const file = process.argv[2]; +const toolsOnly = process.argv.includes("--tools-only"); + +if (!file) { + console.error("Usage: cursor-chat-extract.js <export.json> [--tools-only]"); + process.exit(1); +} + +let data; +try { + data = JSON.parse(fs.readFileSync(path.resolve(file), "utf8")); +} catch (e) { + console.error(`Failed to parse ${file}: ${e.message}`); + process.exit(1); +} + +const composerId = Object.keys(data.bubbles || {})[0]; +if (!composerId) { + console.error("No conversation found in export."); + process.exit(1); +} + +const entries = data.bubbles[composerId] || []; + +function extractText(val) { + if (val.text && typeof val.text === "string") return val.text; + if (!val.richText) return ""; + try { + const rt = JSON.parse(val.richText); + return walkLexical(rt.root); + } catch { + return ""; + } +} + +function walkLexical(node) { + let out = ""; + if (node.text) out += node.text; + if (node.children) for (const c of node.children) out += walkLexical(c); + return out; +} + +function parseToolData(raw) { + if (!raw) return null; + const d = typeof raw === 
"string" ? JSON.parse(raw) : raw; + if (!d.name) return null; + + const result = { name: d.name, status: d.status || "unknown" }; + + try { + const params = JSON.parse(d.params || "{}"); + if (params.command) { + result.arg = params.command.length > 150 + ? params.command.substring(0, 150) + "..." + : params.command; + } else if (params.targetFile) { + result.arg = params.targetFile; + } else if (params.globPattern) { + result.arg = `glob: ${params.globPattern}`; + } else if (params.pattern) { + result.arg = `pattern: ${params.pattern}`; + } else if (params.query) { + result.arg = `query: ${params.query.substring(0, 100)}`; + } + } catch { + // Ignore parse failures + } + + return result; +} + +function truncate(text, max) { + if (!text || text.length <= max) return text; + return text.substring(0, max) + "..."; +} + +const messages = []; +let totalTools = 0; +let errors = 0; +let cancellations = 0; + +for (const entry of entries) { + let val; + try { + val = JSON.parse(entry.value); + } catch { + continue; + } + + const type = val.type === 1 ? 
"user" : "assistant"; + const text = extractText(val); + + const tool = parseToolData(val.toolFormerData); + if (tool) { + totalTools++; + if (tool.status === "error") errors++; + if (tool.status === "cancelled") cancellations++; + messages.push({ type: "tool", ...tool }); + continue; + } + + if (!text.trim()) continue; + + if (type === "user") { + messages.push({ type: "user", text: text.trim() }); + } else if (!toolsOnly) { + messages.push({ + type: "assistant", + text: truncate(text.trim(), 200), + }); + } +} + +// Detect invoked command from first user message +let invokedCommand = null; +const firstUser = messages.find((m) => m.type === "user"); +if (firstUser) { + const match = firstUser.text.match(/^\/([\w-]+)/); + if (match) invokedCommand = match[1]; +} + +const output = { + invokedCommand, + stats: { + messages: messages.filter((m) => m.type === "user").length, + assistantTurns: messages.filter((m) => m.type === "assistant").length, + toolCalls: totalTools, + errors, + cancellations, + }, + sequence: messages, +}; + +console.log(JSON.stringify(output, null, 2)); diff --git a/.cursor/skills/convention-sync/SKILL.md b/.cursor/skills/convention-sync/SKILL.md new file mode 100644 index 0000000..cde8050 --- /dev/null +++ b/.cursor/skills/convention-sync/SKILL.md @@ -0,0 +1,85 @@ +--- +name: convention-sync +description: Sync cursor files between ~/.cursor/ and the edge-conventions repo, commit, push, and update PR description. Use when the user wants to sync conventions. +compatibility: Requires git, gh. +metadata: + author: j0ntz +--- + +<goal>Sync cursor files between `~/.cursor/` and the `edge-conventions` repo, commit, push, and update PR description from README. Also maintains cross-tool compatibility: symlinks `~/.claude/skills` → `~/.cursor/skills` and generates `~/.claude/CLAUDE.md` from always-apply rules.</goal> + +<rules> +<rule id="local-is-canonical">`~/.cursor/` is the canonical source. Edits happen locally; the repo is the distribution copy. 
Default direction is `user-to-repo`. Use `--repo-to-user` only for onboarding or pulling changes authored by others. The script does not detect bidirectional conflicts — whichever direction you run overwrites the other side.</rule> +<rule id="use-companion-script">Use `~/.cursor/skills/convention-sync/scripts/convention-sync.sh` for diffing and syncing. Do NOT manually diff or copy files.</rule> +<rule id="dry-run-first">Always run without `--stage` first to show the summary. Only stage/commit after user confirms.</rule> +<rule id="no-script-bypass">If the script fails, report the error and STOP.</rule> +<rule id="readme-is-source">`.cursor/README.md` is the source of truth for documentation. The script mirrors it to the PR description automatically.</rule> +<rule id="claude-compat">Every run ensures `~/.claude/skills` symlinks to `~/.cursor/skills` and regenerates `~/.claude/CLAUDE.md` from `alwaysApply: true` rules. This enables OpenCode and Claude Code to discover skills and rules without separate config.</rule> +<rule id="target-repo-resolution">For user-to-repo sync, target the `edge-conventions` checkout. Do NOT assume the current repo is correct just because it contains a `.cursor/` folder. Let the companion script resolve and validate the repo path.</rule> +</rules> + +<step id="1" name="Detect changes and PR status"> +Use the companion script's default repo resolution first. It targets the `edge-conventions` checkout and fails if the resolved or provided repo is not actually `edge-conventions`. + +Run the sync script in dry-run mode: + +```bash +~/.cursor/skills/convention-sync/scripts/convention-sync.sh +``` + +Parse the JSON output and extract `repoDir`. Then check for an open PR: + +```bash +cd <repo-dir> && gh pr view --json number,url --jq '{number: .number, url: .url}' 2>/dev/null || echo '{}' +``` + +Use the resolved repo path from the script for subsequent git and PR commands. 
If the script reports `total` as 0, report "Everything is in sync" and stop. +</step> + +<step id="2" name="Present summary"> +Show the user a concise summary including PR update status: + +``` +Sync summary (user → repo): + New: file1, file2 + Modified: file3, file4 + Deleted: file5 + Ignored: file6, file7 (via .syncignore) + +PR #N: Will update description from README.md (or "No open PR") + +Commit and push? [y/N] +``` + +If `ignored` array is empty, omit the Ignored line. + +If the user provided a commit message in their prompt, skip the confirmation and proceed. +</step> + +<step id="3" name="Stage, commit, push, update PR"> +Run the script with `--commit`: + +```bash +~/.cursor/skills/convention-sync/scripts/convention-sync.sh <repo-dir> --commit -m "<message>" +``` + +Then push: + +```bash +cd <repo-dir> && git push origin HEAD +``` + +If an open PR exists, update the PR description from README: + +```bash +cd <repo-dir> && gh pr edit --body-file .cursor/README.md +``` +</step> + +<edge-cases> +<case name="Reverse sync (repo → user)">If the user says "pull from repo" or "update my local", run with `--repo-to-user --stage` instead. No git operations needed.</case> +<case name="Current repo has a .cursor folder but is not edge-conventions">Do not sync into that repo. Fall back to `~/git/edge-conventions` or ask for the correct repo path.</case> +<case name="Dry-run resolved a repo path">Reuse the `repoDir` value from the script's JSON output for the PR query, commit run, push, and PR edit steps.</case> +<case name="Selective sync">To permanently exclude files, add glob patterns to `~/.cursor/.syncignore` (one per line, `#` comments). The script skips matching entries and reports them in the `ignored` array. 
To exclude ad-hoc, remove files from staging with `git reset HEAD .cursor/<file>` before committing.</case> +<case name="No README">If `.cursor/README.md` doesn't exist, skip PR description update and warn the user.</case> +</edge-cases> diff --git a/.cursor/skills/convention-sync/scripts/convention-sync.sh b/.cursor/skills/convention-sync/scripts/convention-sync.sh new file mode 100755 index 0000000..fe0adc3 --- /dev/null +++ b/.cursor/skills/convention-sync/scripts/convention-sync.sh @@ -0,0 +1,259 @@ +#!/usr/bin/env bash +# convention-sync.sh — Sync ~/.cursor/ files with the edge-conventions repo. +# Usage: ./convention-sync.sh [repo-dir] [--stage] [--commit -m "message"] [--repo-to-user] +# Compares ~/.cursor/{skills,rules,scripts} against <repo-dir>/.cursor/ and +# outputs a structured JSON summary of new, modified, and deleted files. +# With --stage: copies changed files and stages them in git (or copies to user dir with --repo-to-user). +# With --commit: stages + commits (requires -m). Only valid for user-to-repo direction. +# +# Sync model: ~/.cursor/ is canonical. Default direction (user-to-repo) copies local +# files into the repo. --repo-to-user is for onboarding or pulling others' changes. +# No bidirectional conflict detection — the chosen direction overwrites the other side. 
+ +set -euo pipefail + +REPO_DIR="" +DO_STAGE=false +DO_COMMIT=false +COMMIT_MSG="" +DIRECTION="user-to-repo" + +resolve_default_repo_dir() { + local cwd remote_url default_repo + + cwd="$(pwd)" + if [[ "$(basename "$cwd")" == "edge-conventions" ]]; then + printf '%s\n' "$cwd" + return 0 + fi + + if git -C "$cwd" rev-parse --is-inside-work-tree >/dev/null 2>&1; then + remote_url="$(git -C "$cwd" remote get-url origin 2>/dev/null || true)" + if [[ "$remote_url" == *"edge-conventions"* ]]; then + printf '%s\n' "$cwd" + return 0 + fi + fi + + default_repo="$HOME/git/edge-conventions" + if [[ -d "$default_repo/.git" || -f "$default_repo/.git" ]]; then + printf '%s\n' "$default_repo" + return 0 + fi + + return 1 +} + +validate_repo_dir() { + local repo_dir remote_url + repo_dir="$1" + + if [[ ! -d "$repo_dir/.cursor" ]]; then + echo "ERROR: Repo directory must contain .cursor/: $repo_dir" >&2 + return 1 + fi + + if [[ "$(basename "$repo_dir")" == "edge-conventions" ]]; then + return 0 + fi + + if git -C "$repo_dir" rev-parse --is-inside-work-tree >/dev/null 2>&1; then + remote_url="$(git -C "$repo_dir" remote get-url origin 2>/dev/null || true)" + if [[ "$remote_url" == *"edge-conventions"* ]]; then + return 0 + fi + fi + + echo "ERROR: Repo directory does not appear to be the edge-conventions checkout: $repo_dir" >&2 + return 1 +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --stage) DO_STAGE=true; shift ;; + --commit) DO_COMMIT=true; DO_STAGE=true; shift ;; + -m) COMMIT_MSG="$2"; shift 2 ;; + --repo-to-user) DIRECTION="repo-to-user"; shift ;; + *) REPO_DIR="$1"; shift ;; + esac +done + +if [[ -z "$REPO_DIR" ]]; then + if ! REPO_DIR="$(resolve_default_repo_dir)"; then + echo "ERROR: Could not resolve the edge-conventions repo. Run with an explicit repo path." >&2 + echo "Usage: convention-sync.sh [repo-dir] [--stage] [--commit -m \"message\"]" >&2 + exit 1 + fi +fi + +if ! 
validate_repo_dir "$REPO_DIR"; then + exit 1 +fi + +if [[ "$DO_COMMIT" == true && -z "$COMMIT_MSG" ]]; then + echo "ERROR: --commit requires -m \"message\"" >&2 + exit 1 +fi + +USER_DIR="$HOME/.cursor" +REPO_CURSOR="$REPO_DIR/.cursor" +DIRS="skills rules scripts" +SYNCIGNORE="$USER_DIR/.syncignore" + +# Load ignore patterns from .syncignore (one glob per line, # comments, blank lines skipped) +ignore_patterns=() +if [[ -f "$SYNCIGNORE" ]]; then + while IFS= read -r line; do + line="${line%%#*}" # strip comments + line="${line%"${line##*[![:space:]]}"}" # strip trailing whitespace + [[ -z "$line" ]] && continue + ignore_patterns+=("$line") + done < "$SYNCIGNORE" +fi + +is_ignored() { + local entry="$1" + for pattern in "${ignore_patterns[@]+"${ignore_patterns[@]}"}"; do + # shellcheck disable=SC2254 + if [[ "$entry" == $pattern ]]; then + return 0 + fi + done + return 1 +} + +new_json="[]" +mod_json="[]" +del_json="[]" +ignored_json="[]" + +# Check README.md separately (single file, not a directory) +if [[ -f "$USER_DIR/README.md" ]] && ! is_ignored "README.md"; then + if [[ ! -f "$REPO_CURSOR/README.md" ]]; then + new_json=$(echo "$new_json" | jq '. + ["README.md"]') + elif ! diff -q "$USER_DIR/README.md" "$REPO_CURSOR/README.md" >/dev/null 2>&1; then + mod_json=$(echo "$mod_json" | jq '. + ["README.md"]') + fi +elif [[ -f "$REPO_CURSOR/README.md" ]] && ! is_ignored "README.md"; then + del_json=$(echo "$del_json" | jq '. + ["README.md"]') +fi + +for dir in $DIRS; do + user_path="$USER_DIR/$dir" + repo_path="$REPO_CURSOR/$dir" + + [[ -d "$user_path" ]] || continue + + while IFS= read -r rel; do + [[ -z "$rel" ]] && continue + entry="$dir/$rel" + if is_ignored "$entry"; then + ignored_json=$(echo "$ignored_json" | jq --arg f "$entry" '. + [$f]') + continue + fi + repo_file="$repo_path/$rel" + if [[ ! -f "$repo_file" ]]; then + new_json=$(echo "$new_json" | jq --arg f "$entry" '. + [$f]') + elif ! 
diff -q "$user_path/$rel" "$repo_file" >/dev/null 2>&1; then + mod_json=$(echo "$mod_json" | jq --arg f "$entry" '. + [$f]') + fi + done < <(cd "$user_path" && find . -type f ! -name '.DS_Store' | sed 's|^\./||') + + if [[ -d "$repo_path" ]]; then + while IFS= read -r rel; do + [[ -z "$rel" ]] && continue + entry="$dir/$rel" + is_ignored "$entry" && continue + user_file="$user_path/$rel" + if [[ ! -f "$user_file" ]]; then + del_json=$(echo "$del_json" | jq --arg f "$entry" '. + [$f]') + fi + done < <(cd "$repo_path" && find . -type f ! -name '.DS_Store' | sed 's|^\./||') + fi +done + +total=$(echo "$new_json $mod_json $del_json" | jq -s '.[0] + .[1] + .[2] | length') + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Ensure ~/.claude/skills symlink points to ~/.cursor/skills +CLAUDE_SKILLS="$HOME/.claude/skills" +if [[ -L "$CLAUDE_SKILLS" ]]; then + link_target="$(readlink "$CLAUDE_SKILLS")" + if [[ "$link_target" != "$USER_DIR/skills" ]]; then + rm "$CLAUDE_SKILLS" + ln -s "$USER_DIR/skills" "$CLAUDE_SKILLS" + fi +elif [[ ! 
-e "$CLAUDE_SKILLS" ]]; then + mkdir -p "$(dirname "$CLAUDE_SKILLS")" + ln -s "$USER_DIR/skills" "$CLAUDE_SKILLS" +fi + +# Regenerate ~/.claude/CLAUDE.md from alwaysApply rules +if [[ -x "$SCRIPT_DIR/generate-claude-md.sh" ]]; then + "$SCRIPT_DIR/generate-claude-md.sh" >/dev/null +fi + +if [[ "$DO_STAGE" == true && "$total" -gt 0 ]]; then + all_copy=$(echo "$new_json $mod_json" | jq -sr '.[0] + .[1] | .[]') + all_del=$(echo "$del_json" | jq -r '.[]') + + if [[ "$DIRECTION" == "user-to-repo" ]]; then + while IFS= read -r f; do + [[ -z "$f" ]] && continue + # README.md is at .cursor/ root, others are in subdirs + if [[ "$f" == "README.md" ]]; then + cp "$USER_DIR/$f" "$REPO_CURSOR/$f" + else + mkdir -p "$(dirname "$REPO_CURSOR/$f")" + cp "$USER_DIR/$f" "$REPO_CURSOR/$f" + fi + done <<< "$all_copy" + + while IFS= read -r f; do + [[ -z "$f" ]] && continue + rm -f "$REPO_CURSOR/$f" + done <<< "$all_del" + + cd "$REPO_DIR" + while IFS= read -r f; do + [[ -z "$f" ]] && continue + git add ".cursor/$f" + done <<< "$all_copy" + + while IFS= read -r f; do + [[ -z "$f" ]] && continue + git rm -f --quiet ".cursor/$f" 2>/dev/null || true + done <<< "$all_del" + + if [[ "$DO_COMMIT" == true ]]; then + git commit -m "$COMMIT_MSG" + fi + else + while IFS= read -r f; do + [[ -z "$f" ]] && continue + if [[ "$f" == "README.md" ]]; then + cp "$REPO_CURSOR/$f" "$USER_DIR/$f" + else + mkdir -p "$(dirname "$USER_DIR/$f")" + cp "$REPO_CURSOR/$f" "$USER_DIR/$f" + fi + done <<< "$all_copy" + + while IFS= read -r f; do + [[ -z "$f" ]] && continue + rm -f "$USER_DIR/$f" + done <<< "$all_del" + fi +fi + +jq -n \ + --arg repoDir "$REPO_DIR" \ + --argjson new "$new_json" \ + --argjson modified "$mod_json" \ + --argjson deleted "$del_json" \ + --argjson ignored "$ignored_json" \ + --argjson total "$total" \ + --arg staged "$DO_STAGE" \ + --arg committed "$DO_COMMIT" \ + '{repoDir: $repoDir, total: $total, new: $new, modified: $modified, deleted: $deleted, ignored: $ignored, staged: ($staged == 
"true"), committed: ($committed == "true")}' diff --git a/.cursor/skills/convention-sync/scripts/generate-claude-md.sh b/.cursor/skills/convention-sync/scripts/generate-claude-md.sh new file mode 100755 index 0000000..3f793b8 --- /dev/null +++ b/.cursor/skills/convention-sync/scripts/generate-claude-md.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash +# generate-claude-md.sh — Generate ~/.claude/CLAUDE.md from alwaysApply .mdc rules. +# Usage: ./generate-claude-md.sh [--dry-run] +# +# Reads all .mdc files in ~/.cursor/rules/ that have alwaysApply: true, +# strips YAML frontmatter, and concatenates them into ~/.claude/CLAUDE.md. + +set -euo pipefail + +RULES_DIR="$HOME/.cursor/rules" +OUTPUT="$HOME/.claude/CLAUDE.md" +DRY_RUN=false + +[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true + +if [[ ! -d "$RULES_DIR" ]]; then + echo "ERROR: $RULES_DIR does not exist" >&2 + exit 1 +fi + +mkdir -p "$(dirname "$OUTPUT")" + +collected=() +skipped=() + +for mdc in "$RULES_DIR"/*.mdc; do + [[ -f "$mdc" ]] || continue + basename="$(basename "$mdc")" + + if head -20 "$mdc" | grep -q '^alwaysApply: true'; then + collected+=("$basename") + else + skipped+=("$basename") + fi +done + +if [[ ${#collected[@]} -eq 0 ]]; then + echo '{"collected":[],"skipped":[],"output":"","dry_run":true}' + exit 0 +fi + +content="# Global Rules\n\n" +content+="# Auto-generated from ~/.cursor/rules/ (alwaysApply: true files only).\n" +content+="# Do not edit manually. 
Re-generate via convention-sync.\n\n" + +for basename in "${collected[@]}"; do + mdc="$RULES_DIR/$basename" + name="${basename%.mdc}" + + # Strip YAML frontmatter (everything between first --- and second ---) + body=$(awk ' + BEGIN { in_front=0; past_front=0 } + /^---$/ { + if (!past_front) { + if (in_front) { past_front=1; next } + else { in_front=1; next } + } + } + past_front { print } + ' "$mdc") + + # Trim leading blank lines + body=$(echo "$body" | sed '/./,$!d') + + content+="---\n\n" + content+="## $name\n\n" + content+="$body\n\n" +done + +if [[ "$DRY_RUN" == true ]]; then + echo -e "$content" > /dev/null +else + echo -e "$content" > "$OUTPUT" +fi + +# Output JSON summary +collected_json=$(printf '%s\n' "${collected[@]}" | jq -R . | jq -s .) +skipped_json=$(printf '%s\n' "${skipped[@]}" | jq -R . | jq -s .) + +jq -n \ + --argjson collected "$collected_json" \ + --argjson skipped "$skipped_json" \ + --arg output "$OUTPUT" \ + --arg dry_run "$DRY_RUN" \ + '{collected: $collected, skipped: $skipped, output: $output, dry_run: ($dry_run == "true")}' diff --git a/.cursor/skills/dep-pr/SKILL.md b/.cursor/skills/dep-pr/SKILL.md new file mode 100644 index 0000000..10f02fb --- /dev/null +++ b/.cursor/skills/dep-pr/SKILL.md @@ -0,0 +1,103 @@ +--- +name: dep-pr +description: Create a dependent Asana task in another repo and run the full PR workflow for it. Use when the user needs cross-repo dependent task creation. +compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana integration. +metadata: + author: j0ntz +--- + +<goal>Create a dependent Asana task in another repo and run the full PR workflow for it — automating cross-repo task creation, dependency linking, implementation, and PR creation.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="parent-required">A parent Asana task URL is always required. 
It provides context, project placement, and dependency linking.</rule> +<rule id="check-existence">Always check if a dependent task already exists before creating one. The script handles this — respect the `CREATED: false` output.</rule> +<rule id="script-timeouts">Asana scripts can take up to 90s. Always set `block_until_ms: 120000`.</rule> +<rule id="no-impl-before-task">Do NOT begin implementation until the dependent task is created and linked.</rule> +<rule id="same-project">The dependent task MUST be created in the same project(s) as the parent task, including release-version project tags (for example `4.46.0`). The script handles this automatically by copying all parent project memberships.</rule> +<rule id="initial-assignee">The dependent task is automatically assigned to the current user (resolved via `asana-whoami.sh`). Do NOT hardcode a user GID — omit `--assignee` to let the script auto-resolve.</rule> +</rules> + +<dependency-hierarchy description="Repo dependency structure. Lower-level repos block higher-level repos."> +The Edge repos have a layered dependency structure: + +``` +core (lowest — types, APIs, runtime) + ↑ +accb / exch (middle — currency and exchange plugins, depend on core) + ↑ +gui (highest — UI, depends on all others) +``` + +**Dependency direction rule**: When creating a dependent task for a repo at a **lower or equal** level, the new task **blocks** the parent task. This is the standard case — e.g., an `accb:` task blocks the `gui:` parent because the plugin change must land first. + +If the target repo is at a **higher** level than the parent (e.g., creating a `gui:` task from an `accb:` parent), this is unusual. Ask the user to confirm before proceeding — the dependency direction may need to be reversed (parent blocks the new task instead). 
+ +| Level | Repos | +|-------|-------| +| 3 (highest) | `gui` | +| 2 | `accb`, `exch` | +| 1 (lowest) | `core` | + +</dependency-hierarchy> + +<repo-map description="Shorthand prefixes to repo directories and branch bases."> + +| Prefix | Repository | Directory | Branch from | +|--------|-----------|-----------|-------------| +| `gui` | `edge-react-gui` | `~/git/edge-react-gui` | `develop` | +| `exch` | `edge-exchange-plugins` | `~/git/edge-exchange-plugins` | `master` | +| `accb` | `edge-currency-accountbased` | `~/git/edge-currency-accountbased` | `master` | +| `core` | `edge-core-js` | `~/git/edge-core-js` | `master` | + +</repo-map> + +<step id="1" name="Resolve parent task and target repo"> +The user provides a parent Asana task URL and a target repo (as a prefix or full name). + +1. **Extract the parent task GID** from the URL. +2. **Fetch parent task context** using `asana-get-context.sh` to understand what work is needed. +3. **Determine the target repo** from the user's input. If not specified, ask. +4. **Validate dependency direction** using the hierarchy table. If the target is at a higher level than the parent, warn and ask for confirmation. +</step> + +<step id="2" name="Create dependent task"> +Derive the dependent task name from the parent: `<target-prefix>: <parent task name without its prefix>`. + +If the parent task name already has a prefix (e.g. `gui: Some feature`), strip it and replace with the target prefix. If no prefix, prepend the target prefix. 
+ +```bash +~/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh \ + --parent <parent_gid> \ + --name "<prefix>: <task name>" \ + --notes "<description referencing parent task>" +``` + +The script: +- Checks if a matching dependency already exists (by name) — if so, outputs `CREATED: false` and the existing GID +- Creates the task in all parent project memberships (including release-version tags) +- Copies priority, status, and `Planned` from the parent +- Assigns to the current user (auto-resolved via `asana-whoami.sh`) +- Sets the new task as a blocking dependency of the parent + +If `CREATED: false`, report the existing task to the user and continue with the existing GID. +</step> + +<step id="3" name="Implement and PR"> +Delegate to the `pr-create.md` workflow using the **new** (or existing) task URL: + +1. `cd` to the target repo directory (see repo-map). +2. **Read `~/.cursor/skills/pr-create/SKILL.md` now** (use the Read tool — do NOT skip this). Then follow its steps 1-6 (push, verify, build PR description, create PR, optional Asana updates, report). + +The Asana task context from step 1 provides the implementation requirements. The agent already has full context from the parent task. +</step> + +<step id="4" name="Report"> +Display both the new Asana task and the PR as clickable links. Note the dependency relationship. +</step> + +<edge-cases> +<case name="Dependent task already exists">The script detects this. Report: "Found existing dependent task: [link]. Continuing with PR workflow." Then proceed to step 3.</case> +<case name="Parent task has no project">The script falls back to the first available project. Warn the user if the placement looks wrong.</case> +<case name="Target repo already has a matching branch">Step 3 delegates to `pr-create.md` which handles branch state assessment.</case> +<case name="Upward dependency (higher-level target)">Ask: "Creating a [gui] task from a [core] parent is unusual — the dependency direction would be reversed. 
Confirm? (yes/no)"</case> +</edge-cases> diff --git a/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh b/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh new file mode 100755 index 0000000..968627c --- /dev/null +++ b/.cursor/skills/dep-pr/scripts/asana-create-dep-task.sh @@ -0,0 +1,245 @@ +#!/usr/bin/env bash +# asana-create-dep-task.sh +# Create a dependent Asana task that blocks a parent task. +# Checks for existing dependencies first to avoid duplicates. +# +# Usage: +# asana-create-dep-task.sh --parent <parent_gid> --name "task name" [--notes "description"] [--assignee <user_gid>] +# +# If --assignee is omitted, the task is assigned to the current user +# (resolved via asana-whoami.sh). +# +# Requires env var: ASANA_TOKEN +# +# Output: +# TASK_GID: <gid> +# TASK_URL: <url> +# CREATED: true|false (false if task already existed) +# ASSIGNED_TO: <user_gid> +# FIELDS_SET: priority=<val>, status=<val>, planned=<val>, reviewer=<name>, implementor=<name> +# DEPENDENCY_SET: <new_gid> blocks <parent_gid> +# +# Exit codes: 0 = success, 1 = error +set -euo pipefail + +PARENT_GID="" +TASK_NAME="" +TASK_NOTES="" +ASSIGNEE_GID="" + +while [[ $# -gt 0 ]]; do + case "$1" in + --parent) PARENT_GID="$2"; shift 2 ;; + --name) TASK_NAME="$2"; shift 2 ;; + --notes) TASK_NOTES="$2"; shift 2 ;; + --assignee) ASSIGNEE_GID="$2"; shift 2 ;; + *) echo "Unknown flag: $1" >&2; exit 1 ;; + esac +done + +if [[ -z "$PARENT_GID" || -z "$TASK_NAME" ]]; then + echo "Usage: asana-create-dep-task.sh --parent <gid> --name <name> [--notes <desc>] [--assignee <gid>]" >&2 + exit 1 +fi + +if [[ -z "${ASANA_TOKEN:-}" ]]; then + echo "Error: ASANA_TOKEN not set" >&2 + exit 1 +fi + +API="https://app.asana.com/api/1.0" +AUTH="Authorization: Bearer $ASANA_TOKEN" + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +# Auto-resolve current user GID (used for assignee and implementor) +CURRENT_USER_GID=$("$SCRIPT_DIR/../../asana-whoami.sh" 2>/dev/null || true) + +# Auto-resolve assignee to current 
user if not provided +if [[ -z "$ASSIGNEE_GID" ]]; then + ASSIGNEE_GID="$CURRENT_USER_GID" +fi + +# Phase 1: Check if a dependency with a matching name already exists +existing=$(curl -s "$API/tasks/$PARENT_GID/dependencies?opt_fields=name&limit=100" \ + -H "$AUTH" | python3 -c " +import sys, json +data = json.load(sys.stdin).get('data', []) +target = '''$TASK_NAME''' +for dep in data: + if dep.get('name', '').strip().lower() == target.strip().lower(): + print(dep['gid']) + sys.exit(0) +print('') +") + +if [[ -n "$existing" ]]; then + echo "TASK_GID: $existing" + echo "TASK_URL: https://app.asana.com/0/0/$existing" + echo "CREATED: false" + exit 0 +fi + +# Phase 2: Get parent task's project and custom fields to copy +parent_info=$(curl -s "$API/tasks/$PARENT_GID?opt_fields=workspace.gid,memberships.project.gid,memberships.project.name,custom_fields.gid,custom_fields.enum_value.gid,custom_fields.enum_value.name,custom_fields.people_value.gid,custom_fields.people_value.name" \ + -H "$AUTH") + +read -r WORKSPACE_GID PROJECT_GIDS PRIORITY_INFO STATUS_INFO PLANNED_INFO REVIEWER_INFO < <(echo "$parent_info" | python3 -c " +import sys, json, re +data = json.load(sys.stdin)['data'] +ws = data.get('workspace', {}).get('gid', '') + +# Collect all parent projects (including release-version projects like 4.46.0) +projects = [] +for m in data.get('memberships', []): + p = m.get('project', {}) + gid = p.get('gid', '') + if gid: + projects.append(gid) +if not projects and data.get('memberships'): + projects.append(data['memberships'][0]['project']['gid']) +proj_str = ','.join(projects) + +# Field GIDs (stable known fields) +ENUM_FIELDS = { + '795866930204488': 'priority', + '1190660107346181': 'status', +} +PEOPLE_FIELDS = { + '1203334388004673': 'reviewer', +} + +enum_results = {} +people_results = {} + +for f in data.get('custom_fields', []): + fgid = f['gid'] + if fgid in ENUM_FIELDS and f.get('enum_value'): + label = ENUM_FIELDS[fgid] + enum_results[label] = (fgid, 
f['enum_value']['gid'], f['enum_value'].get('name', '')) + # "Planned" is workspace-specific, so detect by field name: + if f.get('name') == 'Planned' and f.get('enum_value'): + enum_results['planned'] = ( + fgid, + f['enum_value']['gid'], + f['enum_value'].get('name', '') + ) + if fgid in PEOPLE_FIELDS: + label = PEOPLE_FIELDS[fgid] + pv = f.get('people_value', []) + if pv: + people_results[label] = (fgid, pv[0]['gid'], pv[0].get('name', '')) + +def fmt_enum(key): + if key in enum_results: + return ':'.join(enum_results[key]) + return '::' + +def fmt_people(key): + if key in people_results: + return ':'.join(people_results[key]) + return '::' + +print(f\"{ws} {proj_str} {fmt_enum('priority')} {fmt_enum('status')} {fmt_enum('planned')} {fmt_people('reviewer')}\") +") + +PRIORITY_FIELD=$(echo "$PRIORITY_INFO" | cut -d: -f1) +PRIORITY_ENUM=$(echo "$PRIORITY_INFO" | cut -d: -f2) +PRIORITY_NAME=$(echo "$PRIORITY_INFO" | cut -d: -f3) +STATUS_FIELD=$(echo "$STATUS_INFO" | cut -d: -f1) +STATUS_ENUM=$(echo "$STATUS_INFO" | cut -d: -f2) +STATUS_NAME=$(echo "$STATUS_INFO" | cut -d: -f3) +PLANNED_FIELD=$(echo "$PLANNED_INFO" | cut -d: -f1) +PLANNED_ENUM=$(echo "$PLANNED_INFO" | cut -d: -f2) +PLANNED_NAME=$(echo "$PLANNED_INFO" | cut -d: -f3) +REVIEWER_FIELD=$(echo "$REVIEWER_INFO" | cut -d: -f1) +REVIEWER_GID=$(echo "$REVIEWER_INFO" | cut -d: -f2) +REVIEWER_NAME=$(echo "$REVIEWER_INFO" | cut -d: -f3) + +# Auto-resolve implementor to current user +IMPLEMENTOR_FIELD="1203334386796983" +IMPLEMENTOR_GID="$CURRENT_USER_GID" +IMPLEMENTOR_NAME="current user" + +# Phase 3: Create the task +NOTES_JSON=$(python3 -c "import json; print(json.dumps('''$TASK_NOTES'''))") + +# Build projects list from comma-separated GIDs +IFS=',' read -ra PROJECT_ARR <<< "$PROJECT_GIDS" + +new_task=$(curl -s "$API/tasks" \ + -H "$AUTH" \ + -H "Content-Type: application/json" \ + -d "$(python3 -c " +import json +projects = '''$PROJECT_GIDS'''.split(',') +assignee = '''$ASSIGNEE_GID''' or None +data = { + 
'data': { + 'name': '''$TASK_NAME''', + 'notes': $NOTES_JSON, + 'projects': [p for p in projects if p], + 'workspace': '$WORKSPACE_GID' + } +} +if assignee: + data['data']['assignee'] = assignee +print(json.dumps(data)) +")") + +NEW_GID=$(echo "$new_task" | python3 -c " +import sys, json +data = json.load(sys.stdin) +if 'errors' in data: + print('ERROR: ' + json.dumps(data['errors']), file=sys.stderr) + sys.exit(1) +print(data['data']['gid']) +") + +if [[ -z "$NEW_GID" || "$NEW_GID" == "ERROR"* ]]; then + echo "Error creating task" >&2 + exit 1 +fi + +# Phase 3b: Set copied fields via shared updater script +UPDATE_CMD=("$SCRIPT_DIR/../../asana-task-update/scripts/asana-task-update.sh" "--task" "$NEW_GID") +if [[ -n "$PRIORITY_ENUM" ]]; then + UPDATE_CMD+=("--set-priority" "$PRIORITY_ENUM") +fi +if [[ -n "$STATUS_ENUM" ]]; then + UPDATE_CMD+=("--set-status" "$STATUS_ENUM") +fi +if [[ -n "$PLANNED_ENUM" ]]; then + UPDATE_CMD+=("--set-planned" "$PLANNED_ENUM") +fi +if [[ -n "$REVIEWER_GID" ]]; then + UPDATE_CMD+=("--set-reviewer" "$REVIEWER_GID") +fi +if [[ -n "$IMPLEMENTOR_GID" ]]; then + UPDATE_CMD+=("--set-implementor" "$IMPLEMENTOR_GID") +fi +if [[ ${#UPDATE_CMD[@]} -gt 3 ]]; then + "${UPDATE_CMD[@]}" > /dev/null +fi + +FIRST_PROJECT=$(echo "$PROJECT_GIDS" | cut -d, -f1) +echo "TASK_GID: $NEW_GID" +echo "TASK_URL: https://app.asana.com/0/$FIRST_PROJECT/$NEW_GID" +echo "CREATED: true" +[[ -n "$ASSIGNEE_GID" ]] && echo "ASSIGNED_TO: $ASSIGNEE_GID" + +# Phase 4: Set as blocking dependency +curl -s -X POST "$API/tasks/$PARENT_GID/addDependencies" \ + -H "$AUTH" \ + -H "Content-Type: application/json" \ + -d "{\"data\": {\"dependencies\": [\"$NEW_GID\"]}}" > /dev/null + +echo "DEPENDENCY_SET: $NEW_GID blocks $PARENT_GID" + +fields_msg="" +[[ -n "$PRIORITY_NAME" ]] && fields_msg="priority=$PRIORITY_NAME" +[[ -n "$STATUS_NAME" ]] && fields_msg="${fields_msg:+$fields_msg, }status=$STATUS_NAME" +[[ -n "$PLANNED_NAME" ]] && fields_msg="${fields_msg:+$fields_msg, 
}planned=$PLANNED_NAME" +[[ -n "$REVIEWER_NAME" ]] && fields_msg="${fields_msg:+$fields_msg, }reviewer=$REVIEWER_NAME" +[[ -n "$IMPLEMENTOR_GID" ]] && fields_msg="${fields_msg:+$fields_msg, }implementor=$IMPLEMENTOR_NAME" +[[ -n "$fields_msg" ]] && echo "FIELDS_SET: $fields_msg" diff --git a/.cursor/skills/fix-eslint/SKILL.md b/.cursor/skills/fix-eslint/SKILL.md new file mode 100644 index 0000000..8904d9d --- /dev/null +++ b/.cursor/skills/fix-eslint/SKILL.md @@ -0,0 +1,108 @@ +--- +name: fix-eslint +description: Fix ESLint warnings by applying documented patterns. Use when addressing @typescript-eslint/no-deprecated warnings for NavigationBase, RouteProp, or other deprecated types in edge-react-gui. +--- + +<goal>Resolve ESLint `@typescript-eslint/no-deprecated` warnings by replacing deprecated type references with their non-deprecated equivalents.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="tsc-after-fix">Run `npx tsc --noEmit` after every type change to verify no new type errors are introduced.</rule> +<rule id="no-suppress">Do not suppress deprecation warnings with `eslint-disable` comments. Fix the underlying type reference. +Exception: `NavigationBase` deprecation in shared cross-navigator code (Categories C, D, F below) is accepted — not suppressed, genuinely not fixable without a broader v7 navigation migration. When the fix scope is too broad, add a TODO comment documenting the required migration pattern and accept the warning.</rule> +<rule id="scope-control">Only modify files with deprecation warnings. Do not refactor downstream declarations unless required for the fix to compile.</rule> +</rules> + +<patterns> + +<pattern id="navigation-base" rule="@typescript-eslint/no-deprecated" symbol="NavigationBase"> +`NavigationBase` is a flat navigation type hack in `routerTypes.tsx` that unions all navigator param lists (`RootParamList & DrawerParamList & EdgeAppStackParamList & ...`) to pretend the app is flat. 
It is deprecated because it tracks **react-navigation v7 breaking changes**: + +1. `navigate()` no longer crosses nested navigator boundaries at runtime. +2. `navigate()` no longer goes back to an existing screen to update params — use `popTo()` or `navigate(screen, params, { pop: true })` instead. + +v7 provides `navigateDeprecated()` and `navigationInChildEnabled` as temporary bridges, both removed in v8. **Do NOT create non-deprecated aliases** (like `AppNavigation`) — this hides a real migration requirement. + +Fix `NavigationBase` deprecation by identifying which category the usage falls into: + +**Category A — Pass-through props** (component accepts `NavigationBase` only to forward it to children or actions): +- Fix: Remove the `navigation` prop. Callers already have navigation in scope. If the child needs navigation, it should use `useNavigation()` or accept specific callbacks. +```typescript +// Before — CancellableProcessingScene accepts navigation to forward to onError +interface Props { navigation: NavigationBase; onError: (nav: NavigationBase, err: unknown) => void } + +// After — remove navigation prop, callers handle navigation in callbacks +interface Props { onError: (err: unknown) => Promise<void> } +``` + +**Category B — Direct navigation in non-scene components** (component accepts `NavigationBase`, calls `navigate()`/`push()` directly): +- Fix: Replace `navigation: NavigationBase` prop with `useNavigation()` hook typed to the navigator context the component lives in. Or replace with specific navigation callbacks from the parent scene. 
+```typescript +// Before — BalanceCard accepts NavigationBase, calls navigate directly +interface Props { navigation: NavigationBase } +const BalanceCard: React.FC<Props> = props => { + props.navigation.push('send2', { walletId, tokenId }) +} + +// After (option 1) — useNavigation hook +const BalanceCard: React.FC<Props> = props => { + const navigation = useNavigation<EdgeAppSceneProps<'home'>['navigation']>() + navigation.push('send2', { walletId, tokenId }) +} + +// After (option 2) — navigation callbacks +interface Props { onSend: (walletId: string, tokenId: EdgeTokenId) => void } +``` +- If the fix would cascade to many callers or require determining the correct navigator context across multiple usages, add a `// TODO: Replace NavigationBase with useNavigation() or callbacks. Requires v7 navigation migration.` comment and move on. + +**Category C — Shared action/thunk functions** (functions in `src/actions/` accept `NavigationBase`): +- Fix: Invert control. Replace the `navigation: NavigationBase` parameter with a callback for the navigation action the function needs. +```typescript +// Before — function navigates internally +function activateWalletTokens(navigation: NavigationBase, wallet, tokenIds): ThunkAction<Promise<void>> { + // ... calls navigation.navigate('editToken', ...) internally +} + +// After — caller provides the navigate action +function activateWalletTokens(wallet, tokenIds, onNavigate: (route: string, params: object) => void): ThunkAction<Promise<void>> { + // ... calls onNavigate('editToken', ...) instead +} +``` +- Simpler alternative for single-navigate functions: Return the target route + params instead of navigating; let the caller dispatch. +- If the function has many navigate calls to different screens or the refactoring would touch many callers, add a `// TODO: Remove NavigationBase dependency. Requires inversion of navigation control for v7 migration.` comment and move on. 
+ +**Category D — Shared modal components** (modals accept `NavigationBase`, navigate after user interaction): +- Fix: Modal returns a result via Airship bridge resolve; caller handles navigation based on the result. Or modal accepts navigation callbacks. +- If the modal's navigation logic is complex (multiple paths), add a comment and move on. + +**Category E — Scene component casts** (`navigation as NavigationBase`): +- These casts exist because the scene passes navigation to a Category A-D consumer. +- Fix: No direct fix needed — casts disappear automatically when the consumer is migrated. +- If the scene has its own `NavigationBase` usage unrelated to shared code, apply Category B fix. + +**Category F — Service components** (non-scene services: `DeepLinkingManager`, `AccountCallbackManager`, etc.): +- These are the broadest migration cases. Always add: `// TODO: Remove NavigationBase dependency. Requires broader v7 navigation migration for service-level navigation.` +- Do not attempt to fix these incrementally — they are cross-cutting and require dedicated migration work. +</pattern> + +<pattern id="route-prop" rule="@typescript-eslint/no-deprecated" symbol="RouteProp"> +Replace deprecated `RouteProp<'routeName'>` with the scene-specific route type. + +```typescript +// Before +import type { RouteProp } from '../../types/routerTypes' +const route = useRoute<RouteProp<'walletDetails'>>() + +// After +import type { WalletsTabSceneProps } from '../../types/routerTypes' +const route = useRoute<WalletsTabSceneProps<'walletDetails'>['route']>() +``` + +Choose the scene props type that matches the navigator the component lives in: +- `WalletsTabSceneProps` for walletList, walletDetails, transactionList, transactionDetails +- `EdgeAppSceneProps` for routes in EdgeAppStackParamList +- `SwapTabSceneProps` for swap routes +- `BuySellTabSceneProps` for buy/sell routes +- `RootSceneProps` for login, home, etc. 
+</pattern> + +</patterns> diff --git a/.cursor/skills/im/SKILL.md b/.cursor/skills/im/SKILL.md new file mode 100644 index 0000000..7094d19 --- /dev/null +++ b/.cursor/skills/im/SKILL.md @@ -0,0 +1,164 @@ +--- +name: im +description: Implement an Asana task or ad-hoc feature/fix with clean, structured commits. Use when the user wants to implement a task, build a feature, or fix a bug in an Edge repository. +compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana integration. +metadata: + author: j0ntz +--- + +<goal>Implement an Asana task or ad-hoc feature/fix with clean, well-structured commits.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="read-coding-standards">Before writing ANY code, read `.cursor/rules/typescript-standards.mdc` and follow all rules and standards in it throughout the implementation.</rule> +<rule id="no-impl-before-confirm">Do NOT begin implementation until the user confirms the `/asana-plan` output (Step 0).</rule> +<rule id="lint-before-change">Before the first edit to ANY file, run `~/.cursor/skills/im/scripts/lint-warnings.sh <files...>` to auto-fix auto-fixable lint issues, then load any remaining lint findings and matching fix patterns into context. If the script changes files or leaves findings, handle those in a separate lint-fix commit IMMEDIATELY BEFORE the commit with actual changes. This applies to every file you touch, including ones discovered mid-implementation — not just the files you planned upfront.</rule> +<rule id="no-manual-formatting">Do not manually fix formatting. `lint-commit.sh` runs `eslint --fix` (which includes Prettier) before committing. 
If you see a formatting lint after editing, do NOT make another edit to fix it.</rule> +<rule id="commit-script">Always commit using `~/.cursor/skills/lint-commit.sh -m "message" [files...]` or `--fixup <hash>` for fixup commits.</rule> +<rule id="generated-companion-files">When committing with scoped file arguments, treat `src/locales/strings`, `eslint.config.mjs`, and snapshot files as expected auto-generated companion files in the same commit. If `lint-commit.sh` reports additional non-generated files outside the intended scope, evaluate whether the commit plan is wrong before continuing.</rule> +<rule id="clean-history">The final commit history must read as a clean, straight-line progression — as if every decision was made correctly up front. Never preserve the "squiggly path" of development (adding then removing code, temporary scaffolding, exploratory commits). If you introduce something in commit A and remove it in commit B, restructure so the final history never contains it. Plan commits proactively to avoid this; when it happens anyway, restructure the branch before finishing.</rule> +<rule id="no-script-bypass">If a companion script fails, report the error and STOP. Do NOT fall back to raw `gh`, `curl`, or other workarounds.</rule> +<rule id="script-timeouts">`asana-get-context.sh` can take up to 90s and `install-deps.sh` can exceed 10s on repo prepare steps. Always use at least a 120000ms timeout for these scripts to avoid false failures from client-side time limits.</rule> +</rules> + +<step id="0" name="Planning handoff via /asana-plan"> +Always delegate planning to `~/.cursor/skills/asana-plan/SKILL.md` first: + +- If user provided an Asana URL, run `/asana-plan` in Asana mode. +- If user provided ad-hoc text or file references, run `/asana-plan` in text/file mode. + +`/asana-plan` returns a plan file path + short execution summary and waits for user confirmation. Start implementation only after that confirmation. 
+ +### Regression analysis + +If the task describes a regression (e.g. "broke in version X", "stopped working after update"): + +1. **Identify the breaking commit** using `git log`, `git bisect`, or version tag comparison. Don't take the reported version from the task at face value — verify by examining the actual commit history. +2. **Review the original change's full intent.** Find the associated PR and any linked tasks/discussions. The regression-causing commit likely had legitimate goals (performance, refactoring, new features). Understand ALL of its intended effects, not just the one that broke. +3. **Ensure the fix preserves the original intent.** The fix must not undo the beneficial changes introduced by the regression commit. If the fix conflicts with the original intent, flag this to the user with tradeoffs before proceeding. + </step> + +<step id="1" name="Branch setup"> +After Step 0 determines the target repo (or if no Asana task, use the current repo): + +1. **Stash any uncommitted changes** (including untracked files) before switching branches: `git stash -u` +2. Determine the correct branch state: + - **Wrong repo**: `cd` to the correct workspace repo directory. + - **On an unrelated feature branch**: Switch to the base branch (see "Branch from" column in `task-review.md`), then create a new feature branch. + - **On the base branch**: Create a new feature branch. + - **On the correct feature branch**: Continue. +3. **Branch naming**: `$GIT_BRANCH_PREFIX/<short-description>` or `$GIT_BRANCH_PREFIX/fix/<short-description>` for bug fixes. Use kebab-case. Example: `<prefix>/some-feature` or `<prefix>/fix/some-bug` +4. **Assume a new branch is needed** unless the current branch clearly matches the task. Do NOT ask for confirmation — the existing branch has its own committed work and is unaffected. +5. 
**Install dependencies**: After creating or switching to the feature branch, run `~/.cursor/skills/install-deps.sh` with a timeout of at least 120000ms to ensure dependencies match the base branch state without false timeout failures. + +If the task spans multiple repos, note the additional repos but implement in the primary repo first. +</step> + +<step id="2" name="Pre-change lint check"> +**Before writing ANY code**, run `lint-warnings.sh` on every file you plan to modify: + +```bash +~/.cursor/skills/im/scripts/lint-warnings.sh <file1> <file2> ... +``` + +This script: + +1. Runs `eslint --fix` +2. Detects files that will be "graduated" from the warning suppression list on commit, promoting their suppressed-rule warnings to errors in the output +3. Shows any remaining findings grouped by rule (with graduation promotions already applied) +4. Outputs matching fix patterns from `~/.cursor/rules/typescript-standards.mdc` +5. Flags unmatched rules that need new patterns added + +If the script auto-fixes files or remaining findings exist: + +1. Fix all reported **errors** first — these include graduation-promoted warnings that will block `lint-commit.sh` after the file is removed from the suppression list +2. Fix remaining **warnings** using the matched patterns in the output +3. For **unmatched rules**: After fixing, add a new `<pattern id="..." rule="...">` to `typescript-standards.mdc` so future occurrences have guidance +4. Commit the pre-existing lint changes separately: + ```bash + ~/.cursor/skills/lint-commit.sh -m "Fix lint warnings in <ComponentName>" <file1> <file2> ... + ``` + +**Architectural vs mechanical fixes**: If a pattern notes "architectural change" (e.g., `styled()` refactoring), flag to user rather than fixing inline — these changes have broader impact and may warrant separate discussion. 
+ +`lint-commit.sh` treats passed file arguments as the primary commit scope and only stages those files plus generated companion files (`src/locales/strings`, `eslint.config.mjs`, snapshots). It does not stage unrelated dirty files in the working tree. + +This ensures the subsequent feature commit introduces zero pre-existing lint findings. This is the initial pass — if you discover additional files to modify during Step 3, the same check applies (see Step 3). +</step> + +<step id="3" name="Implementation"> +1. **Lint-check newly discovered files**: If you need to modify a file not covered in Step 2, run `~/.cursor/skills/im/scripts/lint-warnings.sh <file>` before editing it. If the script auto-fixes the file or leaves remaining pre-existing findings, commit those changes as a `--fixup` to the lint-fix commit from Step 2 (use `git log --oneline` to find the hash). If no lint-fix commit exists yet, create one. +2. Break up the feature into multiple commits if necessary. Commit messages should be a concise title without tags like "feat" and a short body. +3. Open relevant ts/tsx files before writing code. +4. Commit using `lint-commit.sh`: + ```bash + ~/.cursor/skills/lint-commit.sh -m "commit message" [files...] + ``` + You can optionally pass specific files to scope the commit. +5. **Fixup commits**: When a change logically amends an earlier commit on the branch (e.g. fixing a typo from commit A, adding a missed import for commit B, adjusting behavior introduced in a prior commit), use a fixup commit instead of a standalone commit: + ```bash + ~/.cursor/skills/lint-commit.sh --fixup <hash> [files...] + ``` + This marks the commit for automatic squashing into the target commit. Use `git log --oneline` to find the target hash. +6. 
Include a `CHANGELOG.md` entry in the **last feature commit** (not a separate commit) using format: `- type: description` + - Types: `added`, `changed`, `fixed` + - Example: `- added: New short feature description` + - Entries are grouped by type in order: all `added`, then all `changed`, then all `fixed` + - CHANGELOG.md must ONLY appear in the last commit — never in intermediate feature commits + - Avoid reading more than 50 lines of the file + - **Which section** (see CHANGELOG placement rules below) +</step> + +<edge-cases name="edge-react-gui only"> +The following apply only when working in the `edge-react-gui` repo: + +- New string literals should be added to `en_US.ts` in the SAME commit that uses them, not in a separate commit. The `lint-commit.sh` script handles `yarn localize` automatically when `en_US.ts` is in the changeset. +- **Editing `en_US.ts`**: Use grep to find exact insertion points rather than reading the file in chunks. The file is ~2500 lines; reading it piecemeal wastes context. Example: + ```bash + rg -n "nearby_string_key" src/locales/en_US.ts + ``` + Then use StrReplace with minimal context — only enough surrounding lines to make the match unique. Do NOT reformat existing lines in the replacement. + +### CHANGELOG placement (edge-react-gui) + +`edge-react-gui` has two active CHANGELOG sections: `## Unreleased (develop)` and `## X.Y.Z (staging)`. Which section to target depends on the Asana task's version project: + +1. **Read the staging version** from CHANGELOG: grep for `^## [0-9].*staging` to get the version (e.g. `4.43.0`). +2. **Read the task's version project** from the `VERSION_PROJECT` field in the Asana context output (e.g. `4.44.0`). +3. **Compare**: + - If `VERSION_PROJECT` matches the staging version → add entry under the `## X.Y.Z (staging)` heading. + - If `VERSION_PROJECT` does NOT match (or is not set) → add entry under `## Unreleased (develop)`. +4. If no Asana context was fetched, default to `## Unreleased`. 
+ +Other repos only have `## Unreleased` — no staging distinction. +</edge-cases> + +<step id="4" name="History cleanup"> +**Always run this step** — do not skip it and do not ask for permission. Review the branch history against the `clean-history` rule and automatically fix any issues found. + +1. **Check for an open PR**: Run `gh pr view --json url,reviews 2>/dev/null` to determine if a PR exists and whether it has human review comments. +2. **If a PR exists with human review comments**, skip cleanup — rewriting history would lose review context. Note the pending cleanup in the retrospective. +3. **Otherwise (no PR, or PR with no human reviews)**, always perform ALL applicable cleanup automatically: + - **Fixup commits exist**: Autosquash with `rm -f .git/index.lock && GIT_SEQUENCE_EDITOR=true git rebase -i --autosquash <base-branch>`. Do this immediately — never leave fixup commits unsquashed. + - **Reorder commits**: Use the companion script to reorder commits to the desired order. Hashes are oldest-to-newest: + ```bash + ~/.cursor/skills/im/scripts/reorder-commits.sh <base-branch> <hash1> <hash2> ... + ``` + The script handles index lock cleanup, awk-based reordering, and verifies the tree is unchanged afterward. + - **Structural issues** (add-then-remove cycles, misplaced changes, commits that should be squashed, CHANGELOG in intermediate commits): Use `reorder-commits.sh` for reordering. For squash/drop operations, use `rm -f .git/index.lock && GIT_SEQUENCE_EDITOR="..." git rebase -i <base-branch>` with an awk or sed script. Verify the final tree matches the pre-restructure state with `git diff`. + </step> + +<step id="5" name="Verification"> +Run full verification to catch issues that per-commit checks (`lint-commit.sh`) may have missed (e.g. transitive snapshot breakage, type errors across files): + +```bash +~/.cursor/skills/verify-repo.sh . 
--base <upstream-ref> +``` + +Where `<upstream-ref>` is `origin/develop` for `edge-react-gui` or `origin/master` for other repos. Set `block_until_ms: 120000`. + +If verification fails, fix the issue with a fixup commit targeting the responsible commit, then re-run history cleanup (step 4) and verification. +</step> + +<step id="6" name="Retrospective"> +When finished, evaluate the context and propose potential improvements to this process — mistakes or errors in the tool calls, ways to improve excessive context bloat, etc. +</step> diff --git a/.cursor/skills/im/scripts/lint-warnings.sh b/.cursor/skills/im/scripts/lint-warnings.sh new file mode 100755 index 0000000..c124dc2 --- /dev/null +++ b/.cursor/skills/im/scripts/lint-warnings.sh @@ -0,0 +1,260 @@ +#!/usr/bin/env bash +# lint-warnings.sh +# Run eslint --fix on files and match any remaining findings to documented fix +# patterns. Detects files that will be "graduated" from the ESLint warning +# suppression list when committed, promoting their suppressed-rule warnings to +# errors so they can be fixed before commit. +# +# Usage: +# lint-warnings.sh <file1> [file2] ... +# +# Output: +# 1. Summary of auto-fixes applied (if any) +# 2. Graduation warnings (files that will be promoted to error severity) +# 3. Summary of remaining findings per rule/severity +# 4. Matched patterns from typescript-standards.mdc (full XML blocks) +# 5. Unmatched rules (need new patterns added) +# +# Exit codes: +# 0 - No remaining lint findings after auto-fix +# 1 - Remaining lint findings after auto-fix +# 2 - Error (missing files, eslint runtime/config failure, etc.) +set -euo pipefail + +PATTERNS_FILE="$HOME/.cursor/rules/typescript-standards.mdc" + +if [[ $# -eq 0 ]]; then + echo "Usage: lint-warnings.sh <file1> [file2] ..." 
>&2 + exit 2 +fi + +# Filter to existing .ts/.tsx files +FILES=() +for f in "$@"; do + if [[ ("$f" == *.ts || "$f" == *.tsx) && -f "$f" ]]; then + FILES+=("$f") + fi +done + +if [[ ${#FILES[@]} -eq 0 ]]; then + echo "No .ts/.tsx files found" >&2 + exit 2 +fi + +# Run eslint with --fix, then classify any remaining lint findings. +TMP_JSON="$(mktemp)" +TMP_ERR="$(mktemp)" +trap 'rm -f "$TMP_JSON" "$TMP_ERR"' EXIT + +set +e +./node_modules/.bin/eslint --fix --format json "${FILES[@]}" >"$TMP_JSON" 2>"$TMP_ERR" +ESLINT_EXIT=$? +set -e + +node -e ' +const fs = require("fs"); +const path = require("path"); + +const patternsFile = process.argv[1]; +const jsonFile = process.argv[2]; +const errFile = process.argv[3]; +const eslintExit = Number(process.argv[4]); + +let input = ""; +let stderrText = ""; +try { + input = fs.readFileSync(jsonFile, "utf8"); +} catch (error) { + console.error("Failed to read eslint JSON output"); + process.exit(2); +} + +try { + stderrText = fs.readFileSync(errFile, "utf8").trim(); +} catch (error) { + stderrText = ""; +} + +if (input.trim() === "") { + if (stderrText !== "") console.error(stderrText); + console.error("ESLint produced no JSON output"); + process.exit(2); +} + +let results; +try { + results = JSON.parse(input); +} catch (error) { + if (stderrText !== "") console.error(stderrText); + console.error("Failed to parse eslint output"); + process.exit(2); +} + +if (!Array.isArray(results)) { + console.error("Unexpected eslint JSON format"); + process.exit(2); +} + +// --- Graduation detection --- +// Parse eslint.config.mjs to find files in the warning-suppression list. +// These files currently have certain rules at "warn" severity, but committing +// them removes them from the list (via update-eslint-warnings), promoting +// those rules to "error". We detect this ahead of time so the agent can fix +// them in a lint-fix commit before the feature commit. 
+const GRADUATED_RULES = new Set([ + "@typescript-eslint/ban-ts-comment", + "@typescript-eslint/explicit-function-return-type", + "@typescript-eslint/strict-boolean-expressions", + "@typescript-eslint/use-unknown-in-catch-callback-variable" +]); + +const suppressedFiles = new Set(); +try { + const configPath = path.join(process.cwd(), "eslint.config.mjs"); + const configContent = fs.readFileSync(configPath, "utf8"); + // Extract file paths from the suppression block (single-quoted strings) + for (const m of configContent.matchAll(/^\s+\x27([^\x27]+)\x27,?\s*$/gm)) { + suppressedFiles.add(m[1]); + } +} catch (error) { + // No eslint.config.mjs or parse failure — skip graduation detection +} + +const findingsBySeverity = new Map([ + [2, new Map()], + [1, new Map()] +]); +let totalErrors = 0; +let totalWarnings = 0; +let graduatedCount = 0; +let autoFixedFiles = 0; + +for (const file of results) { + if (file != null && typeof file.output === "string") autoFixedFiles += 1; + + const rel = path.relative(process.cwd(), file.filePath); + const willGraduate = suppressedFiles.has(rel); + + for (const message of file.messages) { + if (message.severity !== 1 && message.severity !== 2) continue; + + const rule = message.ruleId || "unknown"; + + // Promote suppressed-rule warnings to errors for files that will graduate + let effectiveSeverity = message.severity; + if (willGraduate && message.severity === 1 && GRADUATED_RULES.has(rule)) { + effectiveSeverity = 2; + graduatedCount += 1; + } + + const findingsForSeverity = findingsBySeverity.get(effectiveSeverity); + if (!findingsForSeverity.has(rule)) { + findingsForSeverity.set(rule, []); + } + findingsForSeverity.get(rule).push({ + file: rel, + line: message.line, + message: message.message + }); + + if (effectiveSeverity === 2) totalErrors += 1; + else totalWarnings += 1; + } +} + +if (eslintExit > 1 && totalErrors === 0 && totalWarnings === 0) { + if (stderrText !== "") console.error(stderrText); + console.error("ESLint 
failed before reporting lint findings"); + process.exit(2); +} + +if (autoFixedFiles > 0) { + console.log(`>> Auto-fixed ${autoFixedFiles} file(s)`); +} + +if (graduatedCount > 0) { + console.log(`>> ${graduatedCount} warning(s) promoted to errors (graduation: file will be removed from suppression list on commit)`); +} + +if (totalErrors === 0 && totalWarnings === 0) { + console.log(">> No remaining lint findings"); + process.exit(0); +} + +let patternsContent = ""; +try { + patternsContent = fs.readFileSync(patternsFile, "utf8"); +} catch (error) { + console.error("Warning: Could not read patterns file:", patternsFile); +} + +const patternRegex = /<pattern\s+id="([^"]+)"\s+rule="([^"]+)">([\s\S]*?)<\/pattern>/g; +const patterns = new Map(); +let match; +while ((match = patternRegex.exec(patternsContent)) !== null) { + const [fullMatch, id, rule] = match; + if (!patterns.has(rule)) { + patterns.set(rule, []); + } + patterns.get(rule).push({ id, fullMatch }); +} + +if (totalErrors > 0) { + console.log(`>> ${totalErrors} remaining error(s)`); +} +if (totalWarnings > 0) { + console.log(`>> ${totalWarnings} remaining warning(s)`); +} + +const printFindings = (heading, findingsByRule) => { + if (findingsByRule.size === 0) return; + + console.log(`\n=== ${heading} ===`); + for (const [rule, instances] of [...findingsByRule.entries()].sort((left, right) => right[1].length - left[1].length)) { + console.log(`\n${rule} (${instances.length}x):`); + for (const inst of instances.slice(0, 3)) { + console.log(` ${inst.file}:${inst.line} - ${inst.message}`); + } + if (instances.length > 3) { + console.log(` ... 
and ${instances.length - 3} more`); + } + } +}; + +printFindings("Remaining Errors by Rule", findingsBySeverity.get(2)); +printFindings("Remaining Warnings by Rule", findingsBySeverity.get(1)); + +const matchedRules = []; +const unmatchedRules = []; +const seenRules = new Set(); +for (const findingsByRule of findingsBySeverity.values()) { + for (const rule of findingsByRule.keys()) { + if (seenRules.has(rule)) continue; + seenRules.add(rule); + if (patterns.has(rule)) { + matchedRules.push(rule); + } else { + unmatchedRules.push(rule); + } + } +} + +if (matchedRules.length > 0) { + console.log("\n\n=== Matched Fix Patterns ==="); + for (const rule of matchedRules) { + for (const pattern of patterns.get(rule)) { + console.log(`\n${pattern.fullMatch}`); + } + } +} + +if (unmatchedRules.length > 0) { + console.log("\n\n=== Unmatched Rules (need patterns added) ==="); + for (const rule of unmatchedRules) { + console.log(`- ${rule}`); + } + console.log("\nAfter fixing these, add patterns to ~/.cursor/rules/typescript-standards.mdc"); +} + +process.exit(1); +' -- "$PATTERNS_FILE" "$TMP_JSON" "$TMP_ERR" "$ESLINT_EXIT" diff --git a/.cursor/skills/im/scripts/reorder-commits.sh b/.cursor/skills/im/scripts/reorder-commits.sh new file mode 100755 index 0000000..700c285 --- /dev/null +++ b/.cursor/skills/im/scripts/reorder-commits.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +# reorder-commits.sh +# Reorder commits on a branch to a specified order using non-interactive rebase. +# +# Usage: +# reorder-commits.sh <base-branch> <hash1> <hash2> ... +# +# Arguments: +# base-branch The branch/ref to rebase onto (e.g., origin/develop) +# hash1..N Commit hashes in desired order (oldest to newest) +# +# The script verifies all hashes exist in base..HEAD, writes an awk-based +# GIT_SEQUENCE_EDITOR to reorder the pick lines, and runs git rebase -i. +# It verifies the tree is unchanged after rebase. 
+# +# Exit codes: +# 0 - Reorder successful +# 1 - Reorder failed (conflict, missing commits, tree mismatch) +set -euo pipefail + +if [[ $# -lt 3 ]]; then + echo "Usage: reorder-commits.sh <base-branch> <hash1> <hash2> ..." >&2 + exit 1 +fi + +BASE="$1" +shift +DESIRED_ORDER=("$@") + +# Remove stale index locks +rm -f .git/index.lock + +# Get short hashes for matching rebase todo lines +BRANCH_COMMITS=$(git log --reverse --format='%h' "$BASE..HEAD") +BRANCH_COUNT=$(echo "$BRANCH_COMMITS" | wc -l | tr -d ' ') +DESIRED_COUNT=${#DESIRED_ORDER[@]} + +if [[ "$BRANCH_COUNT" -ne "$DESIRED_COUNT" ]]; then + echo "Error: Branch has $BRANCH_COUNT commits but $DESIRED_COUNT hashes were provided" >&2 + echo "Branch commits: $BRANCH_COMMITS" >&2 + exit 1 +fi + +# Resolve desired hashes to short hashes and verify they're on the branch +DESIRED_SHORT=() +for hash in "${DESIRED_ORDER[@]}"; do + short=$(git rev-parse --short "$hash" 2>/dev/null) || { + echo "Error: Cannot resolve hash '$hash'" >&2 + exit 1 + } + if ! 
echo "$BRANCH_COMMITS" | grep -q "^${short}$"; then + echo "Error: Commit $short is not in $BASE..HEAD" >&2 + exit 1 + fi + DESIRED_SHORT+=("$short") +done + +# Capture pre-rebase tree for verification +PRE_TREE=$(git rev-parse HEAD^{tree}) + +# Build awk script that reorders pick lines to match desired order +# The awk program collects all pick lines, then outputs them in the order +# specified by the DESIRED env var (space-separated short hashes) +EDITOR_SCRIPT=$(mktemp) +trap 'rm -f "$EDITOR_SCRIPT"' EXIT + +cat > "$EDITOR_SCRIPT" << 'AWKSCRIPT' +#!/usr/bin/env bash +exec awk -v desired="$DESIRED" ' +BEGIN { + n = split(desired, order, " ") +} +/^pick / { + hash = $2 + lines[hash] = $0 + next +} +/^$/ || /^#/ { next } +END { + for (i = 1; i <= n; i++) { + for (h in lines) { + if (index(h, order[i]) == 1 || index(order[i], h) == 1) { + print lines[h] + break + } + } + } +} +' "$1" > "$1.tmp" && mv "$1.tmp" "$1" +AWKSCRIPT +chmod +x "$EDITOR_SCRIPT" + +export DESIRED="${DESIRED_SHORT[*]}" +if GIT_SEQUENCE_EDITOR="$EDITOR_SCRIPT" git rebase -i "$BASE" 2>/dev/null; then + POST_TREE=$(git rev-parse HEAD^{tree}) + if [[ "$PRE_TREE" == "$POST_TREE" ]]; then + echo ">> Commits reordered successfully" + git log --oneline "$BASE..HEAD" + else + echo "Error: Tree changed after reorder (pre: $PRE_TREE, post: $POST_TREE)" >&2 + echo "This indicates content was lost or modified during rebase." >&2 + exit 1 + fi +else + git rebase --abort 2>/dev/null || true + echo "Error: Rebase failed (likely conflict). Aborted." >&2 + exit 1 +fi diff --git a/.cursor/skills/install-deps.sh b/.cursor/skills/install-deps.sh new file mode 100755 index 0000000..fbe959e --- /dev/null +++ b/.cursor/skills/install-deps.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -euo pipefail + +# install-deps.sh — Install dependencies and run prepare script. +# Usage: install-deps.sh [repo-dir] +# +# Runs `yarn install` and `yarn prepare` (if prepare script exists in package.json). 
+# Use after: branch creation, rebase onto upstream, checkout. +# +# Exit codes: +# 0 = Success (or no package.json — skipped) +# 1 = Install or prepare failed + +repo_dir="${1:-.}" + +if [ ! -f "$repo_dir/package.json" ]; then + echo "⏭ No package.json — skipping dependency install" >&2 + exit 0 +fi + +echo "Installing dependencies..." >&2 +(cd "$repo_dir" && yarn install) + +if (cd "$repo_dir" && node -e "process.exit(require('./package.json').scripts?.prepare ? 0 : 1)" 2>/dev/null); then + echo "Running prepare..." >&2 + (cd "$repo_dir" && yarn prepare) +fi + +echo "✓ Dependencies installed and prepared" >&2 diff --git a/.cursor/skills/lint-commit.sh b/.cursor/skills/lint-commit.sh new file mode 100755 index 0000000..8b3d993 --- /dev/null +++ b/.cursor/skills/lint-commit.sh @@ -0,0 +1,327 @@ +#!/usr/bin/env bash +# lint-commit.sh +# Lint-fix, verify, localize (if needed), and commit in one atomic step. +# +# Usage: +# lint-commit.sh -m "commit message" [file ...] +# lint-commit.sh --fixup <hash> [file ...] +# lint-commit.sh -m "fixup! Original commit" [file ...] # Auto-reorders +# +# Options: +# -m "msg" Commit message (mutually exclusive with --fixup) +# --fixup <hash> Create a fixup commit targeting <hash> +# --reorder After fixup commit, rebase to place it after its target (default: true) +# --no-reorder Skip the reorder rebase +# +# If files are given, they are the primary scope for linting/committing. +# The script may also auto-include generated companion files like: +# - src/locales/strings +# - eslint.config.mjs +# - __snapshots__/*.snap +# Any additional non-generated files are reported before commit. +# If no files are given, all staged + unstaged + untracked changes are used. +# The script will: +# 1. Run eslint --fix on .ts/.tsx files +# 2. Run eslint --quiet to verify no remaining errors (exits 1 if any) +# 2b. Check for new warnings on changed lines (exits 1 if any) +# 3. Run yarn localize if the project has a localize script +# 4. 
git add -A && git commit --no-verify +# 5. Run yarn test --findRelatedTests -u on committed .ts/.tsx files +# 6. If snapshots changed, amend the commit to include them +# 7. If commit is a fixup (--fixup or -m "fixup! ..."), reorder via rebase +set -euo pipefail + +MESSAGE="" +FIXUP="" +REORDER="true" # Default to reordering fixups +FILES=() +PRIMARY_SCOPE_DECLARED="false" + +while [[ $# -gt 0 ]]; do + case "$1" in + -m) + MESSAGE="$2" + shift 2 + ;; + --fixup) + FIXUP="$2" + shift 2 + ;; + --reorder) + REORDER="true" + shift + ;; + --no-reorder) + REORDER="false" + shift + ;; + *) + FILES+=("$1") + shift + ;; + esac +done + +if [[ ${#FILES[@]} -gt 0 ]]; then + PRIMARY_SCOPE_DECLARED="true" +fi + +if [[ -z "$MESSAGE" && -z "$FIXUP" ]]; then + echo "Error: -m \"commit message\" or --fixup <hash> is required" >&2 + exit 1 +fi +if [[ -n "$MESSAGE" && -n "$FIXUP" ]]; then + echo "Error: -m and --fixup are mutually exclusive" >&2 + exit 1 +fi + +# If no files specified, collect all changed/untracked files +if [[ ${#FILES[@]} -eq 0 ]]; then + while IFS= read -r f; do + [[ -n "$f" ]] && FILES+=("$f") + done < <(git diff --name-only HEAD 2>/dev/null; git diff --name-only --cached 2>/dev/null; git ls-files --others --exclude-standard 2>/dev/null) + + # Deduplicate (compatible with macOS Bash 3.2 — no mapfile) + if [[ ${#FILES[@]} -gt 0 ]]; then + DEDUPED=() + while IFS= read -r f; do + [[ -n "$f" ]] && DEDUPED+=("$f") + done < <(printf '%s\n' "${FILES[@]}" | sort -u) + FILES=("${DEDUPED[@]}") + fi +fi + +if [[ ${#FILES[@]} -eq 0 ]]; then + echo "Error: No changed files found" >&2 + exit 1 +fi + +# Filter to lintable files (.ts/.tsx) that exist on disk +LINT_FILES=() +for f in "${FILES[@]}"; do + if [[ ("$f" == *.ts || "$f" == *.tsx) && -f "$f" ]]; then + LINT_FILES+=("$f") + fi +done + +# Step 1: eslint --fix +if [[ ${#LINT_FILES[@]} -gt 0 ]]; then + echo ">> eslint --fix (${#LINT_FILES[@]} files)" + ./node_modules/.bin/eslint --fix "${LINT_FILES[@]}" || true + + # Step 2: 
eslint --quiet (must pass) + echo ">> eslint --quiet (verify)" + if ! ./node_modules/.bin/eslint --quiet "${LINT_FILES[@]}"; then + echo "Error: Lint errors remain after --fix. Aborting commit." >&2 + exit 1 + fi + echo ">> Lint clean" + + # Step 2b: Detect new warnings introduced on changed lines. + # Runs eslint (with warnings) and cross-references against git diff to + # only flag warnings on lines the developer actually touched. + NEW_WARN=$(node -e ' +const { execSync } = require("child_process") +const path = require("path") + +const files = process.argv.slice(1) +const cmd = "./node_modules/.bin/eslint --format json " + files.map(f => JSON.stringify(f)).join(" ") + +let results +try { + results = JSON.parse(execSync(cmd, { encoding: "utf8", maxBuffer: 10 * 1024 * 1024 })) +} catch (e) { + if (e.stdout) try { results = JSON.parse(e.stdout) } catch { process.exit(0) } + else process.exit(0) +} + +const cwd = process.cwd() +const out = [] + +for (const r of results) { + const rel = path.relative(cwd, r.filePath) + const warns = r.messages.filter(m => m.severity === 1) + if (warns.length === 0) continue + + // Determine which lines were changed in this file + let changed + try { + execSync("git cat-file -e HEAD:" + JSON.stringify(rel), { stdio: "pipe" }) + const diff = execSync("git diff -U0 HEAD -- " + JSON.stringify(rel), { encoding: "utf8" }) + changed = new Set() + for (const m of diff.matchAll(/@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@/g)) { + const start = +m[1] + const count = m[2] != null ? 
+m[2] : 1 + for (let i = start; i < start + count; i++) changed.add(i) + } + } catch { + changed = null // New file — all lines count as changed + } + + for (const w of warns) { + if (changed == null || changed.has(w.line)) { + out.push(rel + ":" + w.line + ":" + w.column + " warning " + w.message + " " + w.ruleId) + } + } +} + +if (out.length > 0) console.log(out.join("\n")) +' -- "${LINT_FILES[@]}" 2>/dev/null || true) + + if [[ -n "$NEW_WARN" ]]; then + echo ">> New warnings on changed lines:" >&2 + echo "$NEW_WARN" >&2 + echo "Error: Fix new warnings before committing." >&2 + exit 1 + fi +fi + +# Step 3: yarn localize if the project has a localize script +if node -e "process.exit(require('./package.json').scripts?.localize ? 0 : 1)" 2>/dev/null; then + echo ">> yarn localize" + yarn localize +fi + +# Step 4: Stage files and report effective commit scope +if [[ "$PRIMARY_SCOPE_DECLARED" == "true" ]]; then + echo ">> git add (scoped) && git commit" + git add -- "${FILES[@]}" + # Stage generated companion files if they have changes + for companion in eslint.config.mjs; do + if [[ -f "$companion" ]] && ! git diff --quiet -- "$companion" 2>/dev/null; then + git add -- "$companion" + fi + done + # Stage locales/strings if yarn localize changed them (already git-added by + # yarn localize in some repos, but ensure they're staged) + if git diff --quiet --cached -- src/locales/strings 2>/dev/null; then + git diff --quiet -- src/locales/strings 2>/dev/null || git add -- src/locales/strings/ 2>/dev/null || true + fi +else + echo ">> git add -A && git commit" + git add -A +fi + +# Graduate files from eslint warning-override list if the repo has the script +if node -e "process.exit(require('./package.json').scripts?.['update-eslint-warnings'] ? 
0 : 1)" 2>/dev/null; then + echo ">> update-eslint-warnings" + npm run --silent update-eslint-warnings +fi + +if [[ "$PRIMARY_SCOPE_DECLARED" == "true" ]]; then + echo ">> commit scope report" + node -e ' +const { execSync } = require("child_process") + +const requested = [...new Set(process.argv.slice(1))].sort() +const staged = execSync("git diff --cached --name-only --diff-filter=ACMRD", { + encoding: "utf8" +}) + .split("\n") + .map(line => line.trim()) + .filter(Boolean) + .sort() + +const requestedSet = new Set(requested) +const isGeneratedCompanion = file => { + return ( + file === "eslint.config.mjs" || + file === "src/locales/strings" || + /(^|\/)__snapshots__\/.*\.snap$/.test(file) + ) +} + +const requestedStaged = [] +const generatedStaged = [] +const extraStaged = [] +for (const file of staged) { + if (requestedSet.has(file)) { + requestedStaged.push(file) + } else if (isGeneratedCompanion(file)) { + generatedStaged.push(file) + } else { + extraStaged.push(file) + } +} + +const missingRequested = requested.filter(file => !staged.includes(file)) + +const printGroup = (title, files) => { + if (files.length === 0) return + console.log(title) + for (const file of files) console.log("- " + file) +} + +printGroup("Primary scope staged:", requestedStaged) +printGroup("Auto-generated companion files staged:", generatedStaged) +printGroup("Additional non-generated files staged:", extraStaged) +printGroup("Requested files not staged:", missingRequested) + +if (extraStaged.length > 0) { + console.log("Proceeding with additional non-generated files by default.") +} +' -- "${FILES[@]}" +fi + +if [[ -n "$FIXUP" ]]; then + git commit --no-verify --fixup "$FIXUP" +else + git commit --no-verify -m "$MESSAGE" +fi + +# Step 5: Update snapshots for related tests (Jest only) +if [[ ${#LINT_FILES[@]} -gt 0 && -x ./node_modules/.bin/jest ]]; then + echo ">> jest --findRelatedTests -u (${#LINT_FILES[@]} files)" + ./node_modules/.bin/jest --findRelatedTests "${LINT_FILES[@]}" 
-u 2>&1 || true + + # Step 6: If snapshots changed, amend the commit + SNAP_CHANGES=$(git diff --name-only -- '**/__snapshots__/**' 2>/dev/null || true) + if [[ -n "$SNAP_CHANGES" ]]; then + echo ">> Snapshots updated, amending commit:" + echo "$SNAP_CHANGES" + if [[ "$PRIMARY_SCOPE_DECLARED" == "true" ]]; then + echo ">> Auto-generated companion files staged:" + echo "$SNAP_CHANGES" + fi + git add -- $SNAP_CHANGES + git commit --amend --no-edit --no-verify + else + echo ">> No snapshot changes" + fi +fi + +# Step 7: Reorder fixup commits to be adjacent to their targets +# Detects fixup commits by --fixup flag or "fixup! " prefix in message +IS_FIXUP="false" +if [[ -n "$FIXUP" ]]; then + IS_FIXUP="true" +elif [[ "$MESSAGE" == fixup!* ]]; then + IS_FIXUP="true" +fi + +if [[ "$IS_FIXUP" == "true" && "$REORDER" == "true" ]]; then + echo ">> Reordering fixup commit..." + + # Find the merge-base with the default upstream branch + DEFAULT_UPSTREAM=$(git symbolic-ref --quiet --short refs/remotes/origin/HEAD 2>/dev/null \ + || echo "origin/$(git remote show origin 2>/dev/null | sed -n '/HEAD branch/s/.*: //p')" \ + || echo "origin/master") + + BASE=$(git merge-base "$DEFAULT_UPSTREAM" HEAD 2>/dev/null || echo "") + + if [[ -n "$BASE" ]]; then + # Interactive rebase with autosquash to reorder (editor does nothing, so commits aren't squashed) + if GIT_EDITOR=true git -c sequence.editor=: rebase -i "$BASE" --autosquash 2>/dev/null; then + echo ">> Fixup reordered successfully" + else + # Rebase failed (likely conflict) - abort and warn + git rebase --abort 2>/dev/null || true + echo ">> Warning: Could not reorder fixup (conflict). Fixup remains at HEAD." >&2 + echo ">> Run 'git rebase -i --autosquash $BASE' manually to reorder." 
>&2 + fi + else + echo ">> Warning: Could not determine merge-base for reorder" >&2 + fi +fi + +echo ">> Done" diff --git a/.cursor/skills/one-shot/SKILL.md b/.cursor/skills/one-shot/SKILL.md new file mode 100644 index 0000000..e2b84c1 --- /dev/null +++ b/.cursor/skills/one-shot/SKILL.md @@ -0,0 +1,70 @@ +--- +name: one-shot +description: "End-to-end flow for a task: plan/context, implementation, PR creation, and Asana attach/assign in one command." +compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana integration. ASANA_GITHUB_SECRET for PR attachment. +metadata: + author: j0ntz +--- + +<goal>Run the full legacy-style task-to-PR workflow in one command by orchestrating `/asana-plan`, `/im`, and `/pr-create`.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="orchestrate-existing-skills">Do not re-implement logic already defined in `/asana-plan`, `/im`, or `/pr-create`. Delegate to those skills.</rule> +<rule id="attach-and-assign-default">By default, invoke `/pr-create` with both `--asana-attach` and `--asana-assign`.</rule> +<rule id="hands-off-assignment">This workflow is hands-off. If reviewer assignment cannot be resolved from task state or explicit input, let `/pr-create` skip assignment rather than pausing for reviewer input.</rule> +<rule id="task-gid-required-for-asana-flags">If Asana attach/assign flags are active, a task GID must be available from the Asana URL input or explicit `--asana-task` flag; otherwise fail fast.</rule> +<rule id="no-script-bypass">If any delegated skill or companion script fails, report and stop. Do not bypass with manual alternatives.</rule> +<rule id="pr-body-owned-by-pr-create">Do not draft alternate PR markdown formats inside this workflow. `/pr-create` owns PR body generation and template compliance.</rule> +</rules> + +<step id="1" name="Collect input"> +Accept one of: + +1. Asana task URL +2.
Text/file requirements + +Optional flags: + +- `--asana-task <gid>` (explicit Asana GID override) +- `--no-asana-attach` +- `--no-asana-assign` +</step> + +<step id="2" name="Plan/context phase"> +Run `/asana-plan` with the provided input mode: + +- Asana URL mode: fetch task context and create plan +- Text/file mode: create plan from provided requirements + +Wait for user confirmation handled by `/asana-plan`. +</step> + +<step id="3" name="Implementation phase"> +Run `/im` using the approved `/asana-plan` output. +</step> + +<step id="4" name="PR phase"> +Run `/pr-create` with defaults: + +- include `--asana-attach` unless `--no-asana-attach` +- include `--asana-assign` unless `--no-asana-assign` + +Task GID source priority: + +1. explicit `--asana-task <gid>` +2. Asana task URL from step 1 +3. chat context from prior steps +</step> + +<step id="5" name="Report"> +Return the final PR URL and which delegated phases ran: + +- planning: `/asana-plan` +- implementation: `/im` +- PR creation: `/pr-create` +</step> + +<edge-cases> +<case name="No Asana input with attach/assign enabled">Fail fast and ask for `--asana-task <gid>` or disable flags with `--no-asana-attach` / `--no-asana-assign`.</case> +<case name="Ad-hoc text task">Allow workflow with `--no-asana-attach --no-asana-assign` when no task link/GID exists.</case> +</edge-cases> diff --git a/.cursor/skills/pr-address/SKILL.md b/.cursor/skills/pr-address/SKILL.md new file mode 100644 index 0000000..f5972cd --- /dev/null +++ b/.cursor/skills/pr-address/SKILL.md @@ -0,0 +1,199 @@ +--- +name: pr-address +description: Address PR feedback with fixup commits, resolving each comment after replying. Use when the user wants to address review comments on a pull request. +compatibility: Requires git, gh. 
+metadata: + author: j0ntz +--- + +<goal>Address PR feedback with fixup commits, resolving each comment after replying with how it was addressed.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="use-companion-script">Do NOT call `gh` directly. Use `~/.cursor/skills/pr-address/scripts/pr-address.sh` for all GitHub API interactions (it uses `gh` internally).</rule> +<rule id="no-script-bypass">If a companion script fails, report the error and STOP. Do NOT fall back to raw `gh`, `curl`, or other workarounds.</rule> +<rule id="no-git-editor">All git commands that may open an editor (`rebase --continue`, `commit` without `-m`) MUST be prefixed with `GIT_EDITOR=true` to prevent blocking on `COMMIT_EDITMSG` in the IDE.</rule> +<rule id="no-gitkraken">NEVER use `git_log_or_diff:GitKraken`. Use local `git` commands directly.</rule> +<rule id="this-file-wins">If any other instruction conflicts with this file, **this file wins** for `pr-address`.</rule> +<rule id="commit-via-script">Commit fixups using `~/.cursor/skills/lint-commit.sh --no-reorder -m "fixup! {headline}" [files...]`. `--no-reorder` is required — the default reorder runs `rebase --autosquash` which squashes fixups immediately, conflicting with step 4's conditional autosquash. Do NOT manually run eslint — the commit script handles it.</rule> +<rule id="script-timeouts">GitHub API scripts can take up to 30s. Set `block_until_ms: 60000` when invoking `pr-address.sh`.</rule> +<rule id="reply-before-resolve">ALWAYS reply explaining how a comment was addressed BEFORE resolving or marking it. No silent resolutions.</rule> +<rule id="resolution-source-of-truth">Only explicitly resolved threads (`isResolved: true`) or `<!-- addressed:... -->` markers count as resolved. Recency (commits after a comment) does NOT mean resolved.</rule> +</rules> + +<step id="1" name="Fetch all unresolved feedback and PR body"> +Always fetch live from GitHub. 
Run both in parallel: + +```bash +# Fetch unresolved feedback +~/.cursor/skills/pr-address/scripts/pr-address.sh fetch --owner <OWNER> --repo <REPO> --pr <NUMBER> + +# Populate /tmp/pr-body.md from the live PR body (source of truth) +~/.cursor/skills/pr-address/scripts/pr-address.sh fetch-pr-body --owner <OWNER> --repo <REPO> --pr <NUMBER> +``` + +If either script exits code 2 with `PROMPT_GH_AUTH`, prompt: "`gh` CLI is not authenticated. Please run: `gh auth login`" + +The `fetch` output contains: +- **prAuthor**: The PR author's GitHub username +- **currentUser**: Your GitHub username (the authenticated `gh` user) +- **hasHumanReviewers**: `true` if any external human reviewer (not `currentUser`, not bots) has commented — used for autosquash decision +- **humanReviewers**: List of external human reviewer usernames +- **threads**: All unresolved inline review threads (includes comments from `currentUser` for context) +- **reviewBodies**: Latest review body per non-author reviewer (excludes `prAuthor` and bots) +- **topLevel**: Top-level comments (excludes `prAuthor` and bots) + +To inspect a specific inline thread, including an already-resolved one, use: + +```bash +~/.cursor/skills/pr-address/scripts/pr-address.sh fetch-thread \ + --owner <OWNER> --repo <REPO> --pr <NUMBER> \ + --thread-id "<PRRT_threadNodeId>" +``` + +The `fetch-pr-body` call writes the current PR body to `/tmp/pr-body.md`. This file is available for editing throughout the session. If you need to update the PR body (e.g. to revise the description after addressing feedback), edit `/tmp/pr-body.md` via the Write tool and push it back: + +```bash +gh pr edit <NUMBER> --body-file /tmp/pr-body.md +``` +</step> + +<step id="2" name="Process all unresolved feedback"> +Address every item returned by `fetch`. Group inline threads by file. If the user provided specific files, scope to those only. 
+ +<sub-step name="Determine fixup target"> +Ask: **"Which commit introduced the behavior/code this comment is about?"** + +- List commits touching the file: `git log --oneline -- <file>` +- A specific line/function → fixup the commit that introduced it +- A missing feature/behavior → fixup the commit that should have included it +- A pattern/style issue → fixup the earliest commit where it appears +- Ambiguous → ask the user + +Get the target commit headline: +```bash +git log -1 --format='%s' <commit_sha> +``` +</sub-step> + +<sub-step name="Apply fixes"> +1. Read each file with comments +2. Apply changes — comment hunks can be narrower than intent; apply consistently within the function/file +3. Commit using `lint-commit.sh`: + ```bash + ~/.cursor/skills/lint-commit.sh --no-reorder -m "fixup! {targetHeadline}" [files...] + ``` +</sub-step> + +<sub-step name="Push fixup commits"> +After all fixup commits are created, push to the remote so the reviewer can see the changes referenced in replies: + +```bash +git push +``` +</sub-step> +</step> + +<step id="3" name="Reply and resolve each comment"> +After fixing, reply to every processed comment — addressed or rejected — then resolve it. + +<sub-step name="Inline threads (reply → resolve)"> +If a later fix may affect an already-addressed inline thread, inspect the thread first: + +```bash +~/.cursor/skills/pr-address/scripts/pr-address.sh fetch-thread \ + --owner <OWNER> --repo <REPO> --pr <NUMBER> \ + --thread-id "<PRRT_threadNodeId>" +``` + +Use the returned history to decide whether the existing reply still fully reflects the latest fix. If it does not, add one new factual follow-up reply. Multiple replies in the same thread are acceptable when they capture materially new fixes. + +1. 
Reply to the first comment in the thread: + ```bash + ~/.cursor/skills/pr-address/scripts/pr-address.sh reply \ + --owner <OWNER> --repo <REPO> --pr <NUMBER> \ + --comment-id <NUMERIC_ID> --body "<what was fixed>" + ``` + + If the comment ID is a GraphQL node ID, resolve to numeric first: + ```bash + ~/.cursor/skills/pr-address/scripts/pr-address.sh resolve-id \ + --owner <OWNER> --repo <REPO> --pr <NUMBER> \ + --node-id "<PRRC_nodeId>" + ``` + +2. Then mark the thread as resolved: + ```bash + ~/.cursor/skills/pr-address/scripts/pr-address.sh resolve-thread --thread-id "<PRRT_threadNodeId>" + ``` +</sub-step> + +<sub-step name="Review bodies and top-level comments (reply → mark addressed)"> +These have no native resolution mechanism. Post a top-level comment with a machine-readable marker: + +```bash +~/.cursor/skills/pr-address/scripts/pr-address.sh mark-addressed \ + --owner <OWNER> --repo <REPO> --pr <NUMBER> \ + --type <review|comment> --target-id <NUMERIC_ID> \ + --body "<what was fixed>" +``` + +The script appends `<!-- addressed:review:ID -->` or `<!-- addressed:comment:ID -->` to the body. Subsequent `fetch` calls detect these markers and exclude already-addressed items. + +**Skip bot-only no-op items**: If a review body or top-level comment is from a bot user (e.g., `cursor`, `chatgpt-codex-connector`) AND contains no inline threads with actionable suggestions — only a summary or status message — do NOT post a `mark-addressed` comment. Human reviewer items must always be addressed or rejected, even terse ones like "This needs work". +</sub-step> + +<sub-step name="Reply guidelines"> +- **Addressed**: State what was fixed. Factual, 1 sentence. +- **Invalid/false-positive**: Brief evidence citing code paths or logic. 1-3 sentences. +- No pleasantries. Factual tone only. +</sub-step> +</step> + +<step id="4" name="Autosquash (only when no external human reviewers)"> +Only autosquash if `hasHumanReviewers` is `false`. 
This means no external human reviewer (someone other than `currentUser`) has commented. + +Autosquash is **allowed** when only: +- Automated reviewers (`cursor`, `chatgpt-codex-connector`, or other bots) commented, OR +- `currentUser` commented (your own notes/action items) + +Autosquash is **blocked** when: +- Any external human reviewer has commented — they are actively reviewing and need to see the fixup commits + +If `hasHumanReviewers` is `true`, **do NOT autosquash**. Leave fixup commits visible for human reviewers to verify before squashing on merge. + +When autosquashing is allowed: +```bash +~/.cursor/skills/pr-address/scripts/pr-address.sh autosquash +``` + +If conflicts occur, resolve them, then: `GIT_EDITOR=true git rebase --continue`. If a commit becomes empty after squashing: `git rebase --skip`. + +Force push is required after autosquash because the rebase rewrites history: +```bash +git push --force-with-lease +``` +</step> + +<step id="5" name="Verification"> +Run full verification to catch issues introduced by fixup commits: + +```bash +~/.cursor/skills/verify-repo.sh . --base <upstream-ref> +``` + +Where `<upstream-ref>` is `origin/develop` for `edge-react-gui` or `origin/master` for other repos. Set `block_until_ms: 120000`. + +If verification fails, fix the issue with another fixup commit, then re-run verification. +</step> + +<step id="6" name="Post-processing"> +Propose modifications to `~/.cursor/rules/typescript-standards.mdc` to prevent similar review comments in the future. Prompt for confirmation before applying. +</step> + +<edge-cases> +<case name="No gh auth">Script exits code 2 with `PROMPT_GH_AUTH`. Prompt user to run `gh auth login` and STOP.</case> +<case name="No unresolved feedback">Report "No unresolved comments on this PR" and STOP.</case> +<case name="External human reviewer comments">Do NOT autosquash when `hasHumanReviewers` is true. 
Leave fixup commits for the external reviewer to verify, then squash on merge.</case> +<case name="Comment already addressed in code">If the current code already handles the feedback (e.g., from a previous fixup), still reply explaining this and resolve/mark the comment. Do not leave it unresolved.</case> +<case name="Already resolved thread needs follow-up">Fetch the thread history first. If the prior reply no longer reflects the latest fix, post one additional factual follow-up reply. Do not edit or delete prior replies in this workflow.</case> +</edge-cases> diff --git a/.cursor/skills/pr-address/scripts/pr-address.sh b/.cursor/skills/pr-address/scripts/pr-address.sh new file mode 100755 index 0000000..fd4bcd6 --- /dev/null +++ b/.cursor/skills/pr-address/scripts/pr-address.sh @@ -0,0 +1,331 @@ +#!/usr/bin/env bash +# pr-address.sh +# Companion script for pr-address.md +# Handles deterministic operations: comment fetching, replies, thread resolution, autosquash. +# +# Subcommands: +# fetch --owner <o> --repo <r> --pr <n> Fetch all unresolved feedback via GraphQL +# fetch-thread --owner <o> --repo <r> --pr <n> --thread-id <id> +# reply --owner <o> --repo <r> --pr <n> --comment-id <id> --body <text> +# resolve-thread --thread-id <node_id> Mark inline thread as resolved (GraphQL) +# mark-addressed --owner <o> --repo <r> --pr <n> --type <review|comment> --target-id <id> --body <text> +# resolve-id --owner <o> --repo <r> --pr <n> --node-id <id> +# headline --owner <o> --repo <r> --sha <sha> +# fetch-pr-body --owner <o> --repo <r> --pr <n> Fetch current PR body → /tmp/pr-body.md +# autosquash Rebase --autosquash from merge-base +# +# Exit codes: 0 = success, 1 = error, 2 = needs user input (e.g. 
gh not authenticated) +set -euo pipefail + +CMD="${1:-}" +shift || true + +OWNER="" REPO="" PR="" COMMENT_ID="" NODE_ID="" BODY="" SHA="" THREAD_ID="" TARGET_TYPE="" TARGET_ID="" +while [[ $# -gt 0 ]]; do + case "$1" in + --owner) OWNER="$2"; shift 2 ;; + --repo) REPO="$2"; shift 2 ;; + --pr) PR="$2"; shift 2 ;; + --comment-id) COMMENT_ID="$2"; shift 2 ;; + --node-id) NODE_ID="$2"; shift 2 ;; + --body) BODY="$2"; shift 2 ;; + --sha) SHA="$2"; shift 2 ;; + --thread-id) THREAD_ID="$2"; shift 2 ;; + --type) TARGET_TYPE="$2"; shift 2 ;; + --target-id) TARGET_ID="$2"; shift 2 ;; + *) echo "Unknown arg: $1" >&2; exit 1 ;; + esac +done + +require_gh() { + if ! command -v gh &>/dev/null; then + echo "PROMPT_GH_INSTALL" >&2; exit 2 + fi + if ! gh auth status &>/dev/null 2>&1; then + echo "PROMPT_GH_AUTH" >&2; exit 2 + fi +} + +case "$CMD" in + fetch) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" ]]; then + echo "Error: --owner, --repo, --pr required" >&2; exit 1 + fi + + gh api graphql \ + -f query='query($owner: String!, $repo: String!, $number: Int!) 
{ + repository(owner: $owner, name: $repo) { + pullRequest(number: $number) { + author { login } + headRefName + baseRefName + reviewThreads(first: 100) { + nodes { + id + isResolved + comments(first: 50) { + nodes { + databaseId + createdAt + author { login } + path + line + body + } + } + } + } + reviews(last: 50) { + nodes { + databaseId + author { login } + state + body + submittedAt + } + } + comments(last: 50) { + nodes { + databaseId + createdAt + author { login } + body + } + } + } + } + }' \ + -f owner="$OWNER" -f repo="$REPO" -F number="$PR" \ + | GH_USER=$(gh api user --jq '.login') node -e " + const fs = require('fs') + const data = JSON.parse(fs.readFileSync('/dev/stdin', 'utf8')) + const pr = data.data.repository.pullRequest + const prAuthor = pr.author?.login + const currentUser = process.env.GH_USER + + const addressedIds = new Set() + for (const c of pr.comments.nodes) { + for (const m of (c.body || '').matchAll(/<!-- addressed:(?:review|comment):(\d+) -->/g)) { + addressedIds.add(Number(m[1])) + } + } + + const isBot = u => !u || u.includes('[bot]') || u === 'cursor' + const isAutomatedReviewer = u => isBot(u) || u === 'chatgpt-codex-connector' + + const threads = pr.reviewThreads.nodes + .filter(t => !t.isResolved) + .map(t => ({ + threadId: t.id, + path: t.comments.nodes[0]?.path, + line: t.comments.nodes[0]?.line, + comments: t.comments.nodes.map(c => ({ + id: c.databaseId, + user: c.author?.login, + body: c.body, + createdAt: c.createdAt + })) + })) + + // Check if any human (non-bot, non-automated, non-currentUser) reviewer has commented + // prAuthor CAN be an external human reviewer if they're not currentUser + const humanCommenters = new Set() + for (const t of threads) { + for (const c of t.comments) { + if (c.user && !isAutomatedReviewer(c.user) && c.user !== currentUser) { + humanCommenters.add(c.user) + } + } + } + + const latestByUser = {} + for (const r of pr.reviews.nodes) { + const user = r.author?.login + if (!user || user === 
prAuthor || r.state === 'PENDING' || isBot(user)) continue + const prev = latestByUser[user] + if (!prev || new Date(r.submittedAt) > new Date(prev.submittedAt)) { + latestByUser[user] = r + } + if (!isAutomatedReviewer(user) && user !== currentUser) { + humanCommenters.add(user) + } + } + const reviewBodies = Object.entries(latestByUser) + .filter(([, r]) => r.body?.trim() && !addressedIds.has(r.databaseId)) + .map(([user, r]) => ({ + reviewId: r.databaseId, user, state: r.state, + body: r.body, submittedAt: r.submittedAt + })) + + const topLevel = pr.comments.nodes.filter(c => { + const user = c.author?.login + if (!user || user === prAuthor || isBot(user)) return false + if ((c.body || '').includes('<!-- addressed:')) return false + if (!isAutomatedReviewer(user) && user !== currentUser) { + humanCommenters.add(user) + } + return !addressedIds.has(c.databaseId) + }).map(c => ({ + id: c.databaseId, user: c.author?.login, + body: c.body, createdAt: c.createdAt + })) + + console.log(JSON.stringify({ + prAuthor, currentUser, headRef: pr.headRefName, baseRef: pr.baseRefName, + hasHumanReviewers: humanCommenters.size > 0, + humanReviewers: Array.from(humanCommenters), + threads, reviewBodies, topLevel + }, null, 2)) + " + ;; + + fetch-thread) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" || -z "$THREAD_ID" ]]; then + echo "Error: --owner, --repo, --pr, --thread-id required" >&2; exit 1 + fi + + gh api graphql \ + -f query='query($owner: String!, $repo: String!, $number: Int!) 
{ + repository(owner: $owner, name: $repo) { + pullRequest(number: $number) { + reviewThreads(first: 100) { + nodes { + id + isResolved + comments(first: 50) { + nodes { + databaseId + createdAt + author { login } + path + line + body + } + } + } + } + } + } + }' \ + -f owner="$OWNER" -f repo="$REPO" -F number="$PR" \ + | GH_THREAD_ID="$THREAD_ID" node -e " + const fs = require('fs') + const data = JSON.parse(fs.readFileSync('/dev/stdin', 'utf8')) + const threads = data.data.repository.pullRequest.reviewThreads.nodes + const thread = threads.find(item => item.id === process.env.GH_THREAD_ID) + if (thread == null) { + console.error('Thread not found: ' + process.env.GH_THREAD_ID) + process.exit(1) + } + + console.log(JSON.stringify({ + threadId: thread.id, + isResolved: thread.isResolved, + path: thread.comments.nodes[0]?.path ?? null, + line: thread.comments.nodes[0]?.line ?? null, + comments: thread.comments.nodes.map(comment => ({ + id: comment.databaseId, + user: comment.author?.login ?? null, + body: comment.body, + createdAt: comment.createdAt + })) + }, null, 2)) + " + ;; + + reply) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" || -z "$COMMENT_ID" || -z "$BODY" ]]; then + echo "Error: --owner, --repo, --pr, --comment-id, --body required" >&2; exit 1 + fi + RESULT=$(echo '{}' | jq --arg body "$BODY" '{body: $body}' | \ + gh api "repos/$OWNER/$REPO/pulls/$PR/comments/$COMMENT_ID/replies" \ + -X POST --input -) + ID=$(echo "$RESULT" | jq -r '.id // empty') + if [[ -n "$ID" ]]; then + echo "replied: $ID" + else + echo "Reply failed: $RESULT" >&2; exit 1 + fi + ;; + + resolve-thread) + require_gh + if [[ -z "$THREAD_ID" ]]; then + echo "Error: --thread-id required" >&2; exit 1 + fi + RESULT=$(gh api graphql \ + -f query='mutation($id: ID!) 
{ resolveReviewThread(input: {threadId: $id}) { thread { id isResolved } } }' \ + -f id="$THREAD_ID") + RESOLVED=$(echo "$RESULT" | jq -r '.data.resolveReviewThread.thread.isResolved // empty') + if [[ "$RESOLVED" == "true" ]]; then + echo "resolved: $THREAD_ID" + else + echo "Resolve failed: $RESULT" >&2; exit 1 + fi + ;; + + mark-addressed) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" || -z "$TARGET_TYPE" || -z "$TARGET_ID" || -z "$BODY" ]]; then + echo "Error: --owner, --repo, --pr, --type, --target-id, --body required" >&2; exit 1 + fi + MARKER="<!-- addressed:${TARGET_TYPE}:${TARGET_ID} -->" + FULL_BODY="${BODY} ${MARKER}" + RESULT=$(echo '{}' | jq --arg body "$FULL_BODY" '{body: $body}' | \ + gh api "repos/$OWNER/$REPO/issues/$PR/comments" -X POST --input -) + ID=$(echo "$RESULT" | jq -r '.id // empty') + if [[ -n "$ID" ]]; then + echo "marked: $ID ($MARKER)" + else + echo "Mark failed: $RESULT" >&2; exit 1 + fi + ;; + + resolve-id) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" || -z "$NODE_ID" ]]; then + echo "Error: --owner, --repo, --pr, --node-id required" >&2; exit 1 + fi + RESULT=$(gh api "repos/$OWNER/$REPO/pulls/$PR/comments" --paginate \ + --jq ".[] | select(.node_id == \"$NODE_ID\") | .id") + if [[ -n "$RESULT" ]]; then + echo "$RESULT" + else + echo "Comment not found for node_id: $NODE_ID" >&2; exit 1 + fi + ;; + + headline) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$SHA" ]]; then + echo "Error: --owner, --repo, --sha required" >&2; exit 1 + fi + gh api "repos/$OWNER/$REPO/commits/$SHA" --jq '.commit.message | split("\n") | .[0]' + ;; + + fetch-pr-body) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" ]]; then + echo "Error: --owner, --repo, --pr required" >&2; exit 1 + fi + BODY=$(gh api "repos/$OWNER/$REPO/pulls/$PR" --jq '.body // ""') + echo "$BODY" > /tmp/pr-body.md + echo ">> Wrote PR body to /tmp/pr-body.md ($(wc -c < /tmp/pr-body.md | tr -d ' ') bytes)" + ;; + + autosquash) + 
DEFAULT_UPSTREAM=$(git symbolic-ref --quiet --short refs/remotes/origin/HEAD 2>/dev/null \ + || echo "origin/$(git remote show origin | sed -n '/HEAD branch/s/.*: //p')") + BASE=$(git merge-base "$DEFAULT_UPSTREAM" HEAD) + GIT_EDITOR=true git -c sequence.editor=: rebase -i "$BASE" --autosquash + echo ">> Autosquash complete" + ;; + + *) + echo "Usage: pr-address.sh {fetch|fetch-thread|reply|resolve-thread|mark-addressed|resolve-id|headline|fetch-pr-body|autosquash} [args]" >&2 + exit 1 + ;; +esac diff --git a/.cursor/skills/pr-create/SKILL.md b/.cursor/skills/pr-create/SKILL.md new file mode 100644 index 0000000..1c3428a --- /dev/null +++ b/.cursor/skills/pr-create/SKILL.md @@ -0,0 +1,106 @@ +--- +name: pr-create +description: Create a pull request from the current branch, with optional Asana attach/assign updates. +compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana updates. ASANA_GITHUB_SECRET for Asana PR attachment. +metadata: + author: j0ntz +--- + +<goal>Create a PR from the current branch, optionally attach it to Asana and assign reviewer.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="use-companion-script">Do NOT call `gh` directly for PR creation. Use `~/.cursor/skills/pr-create/scripts/pr-create.sh`.</rule> +<rule id="no-script-bypass">If a companion script fails, report the error and STOP. Do NOT fall back to raw `gh`, `curl`, or workarounds.</rule> +<rule id="gh-auth-required">If script exits code 2 with `PROMPT_GH_AUTH`, prompt user to run `gh auth login` and STOP.</rule> +<rule id="no-dirty-pr">Do NOT create a PR when there are uncommitted changes.</rule> +<rule id="no-base-push">Do NOT push to `master`/`develop` directly.</rule> +<rule id="verification-required">Run verification before creating the PR.</rule> +<rule id="flag-contract">`--asana-attach`/`--asana-assign` only run when a task GID is available from chat context or explicit `--asana-task <gid>`. 
If no task GID is available, fail fast and skip Asana updates.</rule> +<rule id="hands-off-assign">When `--asana-assign` is requested from a hands-off flow, pass `--skip-assign-if-missing` to `asana-task-update.sh` so missing reviewers do not block PR completion.</rule> +<rule id="script-timeouts">Asana updates can take up to 90s. Use `block_until_ms: 120000` for `asana-task-update.sh` calls.</rule> +<rule id="repo-template-required">If the repo has `.github/PULL_REQUEST_TEMPLATE.md`, the PR body must preserve that template's section headings. Do NOT substitute generic sections like `Summary` or `Test plan`.</rule> +</rules> + +<step id="1" name="Push branch"> +Push current branch if needed: + +```bash +git push -u origin HEAD +``` + +If tracking is already configured and branch is up to date, skip. +</step> + +<step id="2" name="Verification"> +Run: + +```bash +~/.cursor/skills/verify-repo.sh . --base <upstream-ref> +``` + +Use `origin/develop` for `edge-react-gui` and `origin/master` for other repos. +</step> + +<step id="3" name="Build PR description"> +Gather context in parallel: + +```bash +DEFAULT_BRANCH=$(git symbolic-ref --quiet --short refs/remotes/origin/HEAD 2>/dev/null | sed 's|origin/||' || git remote show origin 2>/dev/null | sed -n '/HEAD branch/s/.*: //p' || echo master) +git log origin/$DEFAULT_BRANCH..HEAD --format=%B--- +``` + +If `.github/PULL_REQUEST_TEMPLATE.md` exists, read it now and use it as the source of truth for the PR body structure. Fill in its existing sections and only append `### Description` if the template has no description section and branch context needs a place to live. + +If Asana context is available from chat or fetched via `--asana-task`, add it inside `### Description`. Do not invent alternate section sets such as `Summary` / `Test plan`. 
+</step> + +<step id="4" name="Create PR"> +Write body to `/tmp/pr-body.md`, then run: + +```bash +~/.cursor/skills/pr-create/scripts/pr-create.sh \ + --title "<title>" \ + --body-file /tmp/pr-body.md \ + [--asana-task <task_gid>] +``` + +The companion script validates body files against the repo template and rejects generic fallback sections on templated repos. Capture PR URL and number from JSON output. +</step> + +<step id="5" name="Optional Asana updates"> +If neither `--asana-attach` nor `--asana-assign` was requested, skip. + +If either flag is requested, resolve `task_gid` from: + +1. explicit `--asana-task <gid>` argument +2. chat context (previous task-review/im context) + +If no task GID is available, fail fast and report: + +> Asana flags were requested but no task GID was found in flags or chat context. + +Then call: + +```bash +~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \ + --task <task_gid> \ + [--attach-pr --pr-url <pr_url> --pr-title "<title>" --pr-number <number>] \ + [--assign --skip-assign-if-missing --set-status "Review Needed" --auto-est-review-hrs] +``` + +- `--asana-attach` maps to `--attach-pr ...` +- `--asana-assign` maps to `--assign --skip-assign-if-missing --set-status "Review Needed" --auto-est-review-hrs` +- If both are set, combine in one command. 
+</step> + +<step id="6" name="Report result"> +Display PR URL as a clickable markdown link: + +`[owner/repo#123](https://github.com/owner/repo/pull/123)` +</step> + +<edge-cases> +<case name="Branch already has an open PR">Report the existing PR URL and stop.</case> +<case name="No gh auth">Prompt user to run `gh auth login` and stop.</case> +<case name="Rebase needed">Ask user before rebasing and force-pushing.</case> +</edge-cases> diff --git a/.cursor/skills/pr-create/scripts/pr-create.sh b/.cursor/skills/pr-create/scripts/pr-create.sh new file mode 100755 index 0000000..eb40ead --- /dev/null +++ b/.cursor/skills/pr-create/scripts/pr-create.sh @@ -0,0 +1,331 @@ +#!/usr/bin/env node +// pr-create.sh — Creates a PR for the current branch using gh CLI. +// Usage: ./pr-create.sh [--title "PR title"] [--body-file <path>] [--draft] +// Reads from git context: repo owner/name, current branch, default branch. +// Outputs JSON with PR URL and number on success. + +const { execSync, spawnSync } = require("child_process"); +const fs = require("fs"); +const os = require("os"); +const path = require("path"); + +// Parse args +const args = process.argv.slice(2); +let title = null; +let bodyFile = null; +let draft = false; +let asanaTask = null; + +for (let i = 0; i < args.length; i++) { + if (args[i] === "--title" && args[i + 1]) title = args[++i]; + else if (args[i] === "--body-file" && args[i + 1]) bodyFile = args[++i]; + else if (args[i] === "--asana-task" && args[i + 1]) asanaTask = args[++i]; + else if (args[i] === "--draft") draft = true; +} + +function git(cmd) { + return execSync(`git ${cmd}`, { encoding: "utf8" }).trim(); +} + +function escapeRegExp(value) { + return value.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); +} + +function countOccurrences(haystack, needle) { + const matches = haystack.match(new RegExp(escapeRegExp(needle), "g")); + return matches == null ? 
0 : matches.length; +} + +function hasSection(bodyText, heading) { + return new RegExp(`^${escapeRegExp(heading)}$`, "m").test(bodyText); +} + +function extractTemplateHeadings(templateBody) { + return Array.from(templateBody.matchAll(/^### .+$/gm), match => match[0]); +} + +function setChecklistValue(bodyText, label, checked) { + const pattern = new RegExp( + `^- \\[[ x]\\] ${escapeRegExp(label)}$`, + "m" + ); + return bodyText.replace(pattern, `- [${checked ? "x" : " "}] ${label}`); +} + +function appendDescriptionSection(bodyText, description) { + if (description === "") return bodyText.trimEnd(); + return `${bodyText.trimEnd()}\n\n### Description\n\n${description}`; +} + +function insertAfterHeading(bodyText, heading, insertText) { + const headingPattern = new RegExp( + `^${escapeRegExp(heading)}\\n`, + "m" + ); + const match = headingPattern.exec(bodyText); + if (match == null) return null; + + const afterHeading = match.index + match[0].length; + const rest = bodyText.slice(afterHeading).replace(/^\n*/, ""); + return ( + bodyText.slice(0, afterHeading) + + `\n${insertText}\n\n` + + rest + ); +} + +function buildDescriptionFromCommits() { + try { + const log = git(`log origin/${defaultBranch}..HEAD --format=%B---`); + const messages = log + .split("---") + .map(message => message.trim()) + .filter(Boolean); + + if (messages.length === 1) { + const parts = messages[0].split("\n").filter(Boolean); + return parts.length > 1 ? 
parts.slice(1).join("\n") : "none"; + } + + return "none"; + } catch { + return "none"; + } +} + +function loadRepoTemplate() { + const templatePath = path.join(process.cwd(), ".github", "PULL_REQUEST_TEMPLATE.md"); + if (!fs.existsSync(templatePath)) return null; + + return { + path: templatePath, + body: fs.readFileSync(templatePath, "utf8").replace(/\r\n/g, "\n").trim() + }; +} + +function buildBodyFromTemplate(templateBody) { + let rendered = templateBody; + + if (hasSection(rendered, "### CHANGELOG")) { + rendered = setChecklistValue(rendered, "Yes", hasChangelog); + rendered = setChecklistValue(rendered, "No", !hasChangelog); + } + + const description = buildDescriptionFromCommits(); + return hasSection(rendered, "### Description") + ? rendered + : appendDescriptionSection(rendered, description); +} + +function validateBodyForTemplate(bodyText, templateInfo) { + if (templateInfo == null) return; + + const templateHeadings = extractTemplateHeadings(templateInfo.body); + const missingHeadings = templateHeadings.filter( + heading => !hasSection(bodyText, heading) + ); + if (missingHeadings.length > 0) { + console.error( + "ERROR: PR body is missing required template headings from " + + `${templateInfo.path}: ${missingHeadings.join(", ")}` + ); + process.exit(1); + } + + const genericSections = []; + if (/^## Summary$/m.test(bodyText)) genericSections.push("## Summary"); + if (/^## Test plan$/m.test(bodyText)) genericSections.push("## Test plan"); + if (genericSections.length > 0) { + console.error( + "ERROR: PR body uses generic sections for a repo with a PR template: " + + genericSections.join(", ") + ); + process.exit(1); + } +} + +function requireGh() { + const check = spawnSync("gh", ["auth", "status"], { encoding: "utf8" }); + if (check.status !== 0) { + console.error("PROMPT_GH_AUTH"); + process.exit(2); + } +} + +requireGh(); + +// Detect repo info from git +const remoteUrl = git("remote get-url origin"); +const normalizedRemoteUrl = 
remoteUrl.replace(/\/+$/, ""); +const match = normalizedRemoteUrl.match(/[:/]([^/]+)\/([^/.]+?)(?:\.git)?$/); +if (!match) { + console.error("ERROR: Could not parse owner/repo from remote:", remoteUrl); + process.exit(1); +} +const [, owner, repo] = match; + +const branch = git("rev-parse --abbrev-ref HEAD"); +if (["master", "develop", "HEAD"].includes(branch)) { + console.error( + `ERROR: Cannot create PR from '${branch}'. Switch to a feature branch.` + ); + process.exit(1); +} + +// Detect default branch +let defaultBranch; +try { + defaultBranch = git( + "symbolic-ref --quiet --short refs/remotes/origin/HEAD" + ).replace("origin/", ""); +} catch { + try { + const show = execSync("git remote show origin", { encoding: "utf8" }); + defaultBranch = + show.match(/HEAD branch:\s*(.+)/)?.[1]?.trim() || "master"; + } catch { + defaultBranch = "master"; + } +} + +let hasChangelog = false; +try { + const diff = git(`diff origin/${defaultBranch}..HEAD -- CHANGELOG.md`); + hasChangelog = + diff.includes("## Unreleased") || + /^\+- (added|changed|fixed):/m.test(diff); +} catch {} + +const templateInfo = loadRepoTemplate(); + +// Build title from commits/branch if not provided +if (!title) { + try { + const commits = git(`log origin/${defaultBranch}..HEAD --oneline`) + .split("\n") + .filter(Boolean); + if (commits.length === 1) { + title = commits[0].replace(/^[a-f0-9]+\s+/, ""); + } else { + title = branch + .replace(/^jon\//, "") + .replace(/^fix\//, "Fix: ") + .replace(/^feat\//, "") + .replace(/[-_]/g, " ") + .replace(/^\w/, (c) => c.toUpperCase()); + } + } catch { + title = branch; + } +} + +// Read body from file if provided +let body = bodyFile ? fs.readFileSync(bodyFile, "utf8") : null; + +// Build body from template if not provided +if (!body) { + body = + templateInfo == null + ? `### CHANGELOG\n\n` + + `Does this branch warrant an entry to the CHANGELOG?\n\n` + + `- [${hasChangelog ? "x" : " "}] Yes\n` + + `- [${hasChangelog ? 
" " : "x"}] No\n\n` + + `### Dependencies\n\nnone\n\n### Description\n\n${buildDescriptionFromCommits()}` + : buildBodyFromTemplate(templateInfo.body); +} + +validateBodyForTemplate(body, templateInfo); + +// Guardrail: fail fast if the body appears to include duplicate templates. +// This prevents accidental append/concatenation from creating malformed PR descriptions. +const templateSectionCounts = { + changelog: countOccurrences(body, "### CHANGELOG"), + dependencies: countOccurrences(body, "### Dependencies"), + description: countOccurrences(body, "### Description") +}; +if ( + templateSectionCounts.changelog > 1 || + templateSectionCounts.dependencies > 1 || + templateSectionCounts.description > 1 +) { + console.error( + "ERROR: PR body contains duplicated template sections. Regenerate /tmp/pr-body.md and retry." + ); + console.error(JSON.stringify(templateSectionCounts)); + process.exit(1); +} + +// Guardrail: fail fast on duplicated PR template sections. +// This catches stale/concatenated body files before creating malformed PRs. +const sectionCounts = { + changelog: countOccurrences(body, "### CHANGELOG"), + dependencies: countOccurrences(body, "### Dependencies"), + description: countOccurrences(body, "### Description"), +}; +if ( + sectionCounts.changelog > 1 || + sectionCounts.dependencies > 1 || + sectionCounts.description > 1 +) { + console.error( + "ERROR: PR body appears to contain duplicated template sections. " + + "Regenerate the body file and retry." + ); + console.error(JSON.stringify(sectionCounts)); + process.exit(1); +} + +// Inject Asana link if provided and not already present +if (asanaTask) { + const asanaUrl = `https://app.asana.com/0/0/${asanaTask}/f`; + const asanaRegex = new RegExp(`https://app\\.asana\\.com/\\d+/\\d+/(?:task/)?${asanaTask}`, "i"); + if (!asanaRegex.test(body)) { + const link = `[Asana task](${asanaUrl})`; + body = + insertAfterHeading(body, "### Description", link) ?? 
+ appendDescriptionSection(body, link); + } +} + +// Create PR via gh CLI — write body to a temp file to avoid arg length issues +const tmpBody = path.join(os.tmpdir(), `pr-body-${process.pid}.md`); +fs.writeFileSync(tmpBody, body, "utf8"); +const ghArgs = ["pr", "create", "--title", title, "--body-file", tmpBody]; +if (draft) ghArgs.push("--draft"); + +const result = spawnSync("gh", ghArgs, { encoding: "utf8" }); +try { fs.unlinkSync(tmpBody); } catch {} +if (bodyFile && bodyFile.startsWith(os.tmpdir())) { + try { + fs.unlinkSync(bodyFile); + } catch {} +} +if (result.status !== 0) { + console.error("ERROR:", (result.stderr || "").trim()); + process.exit(1); +} + +// gh pr create outputs the PR URL on stdout (--json not supported in older gh) +const prUrl = (result.stdout || "").trim(); +const prMatch = prUrl.match(/\/pull\/(\d+)$/); +if (!prMatch) { + console.error("ERROR: Could not parse PR URL from output:", prUrl); + process.exit(1); +} + +console.log( + JSON.stringify( + { + url: prUrl, + number: parseInt(prMatch[1], 10), + title, + base: defaultBranch, + head: branch, + draft, + owner, + repo, + }, + null, + 2 + ) +); diff --git a/.cursor/skills/pr-land/SKILL.md b/.cursor/skills/pr-land/SKILL.md new file mode 100644 index 0000000..508df06 --- /dev/null +++ b/.cursor/skills/pr-land/SKILL.md @@ -0,0 +1,354 @@ +--- +name: pr-land +description: Land approved PRs by autosquashing fixups, rebasing onto the default upstream branch, and merging. Use when the user wants to merge/land pull requests. +compatibility: Requires git, gh, node, jq. ASANA_TOKEN for Asana updates. 
+metadata: + author: j0ntz +--- + +<goal>Land approved PRs on `$GIT_BRANCH_PREFIX/*` branches by autosquashing fixups, rebasing onto the default upstream branch, and pushing.</goal> + +<usage> +``` +/pr-land # All EdgeApp repos with $GIT_BRANCH_PREFIX/* PRs +/pr-land edge-react-gui # Specific repo +/pr-land edge-react-gui edge-core-js # Multiple repos +``` +</usage> + +<rules description="Non-negotiable constraints."> +<rule id="scripts-only">All GitHub API calls go through companion scripts that use `gh` CLI internally. Do NOT call `gh` or `curl` directly for GitHub operations — use the scripts.</rule> +<rule id="gh-auth">If a script exits code 2 with `PROMPT_GH_AUTH`, prompt the user to run `gh auth login`.</rule> +<rule id="code-conflicts">Code conflicts → Skip PR. Abort the rebase to leave the repo clean, continue with remaining PRs. Report all skipped PRs at the end.</rule> +<rule id="stale-prs">Stale PRs → Skip and report. Old PRs with multiple conflicts should be skipped like code conflicts. Don't block the flow.</rule> +<rule id="changelog-conflicts">CHANGELOG conflicts (any section, including staging): Agent resolves semantically, scripts verify the result.</rule> +<rule id="verification">Verification is mandatory. Built into scripts, no bypass.</rule> +<rule id="no-force-push">Do NOT force-push without explicit user confirmation.</rule> +<rule id="no-editors">Never open editors. All git operations must be non-interactive: `GIT_EDITOR=true` for commit messages, `GIT_SEQUENCE_EDITOR=:` for rebase todo lists.</rule> +<rule id="unexpected-exit">Unexpected exit codes → STOP immediately. If any script returns an exit code not documented in this file, STOP and report to user. Do NOT attempt to interpret, retry, or work around unexpected errors.</rule> +<rule id="sequential-rebase">Sequential merging requires rebase. 
Each subsequent PR MUST be rebased onto the updated base branch after the previous merge.</rule> +<rule id="publish-gating">Don't publish if outstanding PRs remain. Only offer to publish a repo when ALL approved PRs for that repo are merged. If any were skipped or held back, do NOT publish that repo.</rule> +<rule id="npm-publish-gate">Step 8 CANNOT begin until the user explicitly confirms npm publish succeeded. `npm publish` requires interactive 2FA — the agent cannot run it. Do NOT infer publish completion from git push or tagging. STOP and WAIT for user confirmation.</rule> +<rule id="asana-last">Asana updates are LAST. Do NOT update Asana tasks until ALL merges, publishes, and GUI dependency upgrades are complete. Only update status for PRs that are fully landed (merged, and if non-GUI: published + GUI deps updated).</rule> +</rules> + +<scripts description="Companion scripts and their expected exit codes."> + +| Script | Purpose | +|--------|---------| +| `pr-land-discover.sh` | Discover PRs and approval status | +| `pr-land-comments.sh` | Check for recent unaddressed feedback (inline threads, review bodies, top-level comments) | +| `pr-land-prepare.sh` | Rebase + conflict detection + verification | +| `verify-repo.sh` | Verification (CHANGELOG + code; lint scoped to changed files when `--base` given) | +| `pr-land-merge.sh` | Rebase + verify + merge via GitHub API | +| `pr-land-publish.sh` | Version bump, changelog update, commit + tag (no push) | +| `staging-cherry-pick.sh` | Cherry-pick merged PR commits onto staging (see `/staging-cherry-pick` skill) | +| `asana-task-update.sh` | Update linked Asana tasks after merge | + +| Script | Exit 0 | Exit 1 | Exit 2 | Exit 3 | Exit 4 | +|--------|--------|--------|--------|--------|--------| +| `pr-land-discover.sh` | Success | Error | - | - | - | +| `pr-land-comments.sh` | Success | Error | - | - | - | +| `pr-land-prepare.sh` | Ready | All failed | - | - | - | +| `verify-repo.sh` | Pass | Code fail | CHANGELOG 
fail | - | - |
+| `pr-land-merge.sh` | Merged | Verify fail | Auth needed | - | CHANGELOG conflict |
+| `staging-cherry-pick.sh` | All cherry-picked | Error | Auth needed | CHANGELOG conflict | - |
+| `pr-land-publish.sh` | Ready (needs push) | Verify fail | No unreleased | - | - |
+| `asana-task-update.sh` | Success | Error | Needs user input | - | - |
+
+**Any exit code not in this table = STOP immediately and report to user.**
+</scripts>
+
+<step id="1" name="Discovery">
+ONE tool call:
+
+```bash
+~/.cursor/skills/pr-land/scripts/pr-land-discover.sh [repo1 repo2 ...]
+```
+
+Returns JSON with all `$GIT_BRANCH_PREFIX/*` PRs and their approval status.
+</step>
+
+<step id="2" name="Comment Check and Addressing">
+```bash
+echo '[{"repo":"...","prNumber":123,"branch":"<prefix>/..."}]' | ~/.cursor/skills/pr-land/scripts/pr-land-comments.sh
+```
+
+Returns PRs with unaddressed feedback posted after the last commit. The script checks **three sources**:
+
+1. **Unresolved inline review threads** — threads where `isResolved: false` with comments newer than last commit
+2. **Review bodies** — the latest review from each non-author/non-bot reviewer, if it has a non-empty body newer than last commit (catches feedback written in the approve/reject dialog, regardless of review state)
+3. **Top-level PR comments** — non-author/non-bot comments newer than last commit
+
+Items previously marked with `<!-- addressed:review:ID -->` or `<!-- addressed:comment:ID -->` markers are automatically excluded.
+
+<sub-step name="Comment handling">
+1. AI/bot comments: Already filtered out by the script.
+2. Human reviewer comments on approved PRs — address and set aside:
+   1. Read the comment and understand the requested change
+   2. Make the fix as a fixup commit: `~/.cursor/skills/lint-commit.sh --fixup <hash> [files...]`
+   3. Push the fixup to the branch
+   4. Reply on the PR thread explaining what was fixed (1 sentence, factual).
Use `gh pr comment <number> --repo EdgeApp/<repo> --body "..."` for top-level comments, or reply to the specific thread if the feedback was inline. + 5. **Remove this PR from the merge set** — it needs re-review after the fixup + 6. Continue with remaining PRs that have no outstanding comments + 7. Report addressed PRs to the user at the end of the workflow + +**Do NOT block the rest of the flow** for PRs with comments. +</sub-step> +</step> + +<step id="3" name="Prepare Branches"> +ONE tool call per batch: + +```bash +echo '[{"repo":"...","branch":"<prefix>/feature"}]' | ~/.cursor/skills/pr-land/scripts/pr-land-prepare.sh +``` + +The prepare script handles: clone/checkout, autosquash fixups, rebase onto upstream, conflict detection, and verification. + +**Exit codes:** +- `0` = At least one PR ready to push (skipped PRs reported in JSON output) +- `1` = All PRs failed (verification or other errors, none ready) + +<sub-step name="On code conflict">PR is skipped and reported in the `skipped` array. Rebase is aborted to leave repo clean. Other PRs continue.</sub-step> + +<sub-step name="On CHANGELOG conflict">Agent resolves semantically (upstream entries first, then ours), then re-runs prepare.</sub-step> +</step> + +<step id="4" name="Push"> +After prepare succeeds, push with `--force-with-lease`. +</step> + +<step id="5" name="Merge"> +Ask for user confirmation, then: + +```bash +echo '[{"repo":"...","prNumber":123,"branch":"<prefix>/..."}]' | ~/.cursor/skills/pr-land/scripts/pr-land-merge.sh [method] +``` + +The merge script processes PRs **sequentially** with automatic rebase-before-merge: + +1. **Check if already merged** — skip (handles re-runs after CHANGELOG resolution) +2. **Fetch + rebase onto upstream** — ALWAYS done, even for first PR +3. **Conflict handling during rebase:** + - No conflict → continue + - CHANGELOG-only (any section) → **exit 4** (agent resolves, re-runs) + - Code conflict → **skip PR**, abort rebase, continue +4. 
**Push `--force-with-lease`** +5. **Run local verification** (MANDATORY) +6. **Merge via GitHub API** + +**Exit codes:** +- `0` = All (non-skipped) PRs merged +- `1` = Verification failed +- `4` = CHANGELOG-only conflict (agent resolves, re-runs) + +**On exit 4:** Agent resolves semantically, pushes, re-runs merge. Script detects already-merged PRs and skips them. +</step> + +<step id="6" name="Staging Cherry-Pick"> +**Trigger:** Only for `edge-react-gui` PRs whose CHANGELOG entries are in the `## X.Y.Z (staging)` section (not `## Unreleased`). Check the merged PR's CHANGELOG diff to determine this — if the entry was added under a `(staging)` heading, the PR needs cherry-picking. + +**Skip** this step entirely if no merged PRs have staging CHANGELOG entries. + +For qualifying PRs, invoke the `/staging-cherry-pick` skill: + +```bash +echo '[{"repo":"edge-react-gui","prNumber":123,"mergeSha":"abc123"}]' | ~/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh +``` + +Pass the `mergeSha` from the merge step's JSON output. The script cherry-picks individual (non-merge) commits onto the staging branch. + +**On exit 3 (CHANGELOG conflict):** Resolve semantically (existing staging entries first, then the new entry), then `git add CHANGELOG.md && GIT_EDITOR=true git cherry-pick --continue`. Re-run for remaining PRs. + +**On exit 1 (code conflict):** STOP and report to user. + +After cherry-picks succeed, ask user to confirm push: +```bash +git push origin staging +``` + +Then restore the previous branch. +</step> + +<step id="7" name="Publish"> +**Gating:** Only non-GUI repos. Only when ALL approved PRs for the repo are merged. Skip if any were skipped/held back. + + +Ask for user confirmation: +``` +Merged repos ready to publish (all PRs landed): + - <repo> (<branch>) + +Repos with outstanding PRs (not ready to publish): + - <repo> (N PRs skipped) + +Publish ready repos to npm? 
[y/N] +``` + +If confirmed: + +```bash +echo '[{"repo":"...","branch":"master"}]' | ~/.cursor/skills/pr-land/scripts/pr-land-publish.sh +``` + +**Exit codes:** +- `0` = Version bumped, committed, tagged (check `needsPush` in JSON output) +- `1` = Verification failed +- `2` = No unreleased changes in CHANGELOG + +After script completes: +1. Show version bump details to user +2. If confirmed, push master and tag: `git push origin master && git push origin v<version>` +3. Prompt user to run `npm publish` in a real terminal (requires interactive 2FA) + +**STOP HERE. Do NOT proceed to step 8 until the user confirms npm publish succeeded.** +</step> + +<step id="8" name="Update GUI Dependencies"> +**Trigger:** Only if non-`edge-react-gui` repos were merged and published in steps 5–7. + +Ask user to confirm `npm publish` completed, then: + +1. Save current branch and switch to develop: + ```bash + cd <gui-repo-dir> + ORIG_BRANCH=$(git rev-parse --abbrev-ref HEAD) + git checkout develop && git pull origin develop + ``` + +2. Run `upgrade-dep.sh` for each published package (sequentially): + ```bash + cd <gui-repo-dir> && ~/.cursor/skills/pr-land/scripts/upgrade-dep.sh <package-name> + ``` + If any fails, STOP and report. Ask user how to proceed. + +3. Restore original branch: + ```bash + cd <gui-repo-dir> + git checkout $ORIG_BRANCH + git stash pop + ``` + If stash pop fails with conflicts, STOP and report. If "No stash entries", that's fine. +</step> + +<step id="9" name="Update Asana Tasks"> +**Runs ONLY after ALL merges, cherry-picks, publishes, and GUI dep upgrades are complete.** + +Only update for fully landed PRs: +- GUI PRs: merged +- Non-GUI PRs: merged AND published AND GUI deps updated + +Do NOT update for: skipped PRs, addressed-but-not-re-reviewed PRs, or repos not published. 
+ +<sub-step name="Extract Asana task GIDs"> +Pipe the PR metadata through the new helper so you only consume the Asana link once per PR: + +```bash +printf '[{"repo":"edge-react-gui","prNumber":123}]' | ~/.cursor/skills/pr-land/scripts/pr-land-extract-asana-task.sh > /tmp/asana.json +``` + +The helper outputs JSON like `{ "tasks": [{ "taskGid": "...", "label": "repo#123" }], "missing": [{ "label": "...", "reason": "..." }] }`. +Review the `missing` array, report any entries lacking an Asana link, and skip those PRs for Asana updates. +</sub-step> + +<sub-step name="Update tasks"> +For each task in `.tasks`, run: + +```bash +~/.cursor/skills/asana-task-update/scripts/asana-task-update.sh \ + --task <task_gid> \ + --set-status "Verification Needed" \ + --unassign +``` + +This replaces the old dedicated verification updater behavior. + +**Exit codes per call:** +- `0` = success +- `1` = error +- `2` = needs user input +</sub-step> +</step> + +<step id="10" name="End-of-Workflow Report"> +``` +=== PR Land Summary === + +Fully landed: + ✓ <repo>#<number> (<branch>) — merged, cherry-picked to staging, Asana updated + ✓ <repo>#<number> (<branch>) — merged, Asana updated + ✓ <repo>#<number> (<branch>) — merged, published v<version>, GUI deps updated, Asana updated + +Addressed but needs re-review: + ⚠ <repo>#<number> (<branch>) — fixup pushed, awaiting review + +Skipped (conflicts): + ⚠ <repo>#<number> (<branch>) — stale / code conflict in <file> + +Not published (outstanding PRs): + ⚠ <repo> — N PRs skipped, publish deferred +``` +</step> + +<conflict-handling description="Summary of conflict types and resolution."> + +| Conflict Type | Script Behavior | Agent Action | +|---|---|---| +| Code files | Skip PR, abort rebase, continue | Report to user at end | +| CHANGELOG only (prepare) | Report conflict | Resolve semantically, re-run prepare | +| CHANGELOG only (merge) | **exit 4** with instructions | Resolve semantically, push, re-run merge | + +Both prepare and merge 
scripts can detect CHANGELOG-only conflicts. In either case: +1. Script outputs clear resolution instructions +2. Agent resolves semantically (upstream entries first) +3. `git add CHANGELOG.md && GIT_EDITOR=true git rebase --continue` +4. Push with `--force-with-lease` +5. Re-run the script to verify and proceed +</conflict-handling> + +<changelog-resolution description="How the agent resolves CHANGELOG conflicts."> +``` +# Typical conflict: +<<<<<<< HEAD +- added: Feature from upstream +======= +- changed: Our feature +>>>>>>> our-commit + +# Resolution: Upstream first, then ours: +- added: Feature from upstream +- changed: Our feature +``` + +<sub-step name="During prepare (no push yet)"> +1. Read CHANGELOG.md with conflict markers +2. Resolve semantically using StrReplace +3. `git add CHANGELOG.md && GIT_EDITOR=true git rebase --continue` +4. Re-run `~/.cursor/skills/pr-land/scripts/pr-land-prepare.sh` +</sub-step> + +<sub-step name="During merge (already pushed, GitHub reports conflict)"> +1. `cd <repoDir>` +2. `git fetch origin && git rebase origin/master` (or `origin/develop`) +3. Read CHANGELOG.md with conflict markers +4. Resolve semantically using StrReplace +5. `git add CHANGELOG.md && GIT_EDITOR=true git rebase --continue` +6. `git push --force-with-lease` +7. Re-run `~/.cursor/skills/pr-land/scripts/pr-land-merge.sh` — verification runs automatically +</sub-step> + +Verification checks: no conflict markers remaining, proper entry format (`- type: description`), no malformed entries. If verification fails after resolution, the script prompts the user. +</changelog-resolution> + +<safety-guarantees> +1. Code conflicts skip cleanly — scripts abort rebase and skip, no dirty state +2. CHANGELOG conflicts are scripted — agent resolves semantically (any section including staging), verification validates +3. Verification is mandatory — built into merge script, physically blocks merge on failure +4. Pre-merge is safe — can force-push as many times as needed +5. 
Sequential merging with auto-rebase — each PR rebased onto updated base +6. No bypasses — scripts enforce rules, agent cannot skip steps +7. Unexpected errors halt execution — undocumented exit codes stop immediately +8. Publish gating — repos with outstanding PRs are not published +9. Asana is last — task updates only after full pipeline completes +</safety-guarantees> diff --git a/.cursor/skills/pr-land/scripts/edge-repo.js b/.cursor/skills/pr-land/scripts/edge-repo.js new file mode 100644 index 0000000..af85b4c --- /dev/null +++ b/.cursor/skills/pr-land/scripts/edge-repo.js @@ -0,0 +1,152 @@ +// edge-repo.js — Shared Edge repository utilities. +// Common functions for repo discovery, git operations, and conflict handling. +// Used by: pr-land-prepare.sh, pr-land-merge.sh, pr-land-publish.sh +const { spawnSync, execSync } = require("child_process"); +const { existsSync } = require("fs"); +const path = require("path"); +const os = require("os"); + +function getRepoDir(repo) { + const homeDir = os.homedir(); + const candidates = [ + path.join(homeDir, "git", repo), + path.join(homeDir, "projects", repo), + path.join(homeDir, "code", repo), + ]; + for (const dir of candidates) { + if (existsSync(path.join(dir, ".git"))) return dir; + } + return path.join(homeDir, "git", repo); +} + +function getUpstreamBranch(repo) { + return repo === "edge-react-gui" ? "origin/develop" : "origin/master"; +} + +function runGit(args, cwd, options = {}) { + const { allowFailure = false } = options; + const argArray = Array.isArray(args) ? 
args : args.split(" "); + const result = spawnSync("git", argArray, { + cwd, + encoding: "utf8", + env: { ...process.env, GIT_EDITOR: "true", GIT_SEQUENCE_EDITOR: ":" }, + }); + + if (result.status !== 0 && !allowFailure) { + throw new Error( + (result.stderr || result.stdout || "Unknown git error").trim() + ); + } + + return { + success: result.status === 0, + stdout: result.stdout?.trim() || "", + stderr: result.stderr?.trim() || "", + }; +} + +function parseConflictFiles(output) { + const files = []; + for (const line of output.split("\n")) { + const match = line.match(/CONFLICT.*in (.+)$/); + if (match) files.push(match[1]); + const bothMatch = line.match(/^\s+both modified:\s+(.+)$/); + if (bothMatch) files.push(bothMatch[1]); + } + return [...new Set(files)]; +} + +function isChangelogOnly(files) { + return ( + files.length > 0 && + files.every((f) => f === "CHANGELOG.md" || f.endsWith("/CHANGELOG.md")) + ); +} + +function runVerification(repoDir, baseRef, options = {}) { + const verifyScript = path.join( + os.homedir(), + ".cursor", + "skills", + "verify-repo.sh" + ); + const baseArg = baseRef != null ? ` --base "${baseRef}"` : ""; + const changelogArg = options.requireChangelog ? " --require-changelog" : ""; + const skipInstallArg = options.skipInstall ? " --skip-install" : ""; + try { + execSync( + `node "${verifyScript}" "${repoDir}"${baseArg}${changelogArg}${skipInstallArg}`, + { stdio: "inherit", encoding: "utf8" } + ); + return { success: true }; + } catch (e) { + return { success: false, exitCode: e.status }; + } +} + +// gh CLI wrapper for GitHub API calls +function ghApi(endpoint, options = {}) { + const { method, body, paginate, jq } = options; + const args = ["api", endpoint]; + if (method && method !== "GET") args.push("-X", method); + if (paginate) args.push("--paginate"); + if (jq) args.push("--jq", jq); + if (body) args.push("--input", "-"); + + const result = spawnSync("gh", args, { + encoding: "utf8", + input: body ? 
JSON.stringify(body) : undefined, + }); + + if (result.status !== 0) { + throw new Error( + `gh api ${endpoint} failed: ${(result.stderr || "").trim()}` + ); + } + + const out = result.stdout.trim(); + if (!out) return null; + try { + return JSON.parse(out); + } catch { + return out; + } +} + +function ghGraphql(query, variables = {}) { + const args = ["api", "graphql", "-f", `query=${query}`]; + for (const [k, v] of Object.entries(variables)) { + args.push(typeof v === "number" ? "-F" : "-f", `${k}=${v}`); + } + + const result = spawnSync("gh", args, { encoding: "utf8" }); + + if (result.status !== 0) { + throw new Error( + `gh api graphql failed: ${(result.stderr || "").trim()}` + ); + } + + const parsed = JSON.parse(result.stdout); + if (parsed.errors) { + throw new Error(`GraphQL errors: ${JSON.stringify(parsed.errors)}`); + } + return parsed.data; +} + +function installAndPrepare(repoDir) { + const script = path.join(__dirname, "..", "..", "install-deps.sh"); + execSync(`"${script}" "${repoDir}"`, { stdio: "inherit" }); +} + +module.exports = { + getRepoDir, + getUpstreamBranch, + runGit, + parseConflictFiles, + isChangelogOnly, + runVerification, + installAndPrepare, + ghApi, + ghGraphql, +}; diff --git a/.cursor/skills/pr-land/scripts/pr-land-comments.sh b/.cursor/skills/pr-land/scripts/pr-land-comments.sh new file mode 100755 index 0000000..4a3f23d --- /dev/null +++ b/.cursor/skills/pr-land/scripts/pr-land-comments.sh @@ -0,0 +1,189 @@ +#!/usr/bin/env node +// pr-land-comments.sh — Landing gate: checks for recent unaddressed feedback. +// Surfaces unresolved inline threads, review bodies, and top-level comments +// posted after the last commit. Uses a single GraphQL query per PR. +// +// Skips: resolved threads, bot/author comments, items with addressed markers. 
//
// Usage: echo '[{"repo":"...","prNumber":123,"branch":"..."}]' | ./pr-land-comments.sh

const { spawnSync } = require("child_process")

// Exit 2 with PROMPT_GH_AUTH when gh is unauthenticated (caller prompts user).
function requireGh() {
  const check = spawnSync("gh", ["auth", "status"], { encoding: "utf8" })
  if (check.status !== 0) {
    console.error("PROMPT_GH_AUTH")
    process.exit(2)
  }
}

// Minimal GraphQL wrapper over `gh api graphql`; throws on transport errors
// and on GraphQL-level errors in the payload.
function ghGraphql(query, variables = {}) {
  const args = ["api", "graphql", "-f", `query=${query}`]
  for (const [k, v] of Object.entries(variables)) {
    // -F preserves numeric typing (e.g. the PR number); -f sends strings.
    args.push(typeof v === "number" ? "-F" : "-f", `${k}=${v}`)
  }
  const result = spawnSync("gh", args, { encoding: "utf8" })
  if (result.status !== 0) {
    throw new Error(`GraphQL failed: ${(result.stderr || "").trim()}`)
  }
  const parsed = JSON.parse(result.stdout)
  if (parsed.errors) {
    throw new Error(`GraphQL errors: ${JSON.stringify(parsed.errors)}`)
  }
  return parsed.data
}

// One query per PR: last commit date, inline threads, reviews, and
// top-level comments — everything the gate needs in a single round-trip.
const QUERY = `
query($owner: String!, $repo: String!, $number: Int!) {
  repository(owner: $owner, name: $repo) {
    pullRequest(number: $number) {
      author { login }
      commits(last: 1) {
        nodes { commit { committedDate } }
      }
      reviewThreads(first: 100) {
        nodes {
          id
          isResolved
          comments(first: 50) {
            nodes {
              createdAt
              author { login }
              path
              body
            }
          }
        }
      }
      reviews(last: 50) {
        nodes {
          databaseId
          author { login }
          state
          body
          submittedAt
        }
      }
      comments(last: 50) {
        nodes {
          databaseId
          createdAt
          author { login }
          body
        }
      }
    }
  }
}`

requireGh()

// Collect IDs referenced by "<!-- addressed:review:ID -->" /
// "<!-- addressed:comment:ID -->" markers in top-level comments.
function extractAddressedIds(comments) {
  const ids = new Set()
  for (const c of comments) {
    for (const m of (c.body || "").matchAll(
      /<!-- addressed:(?:review|comment):(\d+) -->/g
    )) {
      ids.add(Number(m[1]))
    }
  }
  return ids
}

// GraphQL bot authors may have no login; display logins carry a "[bot]" tag.
function isBot(login) {
  return !login || login.includes("[bot]")
}

async function main() {
  let input = ""
  for await (const chunk of process.stdin) input += chunk

  const prs = JSON.parse(input)
  const results = []

  for (const { repo, prNumber, branch } of prs) {
    let data
    try {
      data = ghGraphql(QUERY, { owner: "EdgeApp", repo, number: prNumber })
    } catch (e) {
      console.error(
        `WARNING: Failed to query ${repo}#${prNumber}: ${e.message}`
      )
      continue
    }

    const pr = data.repository.pullRequest
    const prAuthor = pr.author?.login
    const lastCommitDate = pr.commits.nodes[0]
      ? new Date(pr.commits.nodes[0].commit.committedDate)
      : new Date(0)

    const addressedIds = extractAddressedIds(pr.comments.nodes)
    const recentComments = []

    // Source 1: unresolved inline review threads.
    for (const thread of pr.reviewThreads.nodes) {
      if (thread.isResolved) continue
      for (const c of thread.comments.nodes) {
        const user = c.author?.login
        // FIX: skip the PR author's own replies and bot chatter here too,
        // matching the other two sources — previously the author replying
        // in a thread re-flagged the PR as having unaddressed feedback.
        if (!user || user === prAuthor || isBot(user)) continue
        if (new Date(c.createdAt) > lastCommitDate) {
          recentComments.push({
            type: "inline",
            user,
            path: c.path,
            body: c.body?.slice(0, 200)
          })
        }
      }
    }

    // Source 2: latest review body per human, non-author reviewer.
    const latestByUser = {}
    for (const r of pr.reviews.nodes) {
      const user = r.author?.login
      if (!user || user === prAuthor || r.state === "PENDING") continue
      if (isBot(user)) continue
      const prev = latestByUser[user]
      if (
        !prev ||
        new Date(r.submittedAt) > new Date(prev.submittedAt)
      ) {
        latestByUser[user] = r
      }
    }
    for (const [user, r] of Object.entries(latestByUser)) {
      if (!r.body?.trim()) continue
      if (addressedIds.has(r.databaseId)) continue
      if (new Date(r.submittedAt) > lastCommitDate) {
        recentComments.push({
          type: "review-body",
          user,
          state: r.state,
          body: r.body.slice(0, 200)
        })
      }
    }

    // Source 3: top-level PR comments from human, non-author users.
    for (const c of pr.comments.nodes) {
      const user = c.author?.login
      if (!user || user === prAuthor || isBot(user)) continue
      if ((c.body || "").includes("<!-- addressed:")) continue
      if (addressedIds.has(c.databaseId)) continue
      if (new Date(c.createdAt) > lastCommitDate) {
        recentComments.push({
          type: "top-level",
          user,
          body: c.body?.slice(0, 200)
        })
      }
    }

    if (recentComments.length > 0) {
      results.push({ repo, prNumber, branch, recentComments })
    }
  }

  console.log(JSON.stringify(results, null, 2))
}
+main().catch(e => { + console.error(e) + process.exit(1) +}) diff --git a/.cursor/skills/pr-land/scripts/pr-land-discover.sh b/.cursor/skills/pr-land/scripts/pr-land-discover.sh new file mode 100755 index 0000000..b7a5d4f --- /dev/null +++ b/.cursor/skills/pr-land/scripts/pr-land-discover.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env node +// pr-land-discover.sh — Discovers all user's open PRs across EdgeApp repos +// with approval status using a single GraphQL query. +// +// Usage: ./pr-land-discover.sh [repo1] [repo2] ... +// Example: ./pr-land-discover.sh edge-react-gui edge-core-js +// Example: ./pr-land-discover.sh (no args = all EdgeApp repos) + +const { spawnSync } = require("child_process"); + +const specifiedRepos = process.argv.slice(2); +const edgeAppRepos = [ + "edge-react-gui", + "edge-exchange-plugins", + "edge-currency-accountbased", + "edge-core-js", + "edge-login-ui-rn", + "edge-currency-plugins", +]; +const repos = specifiedRepos.length > 0 ? specifiedRepos : edgeAppRepos; + +function requireGh() { + const check = spawnSync("gh", ["auth", "status"], { encoding: "utf8" }); + if (check.status !== 0) { + console.error("PROMPT_GH_AUTH"); + process.exit(2); + } +} + +function ghGraphql(query, variables = {}) { + const args = ["api", "graphql", "-f", `query=${query}`]; + for (const [k, v] of Object.entries(variables)) { + args.push(typeof v === "number" ? "-F" : "-f", `${k}=${v}`); + } + const result = spawnSync("gh", args, { encoding: "utf8" }); + if (result.status !== 0) { + throw new Error(`GraphQL failed: ${(result.stderr || "").trim()}`); + } + const parsed = JSON.parse(result.stdout); + if (parsed.errors) { + throw new Error(`GraphQL errors: ${JSON.stringify(parsed.errors)}`); + } + return parsed.data; +} + +requireGh(); + +// Build a single GraphQL query with aliases for all repos. +// Each alias fetches open PRs + latest review state in one round-trip. 
// Build one aliased repository(...) fragment per repo so a single GraphQL
// round-trip returns every repo's open PRs plus their latest review states.
const repoFragments = repos
  .map((repo, i) => {
    const alias = `repo${i}`;
    return `${alias}: repository(owner: "EdgeApp", name: "${repo}") {
    name
    pullRequests(first: 100, states: OPEN) {
      nodes {
        number
        title
        headRefName
        updatedAt
        reviews(last: 30) {
          nodes {
            author { login }
            state
            submittedAt
          }
        }
      }
    }
  }`;
  })
  .join("\n  ");

const query = `{ ${repoFragments} }`;

let data;
try {
  data = ghGraphql(query);
} catch (e) {
  console.error("ERROR:", e.message);
  process.exit(1);
}

const results = { prs: [], errors: [] };

// FIX: the branch prefix was hard-coded to "jon/", contradicting the
// $GIT_BRANCH_PREFIX convention documented in the pr-land skill. It is now
// configurable via the env var, defaulting to "jon" for backward
// compatibility.
const branchPrefix = `${process.env.GIT_BRANCH_PREFIX || "jon"}/`;

for (const key of Object.keys(data)) {
  const repoData = data[key];
  if (!repoData) continue;
  const repo = repoData.name;

  for (const pr of repoData.pullRequests.nodes) {
    if (!pr.headRefName.startsWith(branchPrefix)) continue;

    // Dedupe reviews: keep only the latest review per reviewer.
    const latestByUser = {};
    for (const r of pr.reviews.nodes) {
      const login = r.author?.login;
      if (!login) continue;
      if (
        !latestByUser[login] ||
        new Date(r.submittedAt) > new Date(latestByUser[login].submittedAt)
      ) {
        latestByUser[login] = r;
      }
    }

    const reviewers = Object.values(latestByUser);
    const approved = reviewers.some((r) => r.state === "APPROVED");
    const changesRequested = reviewers.some(
      (r) => r.state === "CHANGES_REQUESTED"
    );

    results.prs.push({
      repo,
      prNumber: pr.number,
      branch: pr.headRefName,
      title: pr.title,
      updatedAt: pr.updatedAt,
      approved,
      changesRequested,
      reviewers: reviewers.map((r) => ({
        user: r.author.login,
        state: r.state,
      })),
    });
  }
}

// Stable repo/branch ordering so output is deterministic across runs.
results.prs.sort(
  (a, b) => a.repo.localeCompare(b.repo) || a.branch.localeCompare(b.branch)
);
console.log(JSON.stringify(results, null, 2));
#!/usr/bin/env node
// pr-land-extract-asana-task.sh
// Extracts Asana task GIDs from PR bodies so /pr-land can skip loading full descriptions.
// Input: JSON array of {repo, prNumber}. Output: JSON object {tasks: [...], missing: [...]},
// where each entry contains label/repo info.
//
// The script is intentionally terse: it only emits structured JSON and does not
// print raw PR bodies.
//
// Exit codes: 0 = success, 1 = unexpected error, 2 = empty/invalid input.
const { spawnSync } = require("child_process");
const path = require("path");

// Read all of stdin as a single trimmed string.
async function readStdin() {
  let input = "";
  for await (const chunk of process.stdin) {
    input += chunk;
  }
  return input.trim();
}

// Fetch a PR's body text via the gh CLI.
// FIX: uses spawnSync with an argument array (no shell) instead of an
// interpolated execSync string, so repo/prNumber values coming from the
// JSON input can never be interpreted by a shell.
function fetchPrBody(repo, prNumber) {
  const result = spawnSync(
    "gh",
    ["api", `repos/EdgeApp/${repo}/pulls/${prNumber}`, "--jq", ".body"],
    { encoding: "utf8" }
  );
  if (result.status !== 0) {
    throw new Error((result.stderr || "gh api failed").trim());
  }
  return result.stdout.trim();
}

// Human-readable identifier used in output arrays, e.g. "edge-core-js#123".
function buildLabel(repo, prNumber) {
  return `${repo}#${prNumber}`;
}

async function main() {
  const input = await readStdin();
  if (!input) {
    console.error("Error: no input received (expecting JSON array with repo/prNumber)");
    process.exit(2);
  }

  let entries;
  try {
    entries = JSON.parse(input);
  } catch (err) {
    console.error("Error: failed to parse JSON input");
    process.exit(2);
  }

  // Matches classic Asana task URLs and captures the trailing task GID.
  const regex = /https:\/\/app\.asana\.com\/\d+\/\d+\/(?:task\/)?(\d+)/i;
  const tasks = [];
  const missing = [];

  for (const { repo, prNumber } of entries) {
    const label = buildLabel(repo, prNumber);
    let body;
    try {
      body = fetchPrBody(repo, prNumber);
    } catch (err) {
      // Fetch failures are reported per-PR, not fatal for the batch.
      missing.push({
        label,
        reason: `Failed to fetch PR body: ${err.message}`,
      });
      continue;
    }

    if (!body) {
      missing.push({
        label,
        reason: "PR body empty",
      });
      continue;
    }

    const match = body.match(regex);
    if (match) {
      tasks.push({
        taskGid: match[1],
        label,
      });
    } else {
      missing.push({
        label,
        reason: "No Asana link found",
      });
    }
  }

  console.log(JSON.stringify({ tasks, missing }, null, 2));
  process.exit(0);
}

main().catch((err) => {
  console.error(`Error: ${err.message}`);
  process.exit(1);
});
Already-merged PRs are detected and skipped on re-runs +// +// Exit codes: +// 0 = All (non-skipped) PRs merged successfully +// 1 = Verification failed +// 4 = CHANGELOG-only conflict (agent can resolve semantically) + +const { spawnSync } = require("child_process"); +const path = require("path"); +const { + getRepoDir, + getUpstreamBranch, + runGit, + parseConflictFiles, + isChangelogOnly, + runVerification, + ghApi, + installAndPrepare, +} = require(path.join(__dirname, "edge-repo.js")); + +function sanitizeBranchLabel(branch) { + return branch.replace(/[^a-z0-9]/gi, "-").replace(/-+/g, "-").replace(/^-|-$/g, ""); +} + +function describeBranchState(repoDir, branch) { + const notes = []; + const local = runGit(["rev-parse", branch], repoDir, { allowFailure: true }); + notes.push(local.success ? `Local commit (${branch}): ${local.stdout}` : `Local branch "${branch}" missing`); + + const remote = runGit(["rev-parse", `origin/${branch}`], repoDir, { allowFailure: true }); + notes.push(remote.success ? `Remote commit (origin/${branch}): ${remote.stdout}` : `Remote branch origin/${branch} missing`); + + const status = runGit(["status", "-sb"], repoDir, { allowFailure: true }); + if (status.stdout) { + notes.push(`Status: ${status.stdout.trim()}`); + } + return notes.join("\n"); +} + +function fetchBranchForPush(repoDir, branch) { + runGit(["fetch", "origin", branch], repoDir, { allowFailure: true }); +} + +// Verify gh auth +const authCheck = spawnSync("gh", ["auth", "status"], { encoding: "utf8" }); +if (authCheck.status !== 0) { + console.error("PROMPT_GH_AUTH"); + process.exit(2); +} + +const mergeMethod = process.argv[2] || "merge"; +if (!["merge", "squash", "rebase"].includes(mergeMethod)) { + console.error("ERROR: Invalid merge method. Use: merge, squash, or rebase"); + process.exit(1); +} + +// --- Core functions --- + +/** + * Rebase a branch onto the latest upstream. + * Returns: { status, conflictFiles? 
} + * status: "success" | "changelog_conflict" | "code_conflict" | "error" + * + * On changelog_conflict, the rebase is LEFT IN PROGRESS for agent resolution. + * On all other failures, the rebase is aborted to leave the repo clean. + */ +function rebaseOntoUpstream(repoDir, branch, repo) { + const upstream = getUpstreamBranch(repo); + + runGit(["fetch", "origin"], repoDir); + + try { + runGit(["checkout", branch], repoDir); + } catch (e) { + return { status: "error", message: `Checkout failed: ${e.message}` }; + } + + const rebaseResult = runGit(["rebase", upstream], repoDir, { + allowFailure: true, + }); + + if (rebaseResult.success) { + return { status: "success" }; + } + + // Conflict detected — analyze + const combinedOutput = rebaseResult.stdout + "\n" + rebaseResult.stderr; + let conflictFiles = parseConflictFiles(combinedOutput); + + if (conflictFiles.length === 0) { + try { + const statusResult = runGit(["status", "--porcelain"], repoDir, { + allowFailure: true, + }); + for (const line of statusResult.stdout.split("\n")) { + if (line.startsWith("UU ") || line.startsWith("AA ")) { + conflictFiles.push(line.slice(3).trim()); + } + } + } catch {} + } + + if (conflictFiles.some((f) => !f.includes("CHANGELOG"))) { + runGit(["rebase", "--abort"], repoDir, { allowFailure: true }); + return { status: "code_conflict", conflictFiles }; + } + + if (isChangelogOnly(conflictFiles)) { + return { status: "changelog_conflict", conflictFiles }; + } + + runGit(["rebase", "--abort"], repoDir, { allowFailure: true }); + return { status: "error", message: "Unknown conflict type", conflictFiles }; +} + +function checkPRStatus(repo, prNumber) { + try { + const data = ghApi(`repos/EdgeApp/${repo}/pulls/${prNumber}`); + return { + state: data.state, + merged: data.merged || false, + mergeable: data.mergeable, + mergeable_state: data.mergeable_state, + }; + } catch (e) { + return { error: `Failed to fetch PR status: ${e.message}` }; + } +} + +function mergePR(repo, prNumber, 
branch) { + const commitTitle = `Merge pull request #${prNumber} from EdgeApp/${branch}`; + + try { + const data = ghApi(`repos/EdgeApp/${repo}/pulls/${prNumber}/merge`, { + method: "PUT", + body: { + merge_method: mergeMethod, + commit_title: mergeMethod === "merge" ? commitTitle : undefined, + }, + }); + return { + repo, + prNumber, + branch, + success: data?.merged || false, + merged: data?.merged || false, + message: data?.message, + sha: data?.sha, + }; + } catch (e) { + return { + repo, + prNumber, + branch, + success: false, + merged: false, + message: e.message, + }; + } +} + +// --- Main --- + +async function main() { + let input = ""; + for await (const chunk of process.stdin) { + input += chunk; + } + + const prs = JSON.parse(input); + const results = { + merged: [], + failed: [], + skipped: [], + pending: [], + verificationFailed: null, + changelogConflict: null, + conflict: null, + method: mergeMethod, + status: "complete", + }; + + let exitCode = 0; + + for (let i = 0; i < prs.length; i++) { + const { repo, prNumber, branch } = prs[i]; + const repoDir = getRepoDir(repo); + + console.error( + `\n=== Merging ${repo}#${prNumber} (${branch}) [${i + 1}/${prs.length}] ===` + ); + + // CHECK: Is PR already merged? + const prStatus = checkPRStatus(repo, prNumber); + if (prStatus.merged) { + console.error("✓ Already merged — skipping"); + results.merged.push({ + repo, + prNumber, + branch, + success: true, + merged: true, + sha: "already-merged", + message: "Already merged", + }); + continue; + } + + // STEP 1: Rebase onto latest upstream + console.error("Rebasing onto latest upstream..."); + const rebaseResult = rebaseOntoUpstream(repoDir, branch, repo); + + if (rebaseResult.status === "changelog_conflict") { + console.error("\n=== CHANGELOG conflict — agent resolution needed ==="); + console.error(`Files: ${rebaseResult.conflictFiles.join(", ")}`); + console.error("\nTo resolve:"); + console.error( + ` 1. 
Read ${path.join(repoDir, "CHANGELOG.md")} with conflict markers` + ); + console.error( + " 2. Resolve semantically (upstream entries first, then ours)" + ); + console.error(" 3. git add CHANGELOG.md && git rebase --continue"); + console.error(" 4. git push --force-with-lease"); + console.error(" 5. Re-run merge"); + results.changelogConflict = { + repo, + prNumber, + branch, + repoDir, + conflictFiles: rebaseResult.conflictFiles, + }; + results.status = "changelog_conflict_needs_resolution"; + results.pending = prs.slice(i + 1); + exitCode = 4; + break; + } + + if (rebaseResult.status === "code_conflict") { + console.error(`⚠ Code conflict — skipping`); + console.error( + ` Conflicting files: ${rebaseResult.conflictFiles.join(", ")}` + ); + results.skipped.push({ + repo, + prNumber, + branch, + repoDir, + reason: "Code conflict", + conflictFiles: rebaseResult.conflictFiles, + }); + continue; + } + + if (rebaseResult.status !== "success") { + console.error( + `⚠ Rebase failed: ${rebaseResult.message || rebaseResult.status} — skipping` + ); + results.skipped.push({ + repo, + prNumber, + branch, + repoDir, + reason: `Rebase failed: ${rebaseResult.message || rebaseResult.status}`, + }); + continue; + } + + console.error("✓ Rebase complete"); + + // STEP 1b: Install dependencies and prepare after rebase + try { + installAndPrepare(repoDir); + } catch (e) { + console.error(`✗ Dependency install failed: ${e.message}`); + results.failed.push({ + repo, + prNumber, + branch, + success: false, + message: `Dependency install failed: ${e.message}`, + }); + continue; + } + + // STEP 2: Push rebased branch + console.error("Pushing rebased branch..."); + const pushResult = runGit( + ["push", "--force-with-lease", "origin", branch], + repoDir, + { allowFailure: true } + ); + if (!pushResult.success) { + fetchBranchForPush(repoDir, branch); + const branchState = describeBranchState(repoDir, branch); + console.error(`✗ Push failed: ${pushResult.stderr}`); + 
console.error(branchState); + results.failed.push({ + repo, + prNumber, + branch, + success: false, + message: `Push failed: ${pushResult.stderr}`, + }); + continue; + } + console.error("✓ Pushed"); + + // STEP 3: Run local verification (MANDATORY — no bypass) + console.error("Running local verification (MANDATORY)..."); + const verification = runVerification(repoDir, getUpstreamBranch(repo), { + skipInstall: true, + }); + + if (!verification.success) { + console.error("\n=== STOP: Verification failed ==="); + console.error( + `PR ${repo}#${prNumber} cannot be merged until verification passes.` + ); + results.verificationFailed = { + repo, + prNumber, + branch, + repoDir, + exitCode: verification.exitCode, + }; + results.status = "verification_failed"; + results.pending = prs.slice(i + 1); + exitCode = 1; + break; + } + + console.error("✓ Verification passed"); + + // STEP 4: Merge via GitHub API + console.error("Merging via GitHub API..."); + + // Brief pause to let GitHub process the push + await new Promise((resolve) => setTimeout(resolve, 2000)); + + const mergeResult = mergePR(repo, prNumber, branch); + + if (mergeResult.success && mergeResult.merged) { + results.merged.push(mergeResult); + console.error(`✓ Merged: ${mergeResult.sha?.slice(0, 7)}`); + } else { + console.error(`✗ Merge failed: ${mergeResult.message}`); + results.failed.push(mergeResult); + } + } + + // --- Summary --- + console.error("\n=== Merge Summary ==="); + if (results.merged.length > 0) { + console.error(`Merged (${results.merged.length}):`); + for (const r of results.merged) { + const sha = + r.sha === "already-merged" ? 
"already merged" : r.sha?.slice(0, 7); + console.error(` ✓ ${r.repo}#${r.prNumber} (${sha})`); + } + } + if (results.skipped.length > 0) { + console.error(`\nSkipped (${results.skipped.length}):`); + for (const r of results.skipped) { + console.error(` ⚠ ${r.repo}#${r.prNumber}: ${r.reason}`); + } + } + if (results.conflict) { + console.error(`\nConflict (STOPPED):`); + console.error( + ` ✗ ${results.conflict.repo}#${results.conflict.prNumber}: ${results.conflict.reason}` + ); + } + if (results.changelogConflict) { + console.error("\nCHANGELOG conflict (agent can resolve):"); + console.error( + ` ⚠ ${results.changelogConflict.repo}#${results.changelogConflict.prNumber}` + ); + console.error( + ` Files: ${results.changelogConflict.conflictFiles.join(", ")}` + ); + } + if (results.verificationFailed) { + console.error("\nVerification failed (STOPPED):"); + console.error( + ` ✗ ${results.verificationFailed.repo}#${results.verificationFailed.prNumber}` + ); + } + if (results.failed.length > 0) { + console.error(`\nFailed (${results.failed.length}):`); + for (const r of results.failed) { + console.error(` ✗ ${r.repo}#${r.prNumber}: ${r.message}`); + } + } + if (results.pending.length > 0) { + console.error(`\nPending (${results.pending.length}):`); + for (const p of results.pending) { + console.error(` ⏸ ${p.repo}#${p.prNumber}`); + } + } + + console.log(JSON.stringify(results, null, 2)); + process.exit(exitCode); +} + +main().catch((e) => { + console.error(e); + process.exit(1); +}); diff --git a/.cursor/skills/pr-land/scripts/pr-land-prepare.sh b/.cursor/skills/pr-land/scripts/pr-land-prepare.sh new file mode 100755 index 0000000..a43b147 --- /dev/null +++ b/.cursor/skills/pr-land/scripts/pr-land-prepare.sh @@ -0,0 +1,263 @@ +#!/usr/bin/env node +// pr-land-prepare.sh +// Prepares a branch for merge: checkout, autosquash, rebase, verify. +// Uses edge-repo.js for shared utilities (no GitHub API calls needed). 
+// +// Usage: echo '[{"repo":"edge-react-gui","branch":"jon/feature"}]' | ./pr-land-prepare.sh +// +// For each branch: +// 1. Checkout + fetch +// 2. Autosquash fixup commits +// 3. Rebase onto upstream (origin/master or origin/develop for GUI) +// 4. Detect conflicts: code files = SKIP, CHANGELOG-only = report +// 5. Run full verification (CHANGELOG + code) +// +// Exit codes: +// 0 = At least one branch prepared (or has resolvable CHANGELOG conflict) +// 1 = All branches failed (verification or other errors, none ready) +// +// Output: JSON with results for each branch + +const { execSync } = require("child_process"); +const { existsSync } = require("fs"); +const path = require("path"); +const { + getRepoDir, + getUpstreamBranch, + runGit, + parseConflictFiles, + isChangelogOnly, + runVerification, + installAndPrepare, +} = require(path.join(__dirname, "edge-repo.js")); + +function describeBranchState(repoDir, branch) { + const parts = []; + const local = runGit(["rev-parse", branch], repoDir, { allowFailure: true }); + if (local.success) { + parts.push(`Local commit (${branch}): ${local.stdout}`); + } else { + parts.push(`Local branch "${branch}" missing`); + } + + const remote = runGit(["rev-parse", `origin/${branch}`], repoDir, { allowFailure: true }); + if (remote.success) { + parts.push(`Remote commit (origin/${branch}): ${remote.stdout}`); + } else { + parts.push(`Remote branch origin/${branch} missing`); + } + + const status = runGit(["status", "-sb"], repoDir, { allowFailure: true }); + if (status.stdout) { + parts.push(`Status: ${status.stdout.trim()}`); + } + return parts.join("\n"); +} + +async function prepareBranch(repo, branch) { + const repoDir = getRepoDir(repo); + const upstream = getUpstreamBranch(repo); + const result = { + repo, + branch, + repoDir, + status: "unknown", + message: "", + }; + + console.error(`\n=== Preparing ${repo}/${branch} ===`); + console.error(`Directory: ${repoDir}`); + + // Step 1: Ensure repo exists + if 
(!existsSync(path.join(repoDir, ".git"))) { + console.error(`Cloning ${repo}...`); + try { + execSync(`git clone git@github.com:EdgeApp/${repo}.git "${repoDir}"`, { + stdio: "inherit", + }); + } catch (e) { + result.status = "clone_failed"; + result.message = "Failed to clone repository"; + return result; + } + } + + // Step 2: Fetch and checkout + console.error(`Fetching and checking out ${branch}...`); + try { + runGit(["fetch", "origin"], repoDir); + runGit(["fetch", "origin", branch], repoDir, { allowFailure: true }); + runGit(["checkout", branch], repoDir); + runGit(["pull", "--ff-only", "origin", branch], repoDir, { + allowFailure: true, + }); + } catch (e) { + result.status = "checkout_failed"; + result.message = e.message; + return result; + } + + // Step 3: Autosquash fixup commits + console.error("Autosquashing fixup commits..."); + try { + const baseResult = runGit(["merge-base", upstream, "HEAD"], repoDir); + const base = baseResult.stdout; + runGit(["rebase", "-i", base, "--autosquash"], repoDir); + console.error("✓ Autosquash complete"); + } catch (e) { + runGit(["rebase", "--abort"], repoDir, { allowFailure: true }); + result.status = "autosquash_failed"; + result.message = e.message; + return result; + } + + // Step 4: Rebase onto upstream + console.error(`Rebasing onto ${upstream}...`); + const rebaseResult = runGit(["rebase", upstream], repoDir, { + allowFailure: true, + }); + + if (!rebaseResult.success) { + const combinedOutput = rebaseResult.stdout + "\n" + rebaseResult.stderr; + const conflictFiles = parseConflictFiles(combinedOutput); + + console.error(`Conflict detected in: ${conflictFiles.join(", ")}`); + + if (conflictFiles.some((f) => !f.includes("CHANGELOG"))) { + console.error("\n=== Skipping: Code conflict detected ==="); + for (const f of conflictFiles) { + console.error(` - ${f}`); + } + runGit(["rebase", "--abort"], repoDir, { allowFailure: true }); + result.status = "code_conflict"; + result.message = "Code conflict — skipped"; + 
result.conflictFiles = conflictFiles; + return result; + } + + if (isChangelogOnly(conflictFiles)) { + console.error( + "\nCHANGELOG-only conflict. Resolve semantically, then re-run." + ); + runGit(["rebase", "--abort"], repoDir, { allowFailure: true }); + result.status = "changelog_conflict"; + result.message = "CHANGELOG conflict - resolve semantically, then re-run"; + result.conflictFiles = conflictFiles; + return result; + } + } + + console.error("✓ Rebase complete"); + + // Step 5: Install dependencies and prepare + try { + installAndPrepare(repoDir); + } catch (e) { + result.status = "install_failed"; + result.message = `Dependency install failed: ${e.message}`; + return result; + } + + // Step 6: Run verification (lint scoped to files changed vs upstream) + console.error("\nRunning verification..."); + const verifyResult = runVerification(repoDir, upstream, { + skipInstall: true, + }); + + if (!verifyResult.success) { + console.error("Branch state:"); + console.error(describeBranchState(repoDir, branch)); + result.status = "verification_failed"; + result.message = `Verification failed (exit code ${verifyResult.exitCode})`; + return result; + } + + result.status = "ready"; + result.message = "Branch prepared and verified successfully"; + return result; +} + +async function main() { + let input = ""; + for await (const chunk of process.stdin) { + input += chunk; + } + + const branches = JSON.parse(input); + const results = { + prepared: [], + failed: [], + skipped: [], + changelogConflicts: [], + }; + + let exitCode = 0; + + for (const { repo, branch } of branches) { + const result = await prepareBranch(repo, branch); + + switch (result.status) { + case "ready": + results.prepared.push(result); + break; + case "code_conflict": + results.skipped.push(result); + break; + case "changelog_conflict": + results.changelogConflicts.push(result); + break; + default: + results.failed.push(result); + exitCode = Math.max(exitCode, 1); + } + } + + // Summary + 
console.error("\n=== Prepare Summary ==="); + if (results.prepared.length > 0) { + console.error(`Ready (${results.prepared.length}):`); + for (const r of results.prepared) { + console.error(` ✓ ${r.repo}/${r.branch}`); + } + } + if (results.skipped.length > 0) { + console.error(`\nSkipped — code conflicts (${results.skipped.length}):`); + for (const r of results.skipped) { + console.error( + ` ⚠ ${r.repo}/${r.branch}: ${r.conflictFiles?.join(", ")}` + ); + } + } + if (results.changelogConflicts.length > 0) { + console.error( + `\nCHANGELOG conflicts (${results.changelogConflicts.length}):` + ); + for (const r of results.changelogConflicts) { + console.error( + ` ⚠ ${r.repo}/${r.branch}: Resolve semantically, then re-run` + ); + } + } + if (results.failed.length > 0) { + console.error(`\nFailed (${results.failed.length}):`); + for (const r of results.failed) { + console.error(` ✗ ${r.repo}/${r.branch}: ${r.message}`); + } + } + + if ( + results.prepared.length === 0 && + results.changelogConflicts.length === 0 && + exitCode === 0 + ) { + exitCode = 1; + } + + console.log(JSON.stringify(results, null, 2)); + process.exit(exitCode); +} + +main().catch((e) => { + console.error(e); + process.exit(1); +}); diff --git a/.cursor/skills/pr-land/scripts/pr-land-publish.sh b/.cursor/skills/pr-land/scripts/pr-land-publish.sh new file mode 100755 index 0000000..92df9c1 --- /dev/null +++ b/.cursor/skills/pr-land/scripts/pr-land-publish.sh @@ -0,0 +1,293 @@ +#!/usr/bin/env node +// pr-land-publish.sh +// Version bump, changelog update, commit, and tag for npm publishing +// Usage: echo '[{"repo":"edge-exchange-plugins","branch":"master"}]' | ./pr-land-publish.sh +// +// How it works: +// 1. Checks out the branch and fetches latest +// 2. Parses CHANGELOG.md for unreleased entries +// 3. Runs verification (yarn verify or yarn tsc && yarn lint) +// 4. Bumps version (minor for added/changed, patch for fixed) +// 5. Updates CHANGELOG.md with version header +// 6. 
Commits and tags locally (does NOT push) +// 7. Returns JSON with needsPush flag +// +// The agent should: +// - Show the user the version bump details and ask for confirmation +// - If confirmed, push master + tag to origin +// - Then prompt the user to run `npm publish` in a real terminal +// +// Exit codes: +// 0 = Version bumped, committed, tagged (check needsPush in JSON) +// 1 = Verification failed +// 2 = No unreleased changes + +const { execSync } = require("child_process"); +const { existsSync, readFileSync, writeFileSync } = require("fs"); +const path = require("path"); +const { getRepoDir, runGit: _runGit, installAndPrepare } = require(path.join(__dirname, "edge-repo.js")); + +// Thin wrapper: publish only needs the stdout string from runGit +function runGit(args, cwd) { + return _runGit(typeof args === "string" ? args.split(" ") : args, cwd).stdout; +} + +function parseChangelog(repoDir) { + const changelogPath = path.join(repoDir, "CHANGELOG.md"); + if (!existsSync(changelogPath)) { + return { entries: [], patchOnly: true, error: "No CHANGELOG.md found" }; + } + + const content = readFileSync(changelogPath, "utf8"); + const unreleasedStart = content.indexOf("## Unreleased"); + + if (unreleasedStart === -1) { + return { entries: [], patchOnly: true, error: "No ## Unreleased section" }; + } + + const nextVersionStart = content.indexOf("## ", unreleasedStart + "## Unreleased".length); + const unreleasedSection = content.substring( + unreleasedStart + "## Unreleased".length, + nextVersionStart !== -1 ? 
nextVersionStart : undefined + ).trim(); + + const entries = unreleasedSection.split("\n") + .map(line => line.trim()) + .filter(line => line.length > 0 && !line.startsWith("## ")); + + if (entries.length === 0) { + return { entries: [], patchOnly: true, error: "No entries in Unreleased section" }; + } + + // Validate entries and determine version bump + const allowedTags = ["- added:", "- changed:", "- deprecated:", "- removed:", "- fixed:", "- security:"]; + let patchOnly = true; + + for (const entry of entries) { + const hasValidTag = allowedTags.some(tag => entry.startsWith(tag)); + if (!hasValidTag) { + return { entries, patchOnly: true, error: `Invalid entry format: ${entry}` }; + } + + // Minor version bump for added/changed + if (entry.startsWith("- added:") || entry.startsWith("- changed:")) { + patchOnly = false; + } + } + + return { entries, patchOnly, error: null }; +} + +function bumpVersion(repoDir, patchOnly) { + const pkgPath = path.join(repoDir, "package.json"); + const pkg = JSON.parse(readFileSync(pkgPath, "utf8")); + const parts = pkg.version.split(".").map(Number); + + if (patchOnly) { + parts[2]++; + } else { + parts[1]++; + parts[2] = 0; + } + + const newVersion = parts.join("."); + pkg.version = newVersion; + writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + "\n"); + + return { oldVersion: pkg.version, newVersion }; +} + +function updateChangelog(repoDir, newVersion) { + const changelogPath = path.join(repoDir, "CHANGELOG.md"); + let content = readFileSync(changelogPath, "utf8"); + + const date = new Date().toISOString().split("T")[0]; + const newHeading = `## ${newVersion} (${date})`; + + content = content.replace( + "## Unreleased", + `## Unreleased\n\n${newHeading}` + ); + + writeFileSync(changelogPath, content); +} + +function checkNpmPublished(packageName, version) { + try { + const info = execSync(`npm view ${packageName}@${version} version`, { + encoding: "utf8", + stdio: "pipe" + }).trim(); + return info === version; + } catch 
(e) { + return false; + } +} + +async function publishRepo(repo, branch) { + const repoDir = getRepoDir(repo); + const results = { + repo, + branch, + repoDir, + success: false + }; + + console.error(`\n=== Publishing ${repo} ===`); + console.error(`Directory: ${repoDir}`); + + // 1. Ensure we're on the right branch and up to date + try { + runGit("fetch origin", repoDir); + runGit(`checkout ${branch}`, repoDir); + runGit(`reset --hard origin/${branch}`, repoDir); + } catch (e) { + results.error = `Git checkout failed: ${e.message}`; + return results; + } + + // 2. Get current package info + const pkgPath = path.join(repoDir, "package.json"); + const currentPkg = JSON.parse(readFileSync(pkgPath, "utf8")); + const currentVersion = currentPkg.version; + const packageName = currentPkg.name; + + // 3. Check if current version is already published + const isPublished = checkNpmPublished(packageName, currentVersion); + + if (isPublished) { + // Version already published - do full version bump flow + const changelog = parseChangelog(repoDir); + if (changelog.error) { + results.error = changelog.error; + results.exitCode = 2; + return results; + } + + console.error(`\nChangelog entries (${changelog.entries.length}):`); + for (const entry of changelog.entries) { + console.error(` ${entry}`); + } + console.error(`\nVersion bump: ${changelog.patchOnly ? 
"PATCH" : "MINOR"}`); + + // Run verification + console.error("\nRunning verification..."); + try { + installAndPrepare(repoDir); + + const pkg = JSON.parse(readFileSync(pkgPath, "utf8")); + if (pkg.scripts?.verify) { + execSync("yarn verify", { cwd: repoDir, stdio: "inherit" }); + } else { + execSync("yarn tsc && yarn lint", { cwd: repoDir, stdio: "inherit" }); + } + } catch (e) { + results.error = "Verification failed"; + results.exitCode = 1; + return results; + } + console.error("✓ Verification passed"); + + // Bump version + const { newVersion } = bumpVersion(repoDir, changelog.patchOnly); + console.error(`\nVersion: ${currentVersion} → ${newVersion}`); + + // Update changelog + updateChangelog(repoDir, newVersion); + console.error("✓ Updated CHANGELOG.md"); + + // Commit and tag (do NOT push yet - agent will prompt user first) + try { + runGit("add package.json CHANGELOG.md", repoDir); + execSync(`git commit -m "v${newVersion}" --no-verify`, { cwd: repoDir, stdio: "pipe" }); + runGit(`tag v${newVersion}`, repoDir); + console.error(`✓ Committed and tagged v${newVersion}`); + } catch (e) { + results.error = `Git commit failed: ${e.message}`; + return results; + } + + results.newVersion = newVersion; + results.needsPush = true; + results.success = true; + return results; + } else { + // Current version NOT published - check if already pushed + console.error(`\nVersion ${currentVersion} not yet published to npm`); + + let alreadyPushed = false; + try { + const remoteTags = runGit(`ls-remote --tags origin v${currentVersion}`, repoDir); + alreadyPushed = remoteTags.length > 0; + } catch (e) { + // ls-remote failed, assume not pushed + } + + results.newVersion = currentVersion; + results.needsPush = !alreadyPushed; + + if (alreadyPushed) { + console.error("Tag already pushed to origin."); + } else { + console.error("Version bump exists locally but has not been pushed yet."); + } + + results.success = true; + return results; + } +} + +async function main() { + let 
input = ""; + for await (const chunk of process.stdin) { + input += chunk; + } + + const repos = JSON.parse(input); + const results = { + published: [], + failed: [], + skipped: [] + }; + + let exitCode = 0; + + for (const { repo, branch } of repos) { + const result = await publishRepo(repo, branch || "master"); + + if (result.success) { + results.published.push(result); + } else if (result.exitCode === 2) { + results.skipped.push(result); + } else { + results.failed.push(result); + exitCode = result.exitCode || 1; + } + } + + // Summary + console.error("\n=== Publish Summary ==="); + if (results.published.length > 0) { + console.error(`Ready (${results.published.length}):`); + for (const r of results.published) { + console.error(` ✓ ${r.repo}@${r.newVersion}${r.needsPush ? " (needs push)" : " (already pushed)"}`); + } + } + if (results.skipped.length > 0) { + console.error(`Skipped (${results.skipped.length}):`); + for (const r of results.skipped) { + console.error(` ⏭ ${r.repo}: ${r.error}`); + } + } + if (results.failed.length > 0) { + console.error(`Failed (${results.failed.length}):`); + for (const r of results.failed) { + console.error(` ✗ ${r.repo}: ${r.error}`); + } + } + + console.log(JSON.stringify(results, null, 2)); + process.exit(exitCode); +} + +main().catch(e => { console.error(e); process.exit(1); }); diff --git a/.cursor/skills/pr-land/scripts/upgrade-dep.sh b/.cursor/skills/pr-land/scripts/upgrade-dep.sh new file mode 100755 index 0000000..255c4b1 --- /dev/null +++ b/.cursor/skills/pr-land/scripts/upgrade-dep.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Standalone replacement for the upgrade_dep shell function. +# Usage: upgrade-dep.sh <package> [version] +# +# Stashes working changes, upgrades a dependency in package.json, +# runs yarn + prepare, commits the result, then pops the stash. 
+ +usage() { + echo "Usage: upgrade-dep.sh <package> [version]" + exit 1 +} + +package="" +new_version="" + +case "$#" in + 1) + package="$1" + ;; + 2) + package="$1" + new_version="$2" + ;; + *) + usage + ;; +esac + +# Stash any working changes +git stage . +git stash + +# Resolve latest version from npm if none provided +if [ -z "$new_version" ]; then + latest_version=$(npm view "$package" versions --json | jq -r '.[]' | sort -V | tail -n 1) + new_version="^$latest_version" +fi + +# Check if already at target version +current_version=$(jq -r ".dependencies[\"$package\"] // .devDependencies[\"$package\"]" package.json) +if [ "$current_version" = "$new_version" ]; then + echo "Error: $package is already at version $new_version" + git stash pop + exit 1 +fi + +# Update package.json +sed -i "" "s#\"$package\": \".*\"#\"$package\": \"$new_version\"#" package.json + +# Install and prepare +yarn && yarn prepare && yarn prepare.ios + +# Remove git+ prefixes from yarn.lock +sed -i "" "s/git+//" yarn.lock + +# Stage and commit +git add -A +git commit -m "Upgrade $package@$new_version" --no-verify diff --git a/.cursor/skills/pr-review/SKILL.md b/.cursor/skills/pr-review/SKILL.md new file mode 100644 index 0000000..967b230 --- /dev/null +++ b/.cursor/skills/pr-review/SKILL.md @@ -0,0 +1,123 @@ +--- +name: pr-review +description: Review PR changes against Edge coding conventions and post structured inline feedback to GitHub. Use when the user wants to review a pull request. +compatibility: Requires git, gh. +metadata: + author: j0ntz +--- + +<goal>Review PR changes against Edge coding conventions and post structured inline feedback to GitHub.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="standards-first">Read review standards BEFORE examining code. 
Load both `~/.cursor/rules/review-standards.mdc` and `~/.cursor/rules/typescript-standards.mdc` in parallel.</rule> +<rule id="use-companion-script">Use `scripts/github-pr-review.sh` for all GitHub API operations. Do not use raw `curl`, `gh`, or MCP tools inline.</rule> +<rule id="no-script-bypass">If a companion script fails, report the error and STOP. Do NOT fall back to raw `gh`, `curl`, or other workarounds.</rule> +<rule id="no-duplicate-feedback">Check existing reviews from the context output. Do not repeat feedback already given by another reviewer.</rule> +<rule id="batch-reads">When reviewing changed files, batch independent Read/Grep calls in a single message.</rule> +<rule id="script-timeouts">The companion script may take up to 30s. Set `block_until_ms: 60000` when invoking it.</rule> +</rules> + +<step id="1" name="Gather PR context"> +Run the companion script to fetch PR metadata, changed files with patches, and existing reviews: + +```bash +scripts/github-pr-review.sh context [--pr <number>] [--owner <owner>] [--repo <repo>] +``` + +If the user provides a PR URL or number, pass `--pr`. If they also specify a repo, pass `--owner` and `--repo`. If nothing is provided, the script auto-detects from the current branch. + +If the script exits code 2 with `PROMPT_GH_AUTH`, prompt: "`gh` CLI is not authenticated. Run `gh auth login` first." + +Save the output JSON — it contains `number`, `title`, `url`, `headRef`, `baseRef`, `headSha`, `reviews[]`, and `files[]` (with patches). +</step> + +<step id="2" name="Checkout PR branch"> +Checkout the PR branch to ensure file reads reflect the PR's code, not the current local branch: + +```bash +git fetch origin <headRef> && git checkout <headRef> +``` + +Replace `<headRef>` with the branch name from the context output (e.g., `william/fix-eth-sync`). + +If checkout fails due to uncommitted changes, prompt the user to stash or commit before proceeding. 
+</step> + +<step id="3" name="Load review standards"> +Read these files in parallel (skip any already present in `cursor_rules_context`): + +- `~/.cursor/rules/review-standards.mdc` +- `~/.cursor/rules/typescript-standards.mdc` +</step> + +<step id="4" name="Review changed files"> +For each changed file in the context output: + +1. Read the full file to understand surrounding context (batch reads in parallel) +2. Review the patch against all loaded standards +3. Check for: + - Convention violations from review-standards.mdc and typescript-standards.mdc + - Potential bugs or safety issues + - Performance concerns + - Unnecessary code, unnecessary JSX fragments, or missed simplifications + - Efficient memoization where necessary (memo, useHandler, useCallback) + +Categorize findings as: +- **Critical**: Must fix before merge +- **Warning**: Should address +- **Suggestion**: Consider for improvement + +Cross-reference findings against `reviews[]` from the context output. Omit any findings already raised by another reviewer. +</step> + +<step id="5" name="Submit review"> +If there are findings to report, prepare a review JSON and submit via the companion script: + +```bash +echo '<review-json>' | scripts/github-pr-review.sh submit \ + --pr <number> --owner <owner> --repo <repo> --sha <headSha> +``` + +Review JSON format: +```json +{ + "event": "COMMENT", + "body": "", + "comments": [ + { + "path": "src/file.ts", + "line": 42, + "side": "RIGHT", + "body": "Comment text" + } + ] +} +``` + +Use `"REQUEST_CHANGES"` for critical issues, `"COMMENT"` for suggestions only, `"APPROVE"` if no issues found. 
+ +<sub-step name="Comment formatting"> +- Single line: use only `line` +- Multi-line range: use both `start_line` (first) and `line` (last) +- `side`: use `"RIGHT"` for new code (additions) +- Keep comments concise, use backtick formatting for code, bold, or italics +- 0 findings: No review needed +- 1 inline comment: Leave `body` empty (`""`) +- 2+ inline comments: Only add `body` if it provides necessary linking context +</sub-step> +</step> + +<step id="6" name="Summarize"> +After submitting (or if no findings), provide a summary in the chat response: +- Number of files reviewed +- Findings by category (critical, warning, suggestion) +- Link to the submitted review + - PR link [PR title](https://github.com/EdgeApp/<repo>/pull/5952) +</step> + +<edge-cases> +<case name="No PR found">Script exits with an error. Ask the user for a PR number or URL.</case> +<case name="No changed files">Report that the PR has no file changes.</case> +<case name="Large PR (>20 files)">Prioritize files with the most additions. Note any files skipped due to size.</case> +<case name="Server repo">If the repository name ends in `-server` or context indicates a server project, also review against the Server Conventions section in review-standards.mdc.</case> +</edge-cases> diff --git a/.cursor/skills/pr-review/scripts/github-pr-review.sh b/.cursor/skills/pr-review/scripts/github-pr-review.sh new file mode 100755 index 0000000..5bf9d88 --- /dev/null +++ b/.cursor/skills/pr-review/scripts/github-pr-review.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +# github-pr-review.sh — Fetch PR review context and submit reviews via gh CLI. +# +# Subcommands: +# context [--pr <number>] [--owner <o>] [--repo <r>] Fetch PR metadata + files + existing reviews +# submit --pr <n> --owner <o> --repo <r> --sha <sha> Post review (JSON on stdin) +# +# The `context` subcommand auto-detects the PR from the current branch if --pr is omitted. +# Total API calls: 2 (gh pr view + gh api for file patches). 
+# +# Exit codes: 0 = success, 1 = error, 2 = needs user input (e.g. gh not authenticated) +set -euo pipefail + +CMD="${1:-}" +shift || true + +OWNER="" REPO="" PR="" SHA="" +while [[ $# -gt 0 ]]; do + case "$1" in + --owner) OWNER="$2"; shift 2 ;; + --repo) REPO="$2"; shift 2 ;; + --pr) PR="$2"; shift 2 ;; + --sha) SHA="$2"; shift 2 ;; + *) echo "Unknown arg: $1" >&2; exit 1 ;; + esac +done + +require_gh() { + if ! command -v gh &>/dev/null; then + echo "Error: gh CLI not installed." >&2 + exit 1 + fi + if ! gh auth status &>/dev/null 2>&1; then + echo "PROMPT_GH_AUTH" >&2 + exit 2 + fi +} + +case "$CMD" in + context) + require_gh + + # --- Call 1: PR metadata + reviews via gh pr view --- + VIEW_ARGS=() + [[ -n "$PR" ]] && VIEW_ARGS+=("$PR") + [[ -n "$OWNER" && -n "$REPO" ]] && VIEW_ARGS+=("--repo" "$OWNER/$REPO") + + META=$(gh pr view ${VIEW_ARGS[@]+"${VIEW_ARGS[@]}"} \ + --json number,title,url,headRefName,headRefOid,baseRefName,reviews 2>&1) || { + echo "Error: Failed to fetch PR. Output: $META" >&2 + exit 1 + } + + # Parse owner/repo/number from the PR URL + NUMBER=$(echo "$META" | jq -r '.number') + URL=$(echo "$META" | jq -r '.url') + _OWNER=$(echo "$URL" | cut -d/ -f4) + _REPO=$(echo "$URL" | cut -d/ -f5) + + # --- Call 2: Changed files with patches (REST — GraphQL doesn't expose patches) --- + FILES=$(gh api "repos/$_OWNER/$_REPO/pulls/$NUMBER/files" --paginate 2>&1) || { + echo "Error: Failed to fetch PR files. 
Output: $FILES" >&2 + exit 1 + } + + # Merge into single structured JSON output + jq -n \ + --argjson meta "$META" \ + --argjson files "$FILES" \ + '{ + number: $meta.number, + title: $meta.title, + url: $meta.url, + headRef: $meta.headRefName, + baseRef: $meta.baseRefName, + headSha: $meta.headRefOid, + reviews: [($meta.reviews // [])[] | {user: .author.login, state: .state, body: .body}], + files: [$files[] | {path: .filename, status: .status, additions: .additions, deletions: .deletions, patch: .patch}] + }' + ;; + + submit) + require_gh + + if [[ -z "$PR" || -z "$OWNER" || -z "$REPO" || -z "$SHA" ]]; then + echo "Error: --pr, --owner, --repo, --sha required for submit" >&2 + exit 1 + fi + + # Read review JSON from stdin: { event, body, comments: [{path, line, body, start_line?, side?}] } + # Inject commit_id from --sha and POST to reviews endpoint + jq --arg sha "$SHA" '. + {commit_id: $sha}' | \ + gh api "repos/$OWNER/$REPO/pulls/$PR/reviews" -X POST --input - | \ + jq '{id: .id, state: .state, url: .html_url}' + ;; + + *) + echo "Usage: github-pr-review.sh {context|submit} [args]" >&2 + exit 1 + ;; +esac diff --git a/.cursor/skills/q/SKILL.md b/.cursor/skills/q/SKILL.md new file mode 100644 index 0000000..162a9bd --- /dev/null +++ b/.cursor/skills/q/SKILL.md @@ -0,0 +1,88 @@ +--- +name: q +description: Answer the user's question with maximum accuracy, objectivity, and intellectual honesty. Use when the user asks a question that needs careful, evidence-based answering. +metadata: + author: j0ntz +--- + +<goal> +Answer the user's question with maximum accuracy, objectivity, and intellectual honesty. +</goal> + +<rules description="Non-negotiable constraints. Read these before anything else."> +<rule id="no-sycophancy">Do not open with "Great question!", "Certainly!", "Absolutely!", or similar. 
Start with substance.</rule> +<rule id="no-filler">Do not pad responses with obvious restatements of the question or generic context the user already knows.</rule> +<rule id="no-unverified-claims">For claims about APIs, libraries, project conventions, or anything that could be outdated or wrong, either verify against the codebase/docs or state that you cannot verify. Pre-training knowledge is acceptable for stable, well-established concepts (language semantics, algorithms, etc.) but not for anything version-sensitive or project-specific.</rule> +<rule id="calibrated-confidence"> + When uncertain, say so explicitly with a qualifier (e.g., "I believe…", "Based on what I can see…"). Distinguish between "I lack information" and "this is genuinely debatable." + When confident, state things directly without qualifiers. Hedging on things you know well is noise, not honesty. +</rule> +<rule id="no-code-changes">This command is for answering only. Do not edit files, create files, or run commands that mutate state.</rule> +</rules> + +<step id="1" name="Identify ambiguity"> +Check whether the question has multiple valid interpretations that would lead to **materially different answers**. If so: + +1. List the interpretations (briefly, 1 line each). +2. Ask the user which they mean. +3. **Stop and wait.** Do not answer until the user clarifies. + +If the interpretations converge on the same conclusion, proceed and note which interpretation you chose. If unambiguous, proceed directly. +</step> + +<step id="2" name="Gather evidence"> +Decide whether tool calls are needed: + +<skip-tools> + Skip evidence gathering when: + - The question is conceptual, opinion-based, or about stable well-established knowledge you can answer with high confidence (e.g., "what does Array.map do?"). + - No tool output would change or strengthen the answer. 
+</skip-tools> + +<use-tools> + Use read-only tools (Read, Grep, Glob, SemanticSearch, WebSearch, WebFetch) when: + - The answer depends on codebase state, project conventions, or version-specific behavior. + - The answer could plausibly be wrong or outdated without verification. + + For codebase questions: search the relevant repo(s). + For external API/library questions: search the web for current official docs and cite the source. +</use-tools> +</step> + +<step id="3" name="Answer"> +<structure> + 1. **Direct answer first.** Lead with the answer, not background. A yes/no question gets yes/no with one sentence of justification. + 2. **Evidence/reasoning second.** Show what you found and how it supports the answer. Cite files, line numbers, or URLs. Omit this section entirely if no tools were used and the reasoning is self-evident. + 3. **Caveats last.** Note limitations, unknowns, or alternative interpretations. Omit if there are none. +</structure> + +<length> + Match response length to question complexity. A simple question gets 1-3 sentences. A complex question gets structured sections. Never pad. +</length> + +<multi-part> + If the user asks multiple things at once, answer each as a numbered section with its own direct-answer-first structure. +</multi-part> +</step> + +<edge-cases> +<case name="No clear answer"> + State that explicitly. Explain what would be needed to arrive at an answer (e.g., "This depends on X, which I cannot determine from the codebase alone"). +</case> + +<case name="Question contradicts codebase reality"> + Point out the contradiction with evidence. Do not silently conform to the user's premise if it's factually wrong. +</case> + +<case name="Multiple valid answers"> + Present them as alternatives with trade-offs. Do not pick one arbitrarily. +</case> + +<case name="Sources disagree"> + When the codebase contradicts official docs, or two sources conflict, present both with attribution. 
State which source you trust more and why (e.g., "The codebase uses X, but current docs recommend Y — the codebase may be on an older version"). +</case> + +<case name="Implementation feasibility question"> + If the user asks "Can you implement X?" or similar, treat it as a question about feasibility — not a request to start coding. Answer with: feasibility assessment first, trade-offs and approach options second, unknowns last. +</case> +</edge-cases> diff --git a/.cursor/skills/staging-cherry-pick/SKILL.md b/.cursor/skills/staging-cherry-pick/SKILL.md new file mode 100644 index 0000000..c3cc154 --- /dev/null +++ b/.cursor/skills/staging-cherry-pick/SKILL.md @@ -0,0 +1,91 @@ +--- +name: staging-cherry-pick +description: Cherry-pick merged PR commits onto the staging branch. Use after pr-land merges staging-targeted PRs to develop, or standalone when commits need to land on staging. +compatibility: Requires git, gh, node. +metadata: + author: j0ntz +--- + +<goal>Cherry-pick individual commits from merged PRs onto the `staging` branch, resolving CHANGELOG conflicts semantically when they arise.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="individual-commits">Cherry-pick each commit individually — NEVER cherry-pick the merge commit itself. Extract non-merge commits via `git log --reverse <merge>^1..<merge>^2`.</rule> +<rule id="pull-first">ALWAYS pull the latest staging branch before cherry-picking.</rule> +<rule id="changelog-conflicts">CHANGELOG conflicts: Agent resolves semantically (existing staging entries first, then the new entry). Code conflicts: STOP and report.</rule> +<rule id="no-force-push">Do NOT force-push staging without explicit user confirmation.</rule> +<rule id="no-editors">Never open editors. 
All git operations must be non-interactive: `GIT_EDITOR=true` for commit messages.</rule> +<rule id="push-confirmation">After all cherry-picks succeed, ask user before pushing to origin/staging.</rule> +<rule id="scripts-only">Use the companion script for cherry-pick operations. Do NOT manually run git cherry-pick sequences.</rule> +<rule id="unexpected-exit">Unexpected exit codes → STOP immediately and report to user.</rule> +</rules> + +<scripts description="Companion scripts and their expected exit codes."> + +| Script | Purpose | +|--------|---------| +| `staging-cherry-pick.sh` | Cherry-pick PR commits onto staging | + +| Script | Exit 0 | Exit 1 | Exit 2 | Exit 3 | +|--------|--------|--------|--------|--------| +| `staging-cherry-pick.sh` | All cherry-picks succeeded | Error (code conflict, git failure) | Auth needed | CHANGELOG conflict (agent resolves) | + +**Any exit code not in this table = STOP immediately and report to user.** +</scripts> + +<step id="1" name="Identify Staging PRs"> +Determine which merged PRs have CHANGELOG entries in the `## X.Y.Z (staging)` section. These are the PRs that need cherry-picking. + +**When called from pr-land:** The caller provides the list of merged PRs and their merge SHAs. + +**When called standalone:** Read the CHANGELOG diff for each PR to check if entries target the staging section. +</step> + +<step id="2" name="Cherry-Pick"> +```bash +echo '[{"repo":"...","prNumber":123,"mergeSha":"abc123"}]' | ~/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh +``` + +The script handles: +1. Fetching the merge commit SHA (from input or GitHub API) +2. Extracting individual commits from the merge +3. Checking out and pulling the staging branch +4. Cherry-picking each commit in order (oldest first) +5. Detecting and classifying conflicts + +**On exit 3 (CHANGELOG conflict):** +1. Read the CHANGELOG with conflict markers +2. Resolve semantically: keep existing staging entries, add the new entry +3. 
`git add CHANGELOG.md && GIT_EDITOR=true git cherry-pick --continue`
+4. Re-run the script for any remaining PRs
+</step>
+
+<step id="3" name="Push">
+After all cherry-picks succeed, show the user what will be pushed:
+
+```
+Cherry-picked to staging:
+ ✓ <repo>#<number> (<N> commits)
+ ✓ <repo>#<number> (<N> commits)
+
+Push to origin/staging? [y/N]
+```
+
+If confirmed:
+```bash
+git push origin staging
+```
+</step>
+
+<step id="4" name="Restore Branch">
+Return to the branch the user was on before cherry-picking:
+```bash
+git checkout <original-branch>
+```
+</step>
+
+<edge-cases>
+<case name="Empty cherry-pick">If a commit is already on staging (empty cherry-pick), the script skips it automatically.</case>
+<case name="Code conflict">Script aborts the cherry-pick and reports the conflicting files. Agent STOPs and reports to user.</case>
+<case name="Multiple PRs">Script processes PRs sequentially. Staging is checked out once and reused across PRs.</case>
+<case name="No merge SHA provided">Script queries the GitHub API for the merge commit SHA.</case>
+</edge-cases>
diff --git a/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh b/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh
new file mode 100755
index 0000000..4552557
--- /dev/null
+++ b/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh
@@ -0,0 +1,306 @@
+#!/usr/bin/env node
+// staging-cherry-pick.sh
+// Cherry-picks individual commits from merged PRs onto the staging branch.
+//
+// Usage: echo '[{"repo":"edge-react-gui","prNumber":123,"mergeSha":"abc123"}]' | ./staging-cherry-pick.sh
+//
+// For each PR:
+// 1. Determine merge commit SHA (from input or by querying GitHub)
+// 2. Extract non-merge commits: git log <merge>^1..<merge>^2
+// 3. Pull latest staging branch
+// 4. Cherry-pick each commit individually (oldest first)
+// 5. Report results
+//
+// Exit codes:
+// 0 = All cherry-picks succeeded
+// 1 = Error (code conflict, git failure, etc.); 2 = Auth needed (PROMPT_GH_AUTH)
+// 3 = Cherry-pick conflict (agent must resolve) + +const { spawnSync } = require("child_process"); +const path = require("path"); +const { getRepoDir, runGit, ghApi } = require( + path.join(__dirname, "..", "..", "pr-land", "scripts", "edge-repo.js") +); + +// Verify gh auth +const authCheck = spawnSync("gh", ["auth", "status"], { encoding: "utf8" }); +if (authCheck.status !== 0) { + console.error("PROMPT_GH_AUTH"); + process.exit(2); +} + +function getMergeCommit(repo, prNumber) { + const data = ghApi(`repos/EdgeApp/${repo}/pulls/${prNumber}`); + if (!data.merged) { + return { error: `PR #${prNumber} is not merged` }; + } + return { sha: data.merge_commit_sha }; +} + +function getCommitsToCherry(repoDir, mergeSha) { + // Extract non-merge commits from the PR: merge^1..merge^2 + // This gives us the branch commits in chronological order + const result = runGit( + ["log", "--reverse", "--format=%H %s", `${mergeSha}^1..${mergeSha}^2`], + repoDir, + { allowFailure: true } + ); + + if (!result.success || !result.stdout) { + return []; + } + + return result.stdout.split("\n").filter(Boolean).map((line) => { + const spaceIdx = line.indexOf(" "); + return { + sha: line.slice(0, spaceIdx), + message: line.slice(spaceIdx + 1), + }; + }); +} + +async function main() { + let input = ""; + for await (const chunk of process.stdin) { + input += chunk; + } + + const prs = JSON.parse(input); + const results = { + cherryPicked: [], + skipped: [], + conflict: null, + status: "complete", + }; + + let exitCode = 0; + let stagingCheckedOut = false; + let currentRepoDir = null; + + for (let i = 0; i < prs.length; i++) { + const { repo, prNumber, mergeSha: inputMergeSha } = prs[i]; + const repoDir = getRepoDir(repo); + currentRepoDir = repoDir; + + console.error( + `\n=== Cherry-picking ${repo}#${prNumber} to staging [${i + 1}/${prs.length}] ===` + ); + + // Get merge commit SHA + let mergeSha = inputMergeSha; + if (!mergeSha) { + console.error("Fetching merge commit SHA..."); + const 
mergeInfo = getMergeCommit(repo, prNumber); + if (mergeInfo.error) { + console.error(`⚠ ${mergeInfo.error} — skipping`); + results.skipped.push({ repo, prNumber, reason: mergeInfo.error }); + continue; + } + mergeSha = mergeInfo.sha; + } + console.error(`Merge commit: ${mergeSha.slice(0, 10)}`); + + // Fetch latest + runGit(["fetch", "origin"], repoDir); + + // Get commits to cherry-pick + const commits = getCommitsToCherry(repoDir, mergeSha); + if (commits.length === 0) { + console.error("⚠ No commits found to cherry-pick — skipping"); + results.skipped.push({ repo, prNumber, reason: "No commits found" }); + continue; + } + + console.error( + `Found ${commits.length} commit(s):\n${commits.map((c) => ` ${c.sha.slice(0, 10)} ${c.message}`).join("\n")}` + ); + + // Checkout staging (only once per repo) + if (!stagingCheckedOut) { + console.error("Checking out staging branch..."); + const checkoutResult = runGit(["checkout", "staging"], repoDir, { + allowFailure: true, + }); + if (!checkoutResult.success) { + // Try tracking remote + const trackResult = runGit( + ["checkout", "-b", "staging", "origin/staging"], + repoDir, + { allowFailure: true } + ); + if (!trackResult.success) { + console.error(`✗ Cannot checkout staging: ${trackResult.stderr}`); + results.skipped.push({ + repo, + prNumber, + reason: "Cannot checkout staging branch", + }); + continue; + } + } + + console.error("Pulling latest staging..."); + const pullResult = runGit(["pull", "origin", "staging"], repoDir, { + allowFailure: true, + }); + if (!pullResult.success) { + // Reset to remote if pull fails (e.g. 
diverged) + runGit(["reset", "--hard", "origin/staging"], repoDir); + } + stagingCheckedOut = true; + } + + // Cherry-pick each commit individually + for (let j = 0; j < commits.length; j++) { + const commit = commits[j]; + console.error( + `Cherry-picking [${j + 1}/${commits.length}]: ${commit.sha.slice(0, 10)} ${commit.message}` + ); + + const cpResult = runGit(["cherry-pick", commit.sha], repoDir, { + allowFailure: true, + }); + + if (!cpResult.success) { + // Check if it's a conflict + const statusResult = runGit(["status", "--porcelain"], repoDir, { + allowFailure: true, + }); + const conflictFiles = statusResult.stdout + .split("\n") + .filter((l) => l.startsWith("UU ") || l.startsWith("AA ")) + .map((l) => l.slice(3).trim()); + + if (conflictFiles.length > 0) { + const isChangelogOnly = + conflictFiles.length > 0 && + conflictFiles.every( + (f) => f === "CHANGELOG.md" || f.endsWith("/CHANGELOG.md") + ); + + if (isChangelogOnly) { + console.error( + "\n=== CHANGELOG conflict — agent resolution needed ===" + ); + console.error(`Files: ${conflictFiles.join(", ")}`); + console.error( + `Commit: ${commit.sha.slice(0, 10)} ${commit.message}` + ); + console.error("\nTo resolve:"); + console.error( + ` 1. Read ${path.join(repoDir, "CHANGELOG.md")} with conflict markers` + ); + console.error( + " 2. Resolve semantically (upstream/staging entries first, then ours)" + ); + console.error( + " 3. git add CHANGELOG.md && git cherry-pick --continue" + ); + console.error(" 4. 
Re-run staging-cherry-pick for remaining PRs"); + + results.conflict = { + repo, + prNumber, + repoDir, + commit: commit.sha, + commitMessage: commit.message, + conflictFiles, + type: "changelog", + remainingCommits: commits.slice(j + 1), + remainingPRs: prs.slice(i + 1), + }; + results.status = "changelog_conflict"; + exitCode = 3; + } else { + console.error(`✗ Code conflict in: ${conflictFiles.join(", ")}`); + console.error("Aborting cherry-pick..."); + runGit(["cherry-pick", "--abort"], repoDir, { + allowFailure: true, + }); + + results.conflict = { + repo, + prNumber, + repoDir, + commit: commit.sha, + commitMessage: commit.message, + conflictFiles, + type: "code", + }; + results.status = "code_conflict"; + exitCode = 1; + } + break; + } + + // Not a conflict — some other failure + console.error(`✗ Cherry-pick failed: ${cpResult.stderr}`); + + // Check if it's an empty commit (already applied) + if ( + cpResult.stderr.includes("empty") || + cpResult.stdout.includes("empty") + ) { + console.error(" (Commit already applied — skipping)"); + runGit(["cherry-pick", "--skip"], repoDir, { allowFailure: true }); + continue; + } + + runGit(["cherry-pick", "--abort"], repoDir, { allowFailure: true }); + results.skipped.push({ + repo, + prNumber, + reason: `Cherry-pick failed: ${cpResult.stderr}`, + }); + break; + } + + console.error(` ✓ Applied`); + } + + if (exitCode !== 0) break; + + // If we got here without conflict, all commits cherry-picked + if (!results.conflict) { + results.cherryPicked.push({ + repo, + prNumber, + mergeSha, + commits: commits.map((c) => ({ + sha: c.sha.slice(0, 10), + message: c.message, + })), + }); + } + } + + // Summary + console.error("\n=== Cherry-Pick Summary ==="); + if (results.cherryPicked.length > 0) { + console.error(`Cherry-picked (${results.cherryPicked.length}):`); + for (const r of results.cherryPicked) { + console.error( + ` ✓ ${r.repo}#${r.prNumber} (${r.commits.length} commit(s))` + ); + } + } + if (results.skipped.length > 0) 
{ + console.error(`Skipped (${results.skipped.length}):`); + for (const r of results.skipped) { + console.error(` ⚠ ${r.repo}#${r.prNumber}: ${r.reason}`); + } + } + if (results.conflict) { + console.error( + `\nConflict: ${results.conflict.repo}#${results.conflict.prNumber} (${results.conflict.type})` + ); + } + + console.log(JSON.stringify(results, null, 2)); + process.exit(exitCode); +} + +main().catch((e) => { + console.error(e); + process.exit(1); +}); diff --git a/.cursor/skills/standup/SKILL.md b/.cursor/skills/standup/SKILL.md new file mode 100644 index 0000000..da5b022 --- /dev/null +++ b/.cursor/skills/standup/SKILL.md @@ -0,0 +1,255 @@ +--- +name: standup +description: Generate a daily standup document from Asana and GitHub activity, upload to a persistent private gist. Use when the user wants to create standup notes. +compatibility: Requires gh, jq. ASANA_TOKEN for Asana integration. +metadata: + author: j0ntz +--- + +<goal>Generate a daily standup document from Asana + GitHub activity, upload to a single persistent private gist.</goal> + +<rules> +<rule id="links-as-titles">Task/PR names are the clickable link: `[{name}]({url})`. Never add a separate URL.</rule> +<rule id="no-reassign-in-accomplishments">Reassignment actions belong ONLY in the Handoffs section. Never list them under Accomplishments.</rule> +<rule id="single-gist">All standup files go into ONE gist with description "HUDL Notes". Create on first run, add files on subsequent runs. Never overwrite — append a suffix (`-1`, `-2`, etc.) 
if the filename exists.</rule> +<rule id="cleanup">Delete the local file after successful gist upload.</rule> +<rule id="script-timeout">Set `block_until_ms: 120000` for each companion script.</rule> +</rules> + +<step id="1" name="Fetch activity from both sources"> +Run both companion scripts **in parallel** (two Shell tool calls in one message): + +```bash +scripts/asana-standup.sh +``` +```bash +scripts/github-pr-activity.sh +``` + +If the user supplies a specific date, pass `--date YYYY-MM-DD` to both. + +Capture stdout (JSON) and stderr (diagnostics) separately for each. +</step> + +<step id="2" name="Merge and deduplicate"> +Parse both JSON outputs. The GitHub JSON has `addressed` and `reviewed` arrays. Each entry may have an `asana_gid` field extracted from the PR body. Use it to link GitHub activity to Asana tasks: + +- **GitHub `addressed` + matching Asana task** (same `asana_gid`): Add an action `{"type": "addressed_review_comments", "detail": ""}` to the matching Asana task's `actions` array. Do NOT create a separate entry. +- **GitHub `addressed` + no Asana match**: Create a new task-like entry with `actions: [{"type": "addressed_review_comments", "detail": ""}]`, using the PR title, URL, and repo as the project. +- **GitHub `reviewed` + matching Asana task**: Add an action `{"type": "reviewed_pr", "detail": "{review_state}"}` to the matching Asana task's `actions` array. +- **GitHub `reviewed` + no Asana match**: Create a new task-like entry with `actions: [{"type": "reviewed_pr", "detail": "{review_state}"}]`, using the PR title, URL, and repo as the project. +</step> + +<step id="3" name="Generate markdown"> +Build the markdown file with EXACTLY the structure below. Every heading, bullet, and blank line matters. + +<sub-step name="3a: Header"> +Line 1 of the file. Use the TARGET date (from the Asana JSON `date` field), not today. 
+ +``` +# HUDL Notes — {full_weekday_name} {full_month_name} {day}, {year} +``` + +Example: `# HUDL Notes — Monday February 17, 2026` +</sub-step> + +<sub-step name="3b: Accomplishments"> +``` +## Accomplishments {day_label} +``` + +Use `day_label` from the Asana JSON (either `"yesterday"` or `"Friday"`). + +Categorize each task/PR into exactly ONE subsection based on its PRIMARY action. Determine the primary action using this priority (highest first): + +1. `prd` → goes in **PR'd** +2. `addressed_pr_comments` OR `addressed_review_comments` → goes in **Addressed PR Comments** +3. `reviewed_pr` → goes in **Reviewed PRs** +4. anything else (`commented`, `completed`, `moved`, `added to project`) → goes in **General** + +A task appears in only ONE subsection — the highest-priority one that matches any of its actions. + +**Subsection: PR'd** — include only if at least one task qualifies. + +``` +### PR'd + +- [{task_name}]({task_url}) ({project_name}) +``` + +One bullet per task. No action text — the heading says it. Append `({project})` only if non-empty. + +If the task ALSO has secondary actions (like `commented`), append them after ` — `: + +``` +- [{task_name}]({task_url}) ({project_name}) — Commented: "first 150 chars" +``` + +**Subsection: Addressed PR Comments** — include only if at least one task qualifies. + +``` +### Addressed PR Comments + +- [{task_name}]({task_url}) ({project_name}) +``` + +Same format as PR'd. Append secondary actions after ` — ` if present. + +**Subsection: Reviewed PRs** — include only if at least one task qualifies. + +``` +### Reviewed PRs + +- [{pr_title}]({pr_url}) ({repo}) — approved +``` + +Append the review verdict in lowercase after ` — `. Map `review_state`: +- `APPROVED` → `approved` +- `CHANGES_REQUESTED` → `changes requested` +- `COMMENTED` → `commented` + +**Subsection: General** — include only if at least one task qualifies. 
+ +``` +### General + +- [{task_name}]({task_url}) ({project_name}) — Commented: "first 150 chars" +``` + +Format each action type: +- `commented` → `Commented: "{detail}"` +- `completed` → `Completed` +- `moved` → `Moved: {detail}` +- `added to project` → `Added to {detail}` + +If a task has multiple actions in General, join with `; `: + +``` +- [{task_name}]({task_url}) ({project_name}) — Commented: "detail"; Completed +``` + +**Omit any subsection that would have zero bullets.** +</sub-step> + +<sub-step name="3c: Goals Today"> +``` +## Goals Today +``` + +Scan the Asana `tasks` array for entries where `status` equals `"Publish Needed"`. For each, write: + +``` +- Publish [{task_name}]({task_url}) +``` + +After all publish items (or immediately if there are none), add one blank bullet for the user to fill in: + +``` +- +``` +</sub-step> + +<sub-step name="3d: Handoffs"> +``` +## Handoffs +``` + +Group handoff entries by type, then by person. + +**Reassignments** — group by the `detail` field (assignee name). Write one `### {assignee_name}` heading per person, then list all tasks reassigned to them: + +``` +### William Swanson + +- [{task_name}]({task_url}) + +### Matthew Piche + +- [{task_name}]({task_url}) +``` + +**Blockers** — if any handoff has `kind=blocker`, add a Blocked subsection: + +``` +### Blocked + +- [{task_name}]({task_url}) — {detail} +``` + +If the handoffs array is completely empty, write: + +``` +None +``` +</sub-step> + +<sub-step name="3e: Debug"> +Add a horizontal rule, then a collapsed details block. + +``` +--- + +<details><summary>Debug: {N} active tasks</summary> + +``` + +Where `{N}` is the length of the `active_tasks` array from the Asana JSON. + +**Non-VN tasks**: For each entry in `active_tasks` where `status` is NOT `"Verification Needed"`, write: + +``` +- [{name}]({url}) — {status} ({role}) +``` + +**VN summary**: Count entries where `status` is `"Verification Needed"`. 
Group by `role` and write ONE summary line: + +``` +- {total} tasks in Verification Needed ({M} assignee, {X} implementor, {Y} reviewer) +``` + +Omit role counts that are zero. Example: `- 68 tasks in Verification Needed (5 assignee, 48 implementor, 15 reviewer)` + +End with the search stats and close the details tag: + +``` + +*Searched {candidate_count} candidates, matched {task_count}* + +</details> +``` + +`candidate_count` and `task_count` come from the Asana JSON. +</sub-step> +</step> + +<step id="4" name="Upload to gist and clean up"> +1. Write the markdown to `hudl-{date}.md` in the current working directory. +2. Upload to gist using this exact bash logic: + +```bash +GIST_ID=$(gh gist list --limit 100 --filter "HUDL Notes" | head -1 | awk '{print $1}') +FILENAME="hudl-{date}.md" + +if [ -n "$GIST_ID" ]; then + FILES=$(gh gist view "$GIST_ID" --files) + N=1 + BASE="hudl-{date}" + while echo "$FILES" | grep -q "$FILENAME"; do + N=$((N + 1)) + FILENAME="${BASE}-${N}.md" + done + [ "$FILENAME" != "hudl-{date}.md" ] && mv "hudl-{date}.md" "$FILENAME" + gh gist edit "$GIST_ID" --add "$FILENAME" +else + gh gist create --desc "HUDL Notes" "$FILENAME" + GIST_ID=$(gh gist list --limit 1 --filter "HUDL Notes" | awk '{print $1}') +fi + +rm "$FILENAME" +``` + +3. Present a brief summary to the user: + - Number of accomplishment items + - Number of handoffs + - Gist URL: `https://gist.github.com/{username}/{GIST_ID}` +</step> diff --git a/.cursor/skills/standup/scripts/asana-standup.sh b/.cursor/skills/standup/scripts/asana-standup.sh new file mode 100755 index 0000000..6a4830a --- /dev/null +++ b/.cursor/skills/standup/scripts/asana-standup.sh @@ -0,0 +1,282 @@ +#!/usr/bin/env bash +# asana-standup.sh — Fetch Asana tasks the user interacted with on a given day. +# Outputs structured JSON for standup document generation. +# +# Usage: +# asana-standup.sh [--date YYYY-MM-DD] +# +# If --date is omitted, defaults to yesterday (or Friday if today is Monday). 
+# +# Requires env var: ASANA_TOKEN +# +# Output: JSON { date, day_label, user_name, task_count, candidate_count, +# tasks: [...], handoffs: [...], active_tasks: [...] } +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" + +TARGET_DATE="" +while [[ $# -gt 0 ]]; do + case "$1" in + --date) TARGET_DATE="$2"; shift 2 ;; + *) echo "Unknown: $1" >&2; exit 1 ;; + esac +done + +if [[ -z "${ASANA_TOKEN:-}" ]]; then + echo "Error: ASANA_TOKEN not set" >&2 + exit 1 +fi + +USER_INFO=$("$SCRIPT_DIR/../../asana-whoami.sh" --name) +USER_GID=$(echo "$USER_INFO" | awk '{print $1}') +USER_NAME=$(echo "$USER_INFO" | cut -d' ' -f2-) + +CACHE_KEY=$(echo "$ASANA_TOKEN" | shasum -a 256 | cut -c1-16) +WORKSPACE_CACHE="/tmp/asana-workspace-$CACHE_KEY.txt" +if [[ -f "$WORKSPACE_CACHE" ]]; then + WORKSPACE_GID=$(cat "$WORKSPACE_CACHE") +else + WORKSPACE_GID=$(curl -s "https://app.asana.com/api/1.0/users/me?opt_fields=workspaces" \ + -H "Authorization: Bearer $ASANA_TOKEN" | \ + python3 -c "import sys,json; print(json.load(sys.stdin)['data']['workspaces'][0]['gid'])") + echo "$WORKSPACE_GID" > "$WORKSPACE_CACHE" +fi + +export ASANA_TOKEN USER_GID USER_NAME WORKSPACE_GID TARGET_DATE + +python3 - << 'PYEOF' +import json, os, re, sys, urllib.request, urllib.parse, urllib.error +from datetime import date, timedelta + +API = "https://app.asana.com/api/1.0" +TOKEN = os.environ["ASANA_TOKEN"] +USER_GID = os.environ["USER_GID"] +USER_NAME = os.environ["USER_NAME"] +WORKSPACE = os.environ["WORKSPACE_GID"] +TARGET_DATE_STR = os.environ.get("TARGET_DATE", "") + +STATUS_FIELD_GID = "1190660107346181" + + +def api_get(path, params=None): + url = f"{API}{path}" + if params: + url += "?" 
+ urllib.parse.urlencode(params, doseq=True) + req = urllib.request.Request(url, headers={"Authorization": f"Bearer {TOKEN}"}) + try: + with urllib.request.urlopen(req) as resp: + return json.loads(resp.read()) + except urllib.error.HTTPError as e: + body = e.read().decode() if e.fp else "" + print(f"API_ERROR: {e.code} {path} {body[:200]}", file=sys.stderr) + return {"data": []} + + +# --- Date calculation --- +if TARGET_DATE_STR: + target = date.fromisoformat(TARGET_DATE_STR) + day_label = target.strftime("%A") +else: + today = date.today() + if today.weekday() == 0: # Monday + target = today - timedelta(days=3) + day_label = "Friday" + else: + target = today - timedelta(days=1) + day_label = "yesterday" + TARGET_DATE_STR = target.isoformat() + +# ±1 day buffer handles modified_on drift (task modified yesterday + today +# has modified_on=today, so we need the window slightly wider than exact day). +window_start = (target - timedelta(days=1)).isoformat() +window_end = (target + timedelta(days=1)).isoformat() + +# --- Search queries --- +search_path = f"/workspaces/{WORKSPACE}/tasks/search" +opt = "name,assignee.name,memberships.project.name,custom_fields.gid,custom_fields.display_value,permalink_url" + +search_filters = [ + {"assignee.any": USER_GID}, + {"assigned_by.any": USER_GID}, +] + +tasks_by_gid = {} +for extra in search_filters: + params = { + "modified_on.after": window_start, + "modified_on.before": window_end, + "opt_fields": opt, + "limit": "100", + **extra, + } + result = api_get(search_path, params) + for t in result.get("data", []): + if t["gid"] not in tasks_by_gid: + tasks_by_gid[t["gid"]] = t + +print(f"Found {len(tasks_by_gid)} candidate tasks", file=sys.stderr) + +candidate_count = len(tasks_by_gid) + +# --- Fetch stories per task, categorize user actions --- +output_tasks = [] +handoffs = [] + +for gid, task in tasks_by_gid.items(): + stories = api_get(f"/tasks/{gid}/stories", { + "opt_fields": 
"resource_subtype,text,created_by.gid,created_by.name,created_at", + "limit": "100", + }) + + story_list = stories.get("data", []) + + # Pass 1: Detect status transitions to "Review Needed" (any author) + pr_action = None + for s in story_list: + created_at = s.get("created_at", "")[:10] + if created_at != TARGET_DATE_STR: + continue + if s.get("resource_subtype") == "comment_added": + continue + text_lc = ((s.get("text") or "")).lower() + if re.search(r"to\s+'?review needed", text_lc): + if re.search(r"from\s+'?changes needed", text_lc): + pr_action = {"type": "addressed_pr_comments", "detail": ""} + else: + pr_action = {"type": "prd", "detail": ""} + + # Pass 2: User's own actions (comments, moves, etc.) + user_actions = [] + for s in story_list: + created_at = s.get("created_at", "")[:10] + if created_at != TARGET_DATE_STR: + continue + if (s.get("created_by") or {}).get("gid") != USER_GID: + continue + + subtype = s.get("resource_subtype", "") + text = (s.get("text") or "").strip() + short = (text[:150] + "...") if len(text) > 150 else text + + if subtype == "comment_added": + user_actions.append({"type": "commented", "detail": short}) + elif subtype == "assigned": + m = re.search(r'assigned to (.+)$', text) + target_name = m.group(1).strip() if m else "" + if target_name.lower() == "you" or target_name == USER_NAME: + continue + if not target_name: + target_name = (task.get("assignee") or {}).get("name", "someone") + handoffs.append({ + "gid": gid, + "name": task.get("name", ""), + "url": task.get("permalink_url", f"https://app.asana.com/0/0/{gid}/f"), + "kind": "reassigned", + "detail": target_name, + }) + elif subtype == "marked_complete": + user_actions.append({"type": "completed", "detail": short}) + elif subtype == "section_changed": + user_actions.append({"type": "moved", "detail": short}) + elif subtype == "added_to_project": + user_actions.append({"type": "added to project", "detail": short}) + + if pr_action: + user_actions.append(pr_action) + + if 
not user_actions: + continue + + project = "" + for m in task.get("memberships", []): + p = (m.get("project") or {}).get("name", "") + if p: + project = p + break + + status = "" + for f in task.get("custom_fields", []): + if f.get("gid") == STATUS_FIELD_GID: + status = f.get("display_value") or "" + break + + output_tasks.append({ + "gid": gid, + "name": task.get("name", ""), + "url": task.get("permalink_url", f"https://app.asana.com/0/0/{gid}/f"), + "project": project, + "status": status, + "assignee": (task.get("assignee") or {}).get("name", ""), + "actions": user_actions, + }) + + if "block" in status.lower(): + handoffs.append({ + "gid": gid, + "name": task.get("name", ""), + "url": task.get("permalink_url", f"https://app.asana.com/0/0/{gid}/f"), + "kind": "blocker", + "detail": f"Status: {status}", + }) + +# --- Active tasks where user is involved (for debug) --- +ACTIVE_STATUSES = {"Started", "Review Needed", "Changes Needed", "Publish Needed", "Verification Needed"} +IMPLEMENTOR_FIELD = "1203334386796983" +REVIEWER_FIELD = "1203334388004673" + +active_window_start = (target - timedelta(days=90)).isoformat() +active_result = api_get(search_path, { + "followers.any": USER_GID, + f"custom_fields.{STATUS_FIELD_GID}.is_set": "true", + "modified_on.after": active_window_start, + "opt_fields": "name,assignee.name,assignee.gid,custom_fields.gid,custom_fields.display_value,custom_fields.people_value.gid,permalink_url", + "limit": "100", +}) + +active_tasks = [] +seen_gids = set() +for t in active_result.get("data", []): + if t["gid"] in seen_gids: + continue + seen_gids.add(t["gid"]) + status_name = "" + is_implementor = False + is_reviewer = False + for f in t.get("custom_fields", []): + fgid = f.get("gid", "") + if fgid == STATUS_FIELD_GID: + status_name = f.get("display_value") or "" + elif fgid == IMPLEMENTOR_FIELD: + for p in (f.get("people_value") or []): + if (p or {}).get("gid") == USER_GID: + is_implementor = True + elif fgid == REVIEWER_FIELD: + for p in 
(f.get("people_value") or []): + if (p or {}).get("gid") == USER_GID: + is_reviewer = True + if status_name not in ACTIVE_STATUSES: + continue + assignee_gid = ((t.get("assignee") or {}).get("gid", "")) + if assignee_gid != USER_GID and not is_implementor and not is_reviewer: + continue + role = "assignee" if assignee_gid == USER_GID else ("implementor" if is_implementor else "reviewer") + active_tasks.append({ + "name": t.get("name", ""), + "url": t.get("permalink_url", f"https://app.asana.com/0/0/{t['gid']}/f"), + "status": status_name, + "assignee": (t.get("assignee") or {}).get("name", ""), + "role": role, + }) + +print(json.dumps({ + "date": TARGET_DATE_STR, + "day_label": day_label, + "user_name": USER_NAME, + "task_count": len(output_tasks), + "candidate_count": candidate_count, + "tasks": output_tasks, + "handoffs": handoffs, + "active_tasks": active_tasks, +}, indent=2)) +PYEOF diff --git a/.cursor/skills/standup/scripts/github-pr-activity.sh b/.cursor/skills/standup/scripts/github-pr-activity.sh new file mode 100755 index 0000000..8cd2f15 --- /dev/null +++ b/.cursor/skills/standup/scripts/github-pr-activity.sh @@ -0,0 +1,206 @@ +#!/usr/bin/env bash +# github-pr-activity.sh — Fetch GitHub PR activity for a given day. +# Detects two categories: +# 1. Addressed review comments: user's own PRs where human reviews existed +# and the user pushed commits on the target date +# 2. Submitted reviews: PRs authored by others that the user reviewed on +# the target date +# +# Usage: +# github-pr-activity.sh [--date YYYY-MM-DD] +# +# Requires: gh CLI authenticated +# +# Output: JSON { date, username, addressed: [...], reviewed: [...] } +set -euo pipefail + +TARGET_DATE="" +while [[ $# -gt 0 ]]; do + case "$1" in + --date) TARGET_DATE="$2"; shift 2 ;; + *) echo "Unknown: $1" >&2; exit 1 ;; + esac +done + +if ! command -v gh &>/dev/null; then + echo "Error: gh CLI not installed" >&2; exit 1 +fi +if ! 
gh auth status &>/dev/null 2>&1; then + echo "PROMPT_GH_AUTH" >&2; exit 2 +fi + +USERNAME=$(gh api user --jq '.login') + +export TARGET_DATE USERNAME + +python3 - << 'PYEOF' +import json, os, re, subprocess, sys +from datetime import date, timedelta + +USERNAME = os.environ["USERNAME"] +TARGET_DATE_STR = os.environ.get("TARGET_DATE", "") + +if TARGET_DATE_STR: + target = date.fromisoformat(TARGET_DATE_STR) +else: + today = date.today() + if today.weekday() == 0: + target = today - timedelta(days=3) + else: + target = today - timedelta(days=1) + TARGET_DATE_STR = target.isoformat() + + +def gh_graphql(query, variables): + args = ["gh", "api", "graphql", "-f", f"query={query}"] + for k, v in variables.items(): + args.extend(["-f", f"{k}={v}"]) + result = subprocess.run(args, capture_output=True, text=True) + if result.returncode != 0: + print(f"GH_ERROR: {result.stderr[:300]}", file=sys.stderr) + return {"data": {"search": {"nodes": []}}} + parsed = json.loads(result.stdout) + if "errors" in parsed: + print(f"GQL_ERROR: {json.dumps(parsed['errors'][:2])}", file=sys.stderr) + return parsed + + +def extract_asana_gid(body): + if not body: + return None + m = re.search(r'asana\.com/\S*/(\d{10,})', body) + return m.group(1) if m else None + + +# --- Query 1: User's own PRs updated recently (check for addressed comments) --- +QUERY_AUTHORED = """ +query($search: String!) { + search(query: $search, type: ISSUE, first: 50) { + nodes { + ... 
on PullRequest { + number + title + url + body + repository { nameWithOwner } + reviews(last: 30) { + nodes { + author { login } + state + submittedAt + } + } + commits(last: 30) { + nodes { + commit { + committedDate + author { user { login } } + } + } + } + } + } + } +} +""" + +search_authored = f"is:pr author:{USERNAME} updated:>={TARGET_DATE_STR} sort:updated" +authored_raw = gh_graphql(QUERY_AUTHORED, {"search": search_authored}) + +addressed = [] +for node in authored_raw.get("data", {}).get("search", {}).get("nodes", []): + if not node or "number" not in node: + continue + + has_human_review = False + for r in (node.get("reviews") or {}).get("nodes", []): + if not r or not r.get("author"): + continue + reviewer = r["author"].get("login", "") + if reviewer == USERNAME or "[bot]" in reviewer: + continue + if r.get("state") in ("CHANGES_REQUESTED", "COMMENTED"): + has_human_review = True + break + + if not has_human_review: + continue + + has_commit_on_date = False + for c in (node.get("commits") or {}).get("nodes", []): + commit = (c or {}).get("commit", {}) + committed = (commit.get("committedDate") or "")[:10] + commit_user = ((commit.get("author") or {}).get("user") or {}).get("login", "") + if committed == TARGET_DATE_STR and commit_user == USERNAME: + has_commit_on_date = True + break + + if has_commit_on_date: + addressed.append({ + "pr_number": node["number"], + "pr_title": node["title"], + "pr_url": node["url"], + "repo": node["repository"]["nameWithOwner"], + "asana_gid": extract_asana_gid(node.get("body")), + }) + +# --- Query 2: PRs reviewed by user (not authored by user) --- +QUERY_REVIEWED = """ +query($search: String!) { + search(query: $search, type: ISSUE, first: 50) { + nodes { + ... 
on PullRequest { + number + title + url + body + repository { nameWithOwner } + reviews(last: 30) { + nodes { + author { login } + state + submittedAt + } + } + } + } + } +} +""" + +search_reviewed = f"is:pr reviewed-by:{USERNAME} -author:{USERNAME} updated:>={TARGET_DATE_STR} sort:updated" +reviewed_raw = gh_graphql(QUERY_REVIEWED, {"search": search_reviewed}) + +reviewed = [] +for node in reviewed_raw.get("data", {}).get("search", {}).get("nodes", []): + if not node or "number" not in node: + continue + + review_state = None + for r in (node.get("reviews") or {}).get("nodes", []): + if not r or not r.get("author"): + continue + if r["author"].get("login") != USERNAME: + continue + submitted = (r.get("submittedAt") or "")[:10] + if submitted == TARGET_DATE_STR: + review_state = r.get("state", "COMMENTED") + break + + if review_state: + reviewed.append({ + "pr_number": node["number"], + "pr_title": node["title"], + "pr_url": node["url"], + "repo": node["repository"]["nameWithOwner"], + "asana_gid": extract_asana_gid(node.get("body")), + "review_state": review_state, + }) + +print(json.dumps({ + "date": TARGET_DATE_STR, + "username": USERNAME, + "addressed": addressed, + "reviewed": reviewed, +}, indent=2)) +PYEOF diff --git a/.cursor/skills/task-review/SKILL.md b/.cursor/skills/task-review/SKILL.md new file mode 100644 index 0000000..2a3fa75 --- /dev/null +++ b/.cursor/skills/task-review/SKILL.md @@ -0,0 +1,102 @@ +--- +name: task-review +description: Fetch context from an Asana task, analyze it, present a summary, and determine the target repo. Use when the user provides an Asana task link for review. +compatibility: Requires jq. ASANA_TOKEN for Asana integration. +metadata: + author: j0ntz +--- + +<goal>Fetch context from an Asana task, analyze it, present a summary, and determine the target repo. 
This is the **single source of truth** for Asana task understanding — both `im.md` and `pr-create.md` delegate here.</goal> + +<rules description="Non-negotiable constraints."> +<rule id="summary-first">Present the task summary to the user BEFORE exploring any code. Code exploration happens after the user has seen the analysis.</rule> +<rule id="script-timeout">The `asana-get-context.sh` script can take up to 90s (PDF conversion is slow). Always set `block_until_ms: 120000` when invoking it.</rule> +</rules> + +<when-this-runs> +- Automatically as the first step of `im.md` when an Asana task link is provided +- Automatically as Step 1 of `pr-create.md` when an Asana task link is provided +- Can also be invoked standalone: `/task-review https://app.asana.com/1/.../task/<task_gid>` +</when-this-runs> + +<step id="1" name="Fetch task context and attachments"> +Extract the `task_gid` (the final numeric ID in the URL) and run: + +```bash +~/.cursor/skills/asana-get-context.sh <task_gid> +``` + +This fetches task metadata, comments, and **automatically downloads and processes attachments** to `/tmp/asana-task-<task_gid>/`: + +- **Text files** (`.md`, `.txt`, `.json`, `.csv`, `.log`, `.yaml`, `.yml`): Downloaded directly — read them. +- **PDFs**: Text-extracted first (`PDF_TEXT:` output). If the PDF is image-based, converted to page images (`PDF_PAGES:` output). +- **ZIPs**: Unpacked recursively (`UNPACKED:` output). Extracted files (including PDFs inside) are then processed by the same handlers. +- **Images** (`.png`, `.jpg`, `.gif`, `.webp`): Downloaded directly — read them. + +<sub-step name="Reading processed attachments"> +After the script completes, read the processed files based on the output: + +1. **`DOWNLOADED:` paths** — Read any `.txt`, `.md`, `.json`, `.csv`, `.yaml`, `.yml` files listed. +2. **`PDF_TEXT:` paths** — Read the extracted `.txt` file. This is the full text content of the PDF. +3. 
**`PDF_PAGES:` directories** — Read the page images (`page-01.png`, `page-02.png`, etc.) using the Read tool. For large documents (>20 pages), read the first 10 pages, then skim the rest by reading every 3rd-5th page. +4. **`UNPACKED:` directories** — List contents (`ls -R`), then read relevant files (text files, images, etc.). Skip macOS metadata (`__MACOSX/`, `.DS_Store`). +</sub-step> + +<sub-step name="No attachments case"> +If `ATTACHMENTS: (none)` appears in script output, do **not** probe `/tmp/asana-task-<task_gid>/`. Treat missing `/tmp` paths as expected in this case and continue to Step 2. +</sub-step> +</step> + +<step id="2" name="Determine target repo"> +**Task title prefixes are deterministic signals:** + +| Prefix | Repository | Branch from | +|--------|------|-------------| +| `gui:` | `edge-react-gui` | `develop` | +| `exch:` | `edge-exchange-plugins` | `master` | +| `accb:` | `edge-currency-accountbased` | `master` | +| `core:` | `edge-core-js` | `master` | + +**Always create feature branches from the "Branch from" column.** `edge-react-gui` uses `develop` as its integration branch; the others use `master`. + +If no prefix is present, infer from the task description, keywords, or attached PRs. If still unclear, ask the user. +</step> + +<step id="3" name="Summarize understanding"> +Present a concise summary to the user covering: + +1. **What**: One-sentence description of the task/bug in your own words (not just parroting the title) +2. **Why**: The motivation — what problem does this solve or what value does it add? +3. **Target repo**: Which repo (determined in Step 2) +4. **Scope**: What files/areas of the codebase are likely involved? Use the task description, comments, and your knowledge of the repo to estimate. +5. **Approach**: A brief proposed approach (1-3 bullets). If multiple approaches exist, list them with tradeoffs. +6. **Priority**: Note the priority level if set. 
+ +<sub-step name="Surfacing questions"> +After the summary, list any: +- **Ambiguities**: Requirements that are unclear or could be interpreted multiple ways +- **Missing info**: Information needed that isn't in the task +- **Contradictions**: Conflicting statements between the description and comments +- **Decisions needed**: Choices that the user should weigh in on before implementation begins + +If there are no questions, say so explicitly — don't fabricate them. +</sub-step> + +<sub-step name="Using comments and attachments"> +- **Comments**: Read for updated requirements, decisions, or clarifications that may override the original description. Call out any that change scope. +- **Text attachments**: Read downloaded text files for additional context (specs, requirements, analysis). Reference relevant content in the summary. +- **PDF attachments**: Summarize key content from extracted text or page images. For brand guidelines, note colors, logos, naming, and other visual details. +- **ZIP attachments**: Note the contents and any relevant files found inside. For asset packages (logos, icons), describe the available formats and variants. +- **Image attachments**: View and describe the content. Note any UI mockups, designs, or reference screenshots. +</sub-step> +</step> + +<step id="4" name="Wait for confirmation (im.md and standalone only)"> +When invoked from `im.md` or standalone, end with a clear prompt: + +> Does this match your understanding? Any adjustments before I start? + +**Do NOT begin implementation until the user confirms.** + +When invoked from `pr-create.md`, skip this step — the task context is used for repo/branch resolution and PR enrichment, not for implementation planning. 
+</step> diff --git a/.cursor/skills/verify-repo.sh b/.cursor/skills/verify-repo.sh new file mode 100755 index 0000000..7aa78f7 --- /dev/null +++ b/.cursor/skills/verify-repo.sh @@ -0,0 +1,348 @@ +#!/usr/bin/env node +// verify-repo.sh +// Runs full verification: CHANGELOG + code verification (prepare, tsc, lint, test) +// Usage: ./verify-repo.sh [repo-dir] [--base <upstream-ref>] [--skip-install] +// If repo-dir not provided, uses current directory +// If --base is provided, lint is scoped to files changed vs that ref +// If --skip-install is provided, skips the initial `yarn` dependency install +// +// Exit codes: +// 0 = All verification passed +// 1 = Code verification failed (prepare/tsc/lint/test) +// 2 = CHANGELOG verification failed + +const { execSync } = require("child_process"); +const { readFileSync, existsSync, writeFileSync } = require("fs"); +const path = require("path"); +const os = require("os"); + +// Parse arguments: positional repo-dir + optional --base <ref> + optional --require-changelog +let repoDir = process.cwd(); +let baseRef = null; +let requireChangelog = false; +let skipInstall = false; +const args = process.argv.slice(2); +for (let i = 0; i < args.length; i++) { + if (args[i] === "--base" && i + 1 < args.length) { + baseRef = args[++i]; + } else if (args[i] === "--require-changelog") { + requireChangelog = true; + } else if (args[i] === "--skip-install") { + skipInstall = true; + } else if (!args[i].startsWith("--")) { + repoDir = args[i]; + } +} + +const packageJsonPath = path.join(repoDir, "package.json"); +const changelogPath = path.join(repoDir, "CHANGELOG.md"); + +function sanitizeLabel(label) { + return label.replace(/[^a-z0-9]/gi, "-").replace(/-+/g, "-").replace(/^-|-$/g, ""); +} + +function runCommandWithLog(command, label, repoDir) { + const safeLabel = sanitizeLabel(label || command); + const logPath = path.join(os.tmpdir(), `verify-${safeLabel}-${Date.now()}-${Math.random().toString(36).slice(2)}.log`); + try { + const 
output = execSync(command, { + cwd: repoDir, + encoding: "utf8", + stdio: "pipe", + env: { ...process.env, FORCE_COLOR: "1" }, + }); + writeFileSync(logPath, output); + return { success: true, logPath }; + } catch (error) { + const stdout = error.stdout ? error.stdout.toString() : ""; + const stderr = error.stderr ? error.stderr.toString() : ""; + writeFileSync(logPath, stdout + stderr); + return { success: false, logPath, error }; + } +} + +// Detect repo type +const isGui = repoDir.includes("edge-react-gui"); + +console.log("=== Pre-Merge Verification ==="); +console.log(`Directory: ${repoDir}`); +console.log(""); + +// ============================================ +// CHANGELOG Verification +// ============================================ + +function verifyChangelog() { + if (!existsSync(changelogPath)) { + console.log("⏭ CHANGELOG verification - skipped (no CHANGELOG.md)"); + return { success: true, skipped: true }; + } + + console.log("▶ CHANGELOG verification..."); + + let content; + try { + content = readFileSync(changelogPath, "utf8"); + } catch (e) { + console.error(`✗ Failed to read CHANGELOG.md: ${e.message}`); + return { success: false, error: e.message }; + } + + const lines = content.split("\n"); + const errors = []; + const warnings = []; + let hasStagingSection = false; + let hasUnreleasedSection = false; + + const TYPE_ORDER = ["added", "changed", "deprecated", "fixed", "removed", "security"]; + + function entryType(line) { + const m = line.match(/^- (\w+):/i); + return m ? 
m[1].toLowerCase() : null; + } + + let currentSection = null; + let sectionEntries = []; + let sectionStartLine = 0; + + function validateSection() { + if (currentSection == null) return; + const isActive = currentSection === "unreleased" || currentSection === "staging"; + if (!isActive) return; + + // Empty section check removed — emptiness is validated per-PR via --require-changelog + + const seen = new Set(); + for (const { text, lineNum } of sectionEntries) { + const normalized = text.replace(/\s+/g, " ").trim(); + if (seen.has(normalized)) { + errors.push(`Line ${lineNum}: Duplicate entry in ${currentSection}: "${text.slice(0, 60)}..."`); + } + seen.add(normalized); + } + + let lastTypeIdx = -1; + for (const { text, lineNum } of sectionEntries) { + const type = entryType(text); + if (type == null) continue; + const idx = TYPE_ORDER.indexOf(type); + if (idx === -1) continue; + if (idx < lastTypeIdx) { + const expected = TYPE_ORDER[lastTypeIdx]; + errors.push(`Line ${lineNum}: "${type}" entry after "${expected}" in ${currentSection} — expected order: ${TYPE_ORDER.join(", ")}`); + } + lastTypeIdx = idx; + } + } + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + const lineNum = i + 1; + + if (line.startsWith("<<<<<<<") || line.startsWith("=======") || + line.startsWith(">>>>>>>") || line.startsWith("|||||||")) { + errors.push(`Line ${lineNum}: Unresolved conflict marker: "${line.slice(0, 40)}..."`); + } + + if (line.match(/^## Unreleased/i)) { + validateSection(); + hasUnreleasedSection = true; + currentSection = "unreleased"; + sectionEntries = []; + sectionStartLine = lineNum; + } else if (line.match(/^## .+\(staging\)/i)) { + validateSection(); + hasStagingSection = true; + currentSection = "staging"; + sectionEntries = []; + sectionStartLine = lineNum; + } else if (line.match(/^## \d+\.\d+\.\d+/)) { + validateSection(); + currentSection = "released"; + sectionEntries = []; + sectionStartLine = lineNum; + } + + if (currentSection != null && 
line.startsWith("- ")) { + sectionEntries.push({ text: line, lineNum }); + const isActive = currentSection === "unreleased" || currentSection === "staging"; + if (isActive && !line.match(/^- (added|changed|fixed|deprecated|removed|security):/i)) { + warnings.push(`Line ${lineNum}: Entry may not follow "- type: description" format`); + } + } + + if (line.match(/^-\s*$/)) { + errors.push(`Line ${lineNum}: Empty list item found`); + } + if (line.match(/^--/) || line.match(/^- -/)) { + errors.push(`Line ${lineNum}: Malformed list item`); + } + } + validateSection(); + + if (!hasUnreleasedSection && !hasStagingSection) { + errors.push("No '## Unreleased' or staging section found"); + } + + if (errors.length > 0) { + console.error("✗ CHANGELOG verification - FAILED"); + for (const e of errors) { + console.error(` ${e}`); + } + return { success: false, errors }; + } + + if (warnings.length > 0) { + console.log("✓ CHANGELOG verification - passed (with warnings)"); + for (const w of warnings) { + console.log(` ⚠ ${w}`); + } + } else { + console.log("✓ CHANGELOG verification - passed"); + } + + if (hasStagingSection && isGui) { + console.log(" ℹ Note: This repo has a staging section"); + } + + return { success: true, hasStagingSection }; +} + +// ============================================ +// Code Verification +// ============================================ + +function verifyCode() { + if (!existsSync(packageJsonPath)) { + console.log("⏭ Code verification - skipped (no package.json)"); + return { success: true, skipped: true }; + } + + let pkg; + try { + pkg = JSON.parse(readFileSync(packageJsonPath, "utf8")); + } catch (e) { + console.error(`✗ Failed to parse package.json: ${e.message}`); + return { success: false, error: e.message }; + } + + const scripts = pkg.scripts || {}; + const commands = ["prepare", "tsc", "lint", "test"]; + + console.log(""); + console.log("Code verification:"); + + if (!skipInstall) { + console.log("▶ yarn..."); + const installResult = 
runCommandWithLog("yarn", "yarn-install", repoDir); + if (!installResult.success) { + console.error(`✗ yarn - FAILED (log: ${installResult.logPath})\n`); + return { + success: false, + failedStep: "yarn", + logPath: installResult.logPath, + }; + } + console.log("✓ yarn - passed\n"); + } else { + console.log("⏭ yarn - skipped (--skip-install)"); + } + + for (const cmd of commands) { + if (scripts[cmd] == null) { + console.log(`⏭ yarn ${cmd} - skipped (not in package.json)`); + continue; + } + + // When a base ref is provided, scope lint to only files changed by the branch + if (cmd === "lint" && baseRef != null) { + let changedFiles; + try { + changedFiles = execSync( + `git diff --name-only --diff-filter=ACMR ${baseRef}...HEAD -- '*.ts' '*.tsx' '*.js' '*.jsx'`, + { cwd: repoDir, encoding: "utf8" } + ).trim(); + } catch (e) { + console.error(`✗ Failed to determine changed files for lint: ${e.message}`); + return { success: false, failedStep: "lint (changed files)" }; + } + + if (changedFiles.length === 0) { + console.log("⏭ yarn lint - skipped (no lintable files changed)"); + continue; + } + + const fileList = changedFiles.split("\n").map(f => `"${f}"`).join(" "); + const fileCount = changedFiles.split("\n").length; + console.log(`▶ eslint (${fileCount} changed file${fileCount === 1 ? 
"" : "s"} vs ${baseRef})...`); + const eslintResult = runCommandWithLog( + `npx eslint ${fileList}`, + `eslint-${fileCount}-files`, + repoDir + ); + if (eslintResult.success) { + console.log(`✓ eslint (changed files) - passed\n`); + continue; + } + console.error(`✗ eslint (changed files) - FAILED (log: ${eslintResult.logPath})\n`); + return { + success: false, + failedStep: "eslint (changed files)", + logPath: eslintResult.logPath, + }; + } + + console.log(`▶ yarn ${cmd}...`); + const yarnResult = runCommandWithLog(`yarn ${cmd}`, `yarn-${cmd}`, repoDir); + if (yarnResult.success) { + console.log(`✓ yarn ${cmd} - passed\n`); + continue; + } + console.error(`✗ yarn ${cmd} - FAILED (log: ${yarnResult.logPath})\n`); + return { + success: false, + failedStep: `yarn ${cmd}`, + logPath: yarnResult.logPath, + }; + } + + return { success: true }; +} + +// ============================================ +// Main +// ============================================ + +const changelogResult = verifyChangelog(); +if (!changelogResult.success) { + console.error("\n=== Verification FAILED (CHANGELOG) ==="); + process.exit(2); +} + +if (requireChangelog && baseRef) { + console.log("▶ CHANGELOG entry existence check..."); + try { + const diff = execSync(`git diff --name-only ${baseRef}...HEAD -- CHANGELOG.md`, { + cwd: repoDir, encoding: "utf8" + }).trim(); + if (diff.length === 0) { + console.error("✗ No CHANGELOG.md changes found but PR requires a changelog entry"); + console.error("\n=== Verification FAILED (CHANGELOG) ==="); + process.exit(2); + } + console.log("✓ CHANGELOG entry exists in diff"); + } catch (e) { + console.error(`✗ Failed to check CHANGELOG diff: ${e.message}`); + process.exit(2); + } +} + +const codeResult = verifyCode(); +if (!codeResult.success) { + console.error("\n=== Verification FAILED (Code) ==="); + console.error(`Failed step: ${codeResult.failedStep || codeResult.error}`); + process.exit(1); +} + +console.log("\n=== Verification PASSED ==="); 
+process.exit(0); diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b4c292e --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +node_modules/ +*.log +.env +.env.* diff --git a/README.md b/README.md new file mode 100644 index 0000000..707d180 --- /dev/null +++ b/README.md @@ -0,0 +1,33 @@ +# edge-dev-agents + +Development agent configurations: Cursor skills, Claude Code rules, and (soon) OpenClaw workspace files. + +## Contents + +- `.cursor/` — Skills, rules, and scripts synced from `~/.cursor/` via the `convention-sync` skill +- `.claude/` — Auto-generated `CLAUDE.md` and skills symlink for Claude Code compatibility +- `scripts/setup.sh` — Bootstrap script for deploying to new machines + +## Setup (new machine) + +```bash +git clone git@github.com:EdgeApp/edge-dev-agents.git ~/git/edge-dev-agents +cd ~/git/edge-dev-agents +./scripts/setup.sh +``` + +This creates symlinks from `~/.cursor/` and `~/.claude/` into the repo, so both Cursor and Claude Code discover the skills and rules. + +## Syncing + +After editing skills locally in `~/.cursor/`, sync to this repo: + +``` +/convention-sync +``` + +To pull changes from this repo into your local setup: + +```bash +./scripts/setup.sh +``` diff --git a/scripts/setup.sh b/scripts/setup.sh new file mode 100755 index 0000000..7b358ce --- /dev/null +++ b/scripts/setup.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash +# setup.sh — Bootstrap edge-dev-agents on a new machine. +# Usage: ./scripts/setup.sh +# +# Creates symlinks from ~/.cursor/ and ~/.claude/ into this repo's +# .cursor/ content, then generates ~/.claude/CLAUDE.md from alwaysApply rules. +# Idempotent — safe to re-run. + +set -euo pipefail + +REPO_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +CURSOR_SRC="$REPO_DIR/.cursor" + +if [[ ! -d "$CURSOR_SRC/skills" ]]; then + echo "ERROR: $CURSOR_SRC/skills not found. Is this the edge-dev-agents repo?" >&2 + exit 1 +fi + +# 1. 
Symlink ~/.cursor/{skills,rules,scripts} → repo equivalents +echo "Setting up ~/.cursor/ symlinks..." +mkdir -p "$HOME/.cursor" +for dir in skills rules scripts; do + target="$CURSOR_SRC/$dir" + link="$HOME/.cursor/$dir" + if [[ -L "$link" ]]; then + current="$(readlink "$link")" + if [[ "$current" == "$target" ]]; then + echo " $dir: already linked" + continue + fi + rm "$link" + elif [[ -d "$link" ]]; then + echo " WARNING: $link is a real directory, not a symlink. Skipping." + echo " Remove it manually if you want to link to the repo." + continue + fi + ln -s "$target" "$link" + echo " $dir: linked → $target" +done + +# 2. Symlink ~/.claude/skills → ~/.cursor/skills +echo "Setting up ~/.claude/skills symlink..." +mkdir -p "$HOME/.claude" +CLAUDE_SKILLS="$HOME/.claude/skills" +if [[ -L "$CLAUDE_SKILLS" ]]; then + current="$(readlink "$CLAUDE_SKILLS")" + if [[ "$current" != "$HOME/.cursor/skills" ]]; then + rm "$CLAUDE_SKILLS" + ln -s "$HOME/.cursor/skills" "$CLAUDE_SKILLS" + echo " skills: relinked → ~/.cursor/skills" + else + echo " skills: already linked" + fi +elif [[ ! -e "$CLAUDE_SKILLS" ]]; then + ln -s "$HOME/.cursor/skills" "$CLAUDE_SKILLS" + echo " skills: linked → ~/.cursor/skills" +fi + +# 3. Generate ~/.claude/CLAUDE.md from alwaysApply rules +GEN_SCRIPT="$CURSOR_SRC/skills/convention-sync/scripts/generate-claude-md.sh" +if [[ -x "$GEN_SCRIPT" ]]; then + echo "Generating ~/.claude/CLAUDE.md..." + "$GEN_SCRIPT" >/dev/null + echo " CLAUDE.md generated" +else + echo "WARNING: generate-claude-md.sh not found or not executable" +fi + +# 4. Make all .sh files executable +find "$REPO_DIR" -name "*.sh" -exec chmod +x {} + + +# 5. Check prerequisites +echo "" +echo "Checking prerequisites..." +for cmd in gh jq node; do + if command -v "$cmd" >/dev/null 2>&1; then + echo " $cmd: $(command -v "$cmd")" + else + echo " WARNING: $cmd not found" + fi +done + +echo "" +echo "Setup complete." 
From 7ac0630af3652ba6cdf82d7e7e6f634774e5482e Mon Sep 17 00:00:00 2001 From: j0ntz <jnthntzng@gmail.com> Date: Tue, 24 Mar 2026 16:06:11 -0700 Subject: [PATCH 2/6] Update convention-sync to target edge-dev-agents --- .cursor/skills/convention-sync/SKILL.md | 10 +++--- .../scripts/convention-sync.sh | 16 +++++----- README.md | 32 +------------------ 3 files changed, 14 insertions(+), 44 deletions(-) diff --git a/.cursor/skills/convention-sync/SKILL.md b/.cursor/skills/convention-sync/SKILL.md index cde8050..4ec3b9b 100644 --- a/.cursor/skills/convention-sync/SKILL.md +++ b/.cursor/skills/convention-sync/SKILL.md @@ -1,12 +1,12 @@ --- name: convention-sync -description: Sync cursor files between ~/.cursor/ and the edge-conventions repo, commit, push, and update PR description. Use when the user wants to sync conventions. +description: Sync cursor files between ~/.cursor/ and the edge-dev-agents repo, commit, push, and update PR description. Use when the user wants to sync conventions. compatibility: Requires git, gh. metadata: author: j0ntz --- -<goal>Sync cursor files between `~/.cursor/` and the `edge-conventions` repo, commit, push, and update PR description from README. Also maintains cross-tool compatibility: symlinks `~/.claude/skills` → `~/.cursor/skills` and generates `~/.claude/CLAUDE.md` from always-apply rules.</goal> +<goal>Sync cursor files between `~/.cursor/` and the `edge-dev-agents` repo, commit, push, and update PR description from README. Also maintains cross-tool compatibility: symlinks `~/.claude/skills` → `~/.cursor/skills` and generates `~/.claude/CLAUDE.md` from always-apply rules.</goal> <rules> <rule id="local-is-canonical">`~/.cursor/` is the canonical source. Edits happen locally; the repo is the distribution copy. Default direction is `user-to-repo`. Use `--repo-to-user` only for onboarding or pulling changes authored by others. 
The script does not detect bidirectional conflicts — whichever direction you run overwrites the other side.</rule> @@ -15,11 +15,11 @@ metadata: <rule id="no-script-bypass">If the script fails, report the error and STOP.</rule> <rule id="readme-is-source">`.cursor/README.md` is the source of truth for documentation. The script mirrors it to the PR description automatically.</rule> <rule id="claude-compat">Every run ensures `~/.claude/skills` symlinks to `~/.cursor/skills` and regenerates `~/.claude/CLAUDE.md` from `alwaysApply: true` rules. This enables OpenCode and Claude Code to discover skills and rules without separate config.</rule> -<rule id="target-repo-resolution">For user-to-repo sync, target the `edge-conventions` checkout. Do NOT assume the current repo is correct just because it contains a `.cursor/` folder. Let the companion script resolve and validate the repo path.</rule> +<rule id="target-repo-resolution">For user-to-repo sync, target the `edge-dev-agents` checkout. Do NOT assume the current repo is correct just because it contains a `.cursor/` folder. Let the companion script resolve and validate the repo path.</rule> </rules> <step id="1" name="Detect changes and PR status"> -Use the companion script's default repo resolution first. It targets the `edge-conventions` checkout and fails if the resolved or provided repo is not actually `edge-conventions`. +Use the companion script's default repo resolution first. It targets the `edge-dev-agents` checkout and fails if the resolved or provided repo is not actually `edge-dev-agents`. Run the sync script in dry-run mode: @@ -78,7 +78,7 @@ cd <repo-dir> && gh pr edit --body-file .cursor/README.md <edge-cases> <case name="Reverse sync (repo → user)">If the user says "pull from repo" or "update my local", run with `--repo-to-user --stage` instead. No git operations needed.</case> -<case name="Current repo has a .cursor folder but is not edge-conventions">Do not sync into that repo. 
Fall back to `~/git/edge-conventions` or ask for the correct repo path.</case> +<case name="Current repo has a .cursor folder but is not edge-dev-agents">Do not sync into that repo. Fall back to `~/git/edge-dev-agents` or ask for the correct repo path.</case> <case name="Dry-run resolved a repo path">Reuse the `repoDir` value from the script's JSON output for the PR query, commit run, push, and PR edit steps.</case> <case name="Selective sync">To permanently exclude files, add glob patterns to `~/.cursor/.syncignore` (one per line, `#` comments). The script skips matching entries and reports them in the `ignored` array. To exclude ad-hoc, remove files from staging with `git reset HEAD .cursor/<file>` before committing.</case> <case name="No README">If `.cursor/README.md` doesn't exist, skip PR description update and warn the user.</case> diff --git a/.cursor/skills/convention-sync/scripts/convention-sync.sh b/.cursor/skills/convention-sync/scripts/convention-sync.sh index fe0adc3..2cd56c1 100755 --- a/.cursor/skills/convention-sync/scripts/convention-sync.sh +++ b/.cursor/skills/convention-sync/scripts/convention-sync.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# convention-sync.sh — Sync ~/.cursor/ files with the edge-conventions repo. +# convention-sync.sh — Sync ~/.cursor/ files with the edge-dev-agents repo. # Usage: ./convention-sync.sh [repo-dir] [--stage] [--commit -m "message"] [--repo-to-user] # Compares ~/.cursor/{skills,rules,scripts} against <repo-dir>/.cursor/ and # outputs a structured JSON summary of new, modified, and deleted files. 
@@ -22,20 +22,20 @@ resolve_default_repo_dir() { local cwd remote_url default_repo cwd="$(pwd)" - if [[ "$(basename "$cwd")" == "edge-conventions" ]]; then + if [[ "$(basename "$cwd")" == "edge-dev-agents" ]]; then printf '%s\n' "$cwd" return 0 fi if git -C "$cwd" rev-parse --is-inside-work-tree >/dev/null 2>&1; then remote_url="$(git -C "$cwd" remote get-url origin 2>/dev/null || true)" - if [[ "$remote_url" == *"edge-conventions"* ]]; then + if [[ "$remote_url" == *"edge-dev-agents"* ]]; then printf '%s\n' "$cwd" return 0 fi fi - default_repo="$HOME/git/edge-conventions" + default_repo="$HOME/git/edge-dev-agents" if [[ -d "$default_repo/.git" || -f "$default_repo/.git" ]]; then printf '%s\n' "$default_repo" return 0 @@ -53,18 +53,18 @@ validate_repo_dir() { return 1 fi - if [[ "$(basename "$repo_dir")" == "edge-conventions" ]]; then + if [[ "$(basename "$repo_dir")" == "edge-dev-agents" ]]; then return 0 fi if git -C "$repo_dir" rev-parse --is-inside-work-tree >/dev/null 2>&1; then remote_url="$(git -C "$repo_dir" remote get-url origin 2>/dev/null || true)" - if [[ "$remote_url" == *"edge-conventions"* ]]; then + if [[ "$remote_url" == *"edge-dev-agents"* ]]; then return 0 fi fi - echo "ERROR: Repo directory does not appear to be the edge-conventions checkout: $repo_dir" >&2 + echo "ERROR: Repo directory does not appear to be the edge-dev-agents checkout: $repo_dir" >&2 return 1 } @@ -80,7 +80,7 @@ done if [[ -z "$REPO_DIR" ]]; then if ! REPO_DIR="$(resolve_default_repo_dir)"; then - echo "ERROR: Could not resolve the edge-conventions repo. Run with an explicit repo path." >&2 + echo "ERROR: Could not resolve the edge-dev-agents repo. Run with an explicit repo path." 
>&2 echo "Usage: convention-sync.sh [repo-dir] [--stage] [--commit -m \"message\"]" >&2 exit 1 fi diff --git a/README.md b/README.md index 707d180..d92c743 100644 --- a/README.md +++ b/README.md @@ -1,33 +1,3 @@ # edge-dev-agents -Development agent configurations: Cursor skills, Claude Code rules, and (soon) OpenClaw workspace files. - -## Contents - -- `.cursor/` — Skills, rules, and scripts synced from `~/.cursor/` via the `convention-sync` skill -- `.claude/` — Auto-generated `CLAUDE.md` and skills symlink for Claude Code compatibility -- `scripts/setup.sh` — Bootstrap script for deploying to new machines - -## Setup (new machine) - -```bash -git clone git@github.com:EdgeApp/edge-dev-agents.git ~/git/edge-dev-agents -cd ~/git/edge-dev-agents -./scripts/setup.sh -``` - -This creates symlinks from `~/.cursor/` and `~/.claude/` into the repo, so both Cursor and Claude Code discover the skills and rules. - -## Syncing - -After editing skills locally in `~/.cursor/`, sync to this repo: - -``` -/convention-sync -``` - -To pull changes from this repo into your local setup: - -```bash -./scripts/setup.sh -``` +Agent skills, rules, and scripts for development workflow orchestration. From c9c4b673ae54695eb376a544d4829376017282e1 Mon Sep 17 00:00:00 2001 From: j0ntz <jnthntzng@gmail.com> Date: Wed, 25 Mar 2026 15:40:52 -0700 Subject: [PATCH 3/6] pr-address: add ensure-branch step for automatic branch checkout --- .cursor/skills/pr-address/SKILL.md | 14 +++++++++ .../skills/pr-address/scripts/pr-address.sh | 30 ++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/.cursor/skills/pr-address/SKILL.md b/.cursor/skills/pr-address/SKILL.md index f5972cd..596c2a8 100644 --- a/.cursor/skills/pr-address/SKILL.md +++ b/.cursor/skills/pr-address/SKILL.md @@ -20,6 +20,20 @@ metadata: <rule id="resolution-source-of-truth">Only explicitly resolved threads (`isResolved: true`) or `<!-- addressed:... -->` markers count as resolved. 
Recency (commits after a comment) does NOT mean resolved.</rule> </rules> +<step id="0" name="Ensure correct branch"> +Before any other work, ensure the PR's branch is checked out and up to date: + +```bash +~/.cursor/skills/pr-address/scripts/pr-address.sh ensure-branch --owner <OWNER> --repo <REPO> --pr <NUMBER> +``` + +The script: +- If already on the PR branch → pulls latest +- If on a different branch → stashes uncommitted changes (if any), checks out the PR branch, pulls latest + +Output includes `BRANCH_READY`, `STASHED`, and (if switched) `PREVIOUS_BRANCH`. If `STASHED=true`, inform the user that changes were stashed on the previous branch. +</step> + <step id="1" name="Fetch all unresolved feedback and PR body"> Always fetch live from GitHub. Run both in parallel: diff --git a/.cursor/skills/pr-address/scripts/pr-address.sh b/.cursor/skills/pr-address/scripts/pr-address.sh index fd4bcd6..ff0e182 100755 --- a/.cursor/skills/pr-address/scripts/pr-address.sh +++ b/.cursor/skills/pr-address/scripts/pr-address.sh @@ -12,6 +12,7 @@ # resolve-id --owner <o> --repo <r> --pr <n> --node-id <id> # headline --owner <o> --repo <r> --sha <sha> # fetch-pr-body --owner <o> --repo <r> --pr <n> Fetch current PR body → /tmp/pr-body.md +# ensure-branch --owner <o> --repo <r> --pr <n> Checkout PR branch, stash if needed, pull # autosquash Rebase --autosquash from merge-base # # Exit codes: 0 = success, 1 = error, 2 = needs user input (e.g. 
gh not authenticated) @@ -316,6 +317,33 @@ case "$CMD" in echo ">> Wrote PR body to /tmp/pr-body.md ($(wc -c < /tmp/pr-body.md | tr -d ' ') bytes)" ;; + ensure-branch) + require_gh + if [[ -z "$OWNER" || -z "$REPO" || -z "$PR" ]]; then + echo "Error: --owner, --repo, --pr required" >&2; exit 1 + fi + + PR_BRANCH=$(gh api "repos/$OWNER/$REPO/pulls/$PR" --jq '.head.ref') + CURRENT_BRANCH=$(git branch --show-current) + + if [[ "$CURRENT_BRANCH" == "$PR_BRANCH" ]]; then + echo ">> Already on $PR_BRANCH — pulling latest" + git pull --ff-only 2>&1 || git pull --rebase 2>&1 + echo ">> BRANCH_READY=$PR_BRANCH STASHED=false" + else + STASHED=false + if ! git diff --quiet HEAD 2>/dev/null || ! git diff --cached --quiet HEAD 2>/dev/null || [[ -n "$(git ls-files --others --exclude-standard)" ]]; then + echo ">> Stashing uncommitted changes on $CURRENT_BRANCH" + git stash -u + STASHED=true + fi + echo ">> Switching from $CURRENT_BRANCH to $PR_BRANCH" + git checkout "$PR_BRANCH" 2>&1 + git pull --ff-only 2>&1 || git pull --rebase 2>&1 + echo ">> BRANCH_READY=$PR_BRANCH STASHED=$STASHED PREVIOUS_BRANCH=$CURRENT_BRANCH" + fi + ;; + autosquash) DEFAULT_UPSTREAM=$(git symbolic-ref --quiet --short refs/remotes/origin/HEAD 2>/dev/null \ || echo "origin/$(git remote show origin | sed -n '/HEAD branch/s/.*: //p')") @@ -325,7 +353,7 @@ case "$CMD" in ;; *) - echo "Usage: pr-address.sh {fetch|fetch-thread|reply|resolve-thread|mark-addressed|resolve-id|headline|fetch-pr-body|autosquash} [args]" >&2 + echo "Usage: pr-address.sh {fetch|fetch-thread|reply|resolve-thread|mark-addressed|resolve-id|headline|fetch-pr-body|ensure-branch|autosquash} [args]" >&2 exit 1 ;; esac From e26811def23e138c11133f5f5f0103b349c26a09 Mon Sep 17 00:00:00 2001 From: j0ntz <jnthntzng@gmail.com> Date: Fri, 27 Mar 2026 17:44:43 -0700 Subject: [PATCH 4/6] pr-land: support explicit PRs, Asana tasks, and mixed args in discover --- .cursor/skills/pr-land/SKILL.md | 27 +- .../pr-land/scripts/pr-land-discover.sh | 287 
+++++++++++++----- 2 files changed, 237 insertions(+), 77 deletions(-) diff --git a/.cursor/skills/pr-land/SKILL.md b/.cursor/skills/pr-land/SKILL.md index 508df06..7c0459c 100644 --- a/.cursor/skills/pr-land/SKILL.md +++ b/.cursor/skills/pr-land/SKILL.md @@ -6,14 +6,24 @@ metadata: author: j0ntz --- -<goal>Land approved PRs on `$GIT_BRANCH_PREFIX/*` branches by autosquashing fixups, rebasing onto the default upstream branch, and pushing.</goal> +<goal>Land approved PRs by autosquashing fixups, rebasing onto the default upstream branch, and merging. Accepts repo names, explicit PR references, or Asana task URLs.</goal> <usage> ``` -/pr-land # All EdgeApp repos with $GIT_BRANCH_PREFIX/* PRs -/pr-land edge-react-gui # Specific repo -/pr-land edge-react-gui edge-core-js # Multiple repos +/pr-land # All EdgeApp repos with $GIT_BRANCH_PREFIX/* PRs +/pr-land edge-react-gui # Specific repo +/pr-land edge-react-gui edge-core-js # Multiple repos +/pr-land edge-react-gui#123 # Specific PR (shorthand) +/pr-land https://github.com/EdgeApp/edge-react-gui/pull/123 # Specific PR (URL) +/pr-land https://app.asana.com/0/1234/5678 # Asana task → resolves linked PRs +/pr-land edge-react-gui#123 edge-core-js # Mix: explicit PR + repo scan ``` + +Arguments are classified automatically: +- **Repo names** → branch-prefix scan (original behavior) +- **PR URLs / shorthand** (`repo#N`) → fetched directly, no branch-prefix filter +- **Asana task URLs** → resolved to linked GitHub PRs via Asana API (requires `ASANA_TOKEN`) +- **No args** → scans all EdgeApp repos </usage> <rules description="Non-negotiable constraints."> @@ -47,7 +57,7 @@ metadata: | Script | Exit 0 | Exit 1 | Exit 2 | Exit 3 | Exit 4 | |--------|--------|--------|--------|--------|--------| -| `pr-land-discover.sh` | Success | Error | - | - | - | +| `pr-land-discover.sh` | Success | Error | Auth needed | - | - | | `pr-land-comments.sh` | Success | Error | - | - | - | | `pr-land-prepare.sh` | Ready | All failed | - | - | - | 
| `verify-repo.sh` | Pass | Code fail | CHANGELOG fail | - | - | @@ -63,10 +73,13 @@ metadata: ONE tool call: ```bash -~/.cursor/skills/pr-land/scripts/pr-land-discover.sh [repo1 repo2 ...] +~/.cursor/skills/pr-land/scripts/pr-land-discover.sh [args...] ``` -Returns JSON with all `$GIT_BRANCH_PREFIX/*` PRs and their approval status. +Args can be repo names, PR URLs, PR shorthand (`repo#N`), or Asana task URLs (mixed freely). +No args = scan all EdgeApp repos for `$GIT_BRANCH_PREFIX/*` PRs. + +Returns JSON: `{ "prs": [...], "errors": [...] }`. Each PR has `repo`, `prNumber`, `branch`, `title`, `approved`, `changesRequested`, `reviewers`. Errors include Asana resolution failures or PR fetch failures. </step> <step id="2" name="Comment Check and Addressing"> diff --git a/.cursor/skills/pr-land/scripts/pr-land-discover.sh b/.cursor/skills/pr-land/scripts/pr-land-discover.sh index b7a5d4f..cc85e77 100755 --- a/.cursor/skills/pr-land/scripts/pr-land-discover.sh +++ b/.cursor/skills/pr-land/scripts/pr-land-discover.sh @@ -1,14 +1,21 @@ #!/usr/bin/env node -// pr-land-discover.sh — Discovers all user's open PRs across EdgeApp repos -// with approval status using a single GraphQL query. +// pr-land-discover.sh — Discovers open PRs across EdgeApp repos with approval status. // -// Usage: ./pr-land-discover.sh [repo1] [repo2] ... -// Example: ./pr-land-discover.sh edge-react-gui edge-core-js -// Example: ./pr-land-discover.sh (no args = all EdgeApp repos) +// Accepts mixed argument types: +// Repo names: edge-react-gui edge-core-js +// PR URLs: https://github.com/EdgeApp/edge-react-gui/pull/123 +// PR shorthand: edge-react-gui#123 +// Asana tasks: https://app.asana.com/0/<project>/<taskGid> +// No args: all EdgeApp repos (branch-prefix scan) +// +// Explicit PRs (URL/shorthand) are fetched directly — no branch-prefix filter. +// Asana tasks are resolved to linked GitHub PRs via the Asana API (requires ASANA_TOKEN). +// Repo names trigger the original branch-prefix scan. 
const { spawnSync } = require("child_process"); +const https = require("https"); -const specifiedRepos = process.argv.slice(2); +const args = process.argv.slice(2); const edgeAppRepos = [ "edge-react-gui", "edge-exchange-plugins", @@ -17,7 +24,39 @@ const edgeAppRepos = [ "edge-login-ui-rn", "edge-currency-plugins", ]; -const repos = specifiedRepos.length > 0 ? specifiedRepos : edgeAppRepos; + +// --- Argument classification --- + +const PR_URL_RE = /^https:\/\/github\.com\/EdgeApp\/([^/]+)\/pull\/(\d+)/; +const PR_SHORT_RE = /^([a-z][a-z0-9-]+)#(\d+)$/; +const ASANA_URL_RE = /^https:\/\/app\.asana\.com\/\d+\/\d+\/(?:task\/)?(\d+)/; + +const repoArgs = []; +const explicitPrs = []; // {repo, prNumber} +const asanaGids = []; + +for (const arg of args) { + let m; + if ((m = arg.match(PR_URL_RE))) { + explicitPrs.push({ repo: m[1], prNumber: Number(m[2]) }); + } else if ((m = arg.match(PR_SHORT_RE))) { + explicitPrs.push({ repo: m[1], prNumber: Number(m[2]) }); + } else if ((m = arg.match(ASANA_URL_RE))) { + asanaGids.push(m[1]); + } else { + repoArgs.push(arg); + } +} + +// If no args at all, default to scanning all repos +const scanRepos = + args.length === 0 + ? edgeAppRepos + : repoArgs.length > 0 + ? repoArgs + : []; + +// --- Helpers --- function requireGh() { const check = spawnSync("gh", ["auth", "status"], { encoding: "utf8" }); @@ -28,11 +67,11 @@ function requireGh() { } function ghGraphql(query, variables = {}) { - const args = ["api", "graphql", "-f", `query=${query}`]; + const gqlArgs = ["api", "graphql", "-f", `query=${query}`]; for (const [k, v] of Object.entries(variables)) { - args.push(typeof v === "number" ? "-F" : "-f", `${k}=${v}`); + gqlArgs.push(typeof v === "number" ? 
"-F" : "-f", `${k}=${v}`); } - const result = spawnSync("gh", args, { encoding: "utf8" }); + const result = spawnSync("gh", gqlArgs, { encoding: "utf8" }); if (result.status !== 0) { throw new Error(`GraphQL failed: ${(result.stderr || "").trim()}`); } @@ -43,14 +82,125 @@ function ghGraphql(query, variables = {}) { return parsed.data; } -requireGh(); +function ghApi(endpoint) { + const result = spawnSync("gh", ["api", endpoint], { encoding: "utf8" }); + if (result.status !== 0) { + throw new Error(`gh api failed: ${(result.stderr || "").trim()}`); + } + return JSON.parse(result.stdout); +} + +function asanaGet(path) { + const token = process.env.ASANA_TOKEN; + if (!token) throw new Error("ASANA_TOKEN not set"); + return new Promise((resolve, reject) => { + const req = https.get( + `https://app.asana.com/api/1.0${path}`, + { headers: { Authorization: `Bearer ${token}` } }, + (res) => { + let body = ""; + res.on("data", (d) => (body += d)); + res.on("end", () => { + if (res.statusCode !== 200) + return reject(new Error(`Asana ${res.statusCode}: ${body}`)); + resolve(JSON.parse(body).data); + }); + } + ); + req.on("error", reject); + }); +} + +function extractReviewers(reviews) { + const latestByUser = {}; + for (const r of reviews) { + const login = r.author?.login; + if (!login) continue; + if ( + !latestByUser[login] || + new Date(r.submittedAt) > new Date(latestByUser[login].submittedAt) + ) { + latestByUser[login] = r; + } + } + const reviewers = Object.values(latestByUser); + return { + approved: reviewers.some((r) => r.state === "APPROVED"), + changesRequested: reviewers.some((r) => r.state === "CHANGES_REQUESTED"), + reviewers: reviewers.map((r) => ({ + user: r.author.login, + state: r.state, + })), + }; +} + +// --- Main --- + +async function main() { + requireGh(); -// Build a single GraphQL query with aliases for all repos. -// Each alias fetches open PRs + latest review state in one round-trip. 
-const repoFragments = repos - .map((repo, i) => { - const alias = `repo${i}`; - return `${alias}: repository(owner: "EdgeApp", name: "${repo}") { + const results = { prs: [], errors: [] }; + + // 1. Resolve Asana tasks → explicit PRs + for (const gid of asanaGids) { + try { + const task = await asanaGet( + `/tasks/${gid}?opt_fields=name,notes,permalink_url` + ); + // Look for GitHub PR URLs in task notes + const ghPrRe = + /https:\/\/github\.com\/EdgeApp\/([^/]+)\/pull\/(\d+)/g; + let match; + let found = false; + while ((match = ghPrRe.exec(task.notes || "")) !== null) { + explicitPrs.push({ repo: match[1], prNumber: Number(match[2]) }); + found = true; + } + if (!found) { + results.errors.push( + `Asana task ${gid} (${task.name}): no GitHub PR link found in description` + ); + } + } catch (e) { + results.errors.push(`Asana task ${gid}: ${e.message}`); + } + } + + // 2. Fetch explicit PRs directly (no branch-prefix filter) + for (const { repo, prNumber } of explicitPrs) { + try { + const pr = ghApi(`repos/EdgeApp/${repo}/pulls/${prNumber}`); + const reviewsRaw = ghApi( + `repos/EdgeApp/${repo}/pulls/${prNumber}/reviews` + ); + const { approved, changesRequested, reviewers } = extractReviewers( + reviewsRaw.map((r) => ({ + author: { login: r.user?.login }, + state: r.state, + submittedAt: r.submitted_at, + })) + ); + results.prs.push({ + repo, + prNumber: pr.number, + branch: pr.head.ref, + title: pr.title, + updatedAt: pr.updated_at, + approved, + changesRequested, + reviewers, + }); + } catch (e) { + results.errors.push(`${repo}#${prNumber}: ${e.message}`); + } + } + + // 3. 
Scan repos by branch prefix (original behavior) + if (scanRepos.length > 0) { + const repoFragments = scanRepos + .map((repo, i) => { + const alias = `repo${i}`; + return `${alias}: repository(owner: "EdgeApp", name: "${repo}") { name pullRequests(first: 100, states: OPEN) { nodes { @@ -68,65 +218,62 @@ const repoFragments = repos } } }`; - }) - .join("\n "); + }) + .join("\n "); -const query = `{ ${repoFragments} }`; + const query = `{ ${repoFragments} }`; -let data; -try { - data = ghGraphql(query); -} catch (e) { - console.error("ERROR:", e.message); - process.exit(1); -} + let data; + try { + data = ghGraphql(query); + } catch (e) { + console.error("ERROR:", e.message); + process.exit(1); + } + + for (const key of Object.keys(data)) { + const repoData = data[key]; + if (!repoData) continue; + const repo = repoData.name; + + for (const pr of repoData.pullRequests.nodes) { + if (!pr.headRefName.startsWith("jon/")) continue; + + const { approved, changesRequested, reviewers } = extractReviewers( + pr.reviews.nodes + ); -const results = { prs: [], errors: [] }; - -for (const key of Object.keys(data)) { - const repoData = data[key]; - if (!repoData) continue; - const repo = repoData.name; - - for (const pr of repoData.pullRequests.nodes) { - if (!pr.headRefName.startsWith("jon/")) continue; - - // Dedupe reviews: keep latest per reviewer - const latestByUser = {}; - for (const r of pr.reviews.nodes) { - const login = r.author?.login; - if (!login) continue; - if ( - !latestByUser[login] || - new Date(r.submittedAt) > new Date(latestByUser[login].submittedAt) - ) { - latestByUser[login] = r; + results.prs.push({ + repo, + prNumber: pr.number, + branch: pr.headRefName, + title: pr.title, + updatedAt: pr.updatedAt, + approved, + changesRequested, + reviewers, + }); } } + } - const reviewers = Object.values(latestByUser); - const approved = reviewers.some((r) => r.state === "APPROVED"); - const changesRequested = reviewers.some( - (r) => r.state === "CHANGES_REQUESTED" 
- ); + // Dedupe by repo+prNumber (in case Asana/explicit overlap with scan) + const seen = new Set(); + results.prs = results.prs.filter((pr) => { + const key = `${pr.repo}#${pr.prNumber}`; + if (seen.has(key)) return false; + seen.add(key); + return true; + }); - results.prs.push({ - repo, - prNumber: pr.number, - branch: pr.headRefName, - title: pr.title, - updatedAt: pr.updatedAt, - approved, - changesRequested, - reviewers: reviewers.map((r) => ({ - user: r.author.login, - state: r.state, - })), - }); - } + results.prs.sort( + (a, b) => + a.repo.localeCompare(b.repo) || a.branch.localeCompare(b.branch) + ); + console.log(JSON.stringify(results, null, 2)); } -results.prs.sort( - (a, b) => a.repo.localeCompare(b.repo) || a.branch.localeCompare(b.branch) -); -console.log(JSON.stringify(results, null, 2)); +main().catch((e) => { + console.error("ERROR:", e.message); + process.exit(1); +}); From d8ddc3772016145e2921bd69859c1153221846dc Mon Sep 17 00:00:00 2001 From: j0ntz <jnthntzng@gmail.com> Date: Fri, 27 Mar 2026 17:58:00 -0700 Subject: [PATCH 5/6] pr-land: reorder publish/dep-upgrade before staging cherry-pick, fix Asana URL regex --- .cursor/skills/pr-land/SKILL.md | 63 ++++++++++--------- .../pr-land/scripts/pr-land-discover.sh | 5 +- .../scripts/pr-land-extract-asana-task.sh | 2 +- 3 files changed, 37 insertions(+), 33 deletions(-) diff --git a/.cursor/skills/pr-land/SKILL.md b/.cursor/skills/pr-land/SKILL.md index 7c0459c..2723eba 100644 --- a/.cursor/skills/pr-land/SKILL.md +++ b/.cursor/skills/pr-land/SKILL.md @@ -38,7 +38,7 @@ Arguments are classified automatically: <rule id="unexpected-exit">Unexpected exit codes → STOP immediately. If any script returns an exit code not documented in this file, STOP and report to user. Do NOT attempt to interpret, retry, or work around unexpected errors.</rule> <rule id="sequential-rebase">Sequential merging requires rebase. 
Each subsequent PR MUST be rebased onto the updated base branch after the previous merge.</rule> <rule id="publish-gating">Don't publish if outstanding PRs remain. Only offer to publish a repo when ALL approved PRs for that repo are merged. If any were skipped or held back, do NOT publish that repo.</rule> -<rule id="npm-publish-gate">Step 8 CANNOT begin until the user explicitly confirms npm publish succeeded. `npm publish` requires interactive 2FA — the agent cannot run it. Do NOT infer publish completion from git push or tagging. STOP and WAIT for user confirmation.</rule> +<rule id="npm-publish-gate">Step 7 CANNOT begin until the user explicitly confirms npm publish succeeded. `npm publish` requires interactive 2FA — the agent cannot run it. Do NOT infer publish completion from git push or tagging. STOP and WAIT for user confirmation.</rule> <rule id="asana-last">Asana updates are LAST. Do NOT update Asana tasks until ALL merges, publishes, and GUI dependency upgrades are complete. Only update status for PRs that are fully landed (merged, and if non-GUI: published + GUI deps updated).</rule> </rules> @@ -159,35 +159,9 @@ The merge script processes PRs **sequentially** with automatic rebase-before-mer **On exit 4:** Agent resolves semantically, pushes, re-runs merge. Script detects already-merged PRs and skips them. </step> -<step id="6" name="Staging Cherry-Pick"> -**Trigger:** Only for `edge-react-gui` PRs whose CHANGELOG entries are in the `## X.Y.Z (staging)` section (not `## Unreleased`). Check the merged PR's CHANGELOG diff to determine this — if the entry was added under a `(staging)` heading, the PR needs cherry-picking. - -**Skip** this step entirely if no merged PRs have staging CHANGELOG entries. 
- -For qualifying PRs, invoke the `/staging-cherry-pick` skill: - -```bash -echo '[{"repo":"edge-react-gui","prNumber":123,"mergeSha":"abc123"}]' | ~/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh -``` - -Pass the `mergeSha` from the merge step's JSON output. The script cherry-picks individual (non-merge) commits onto the staging branch. - -**On exit 3 (CHANGELOG conflict):** Resolve semantically (existing staging entries first, then the new entry), then `git add CHANGELOG.md && GIT_EDITOR=true git cherry-pick --continue`. Re-run for remaining PRs. - -**On exit 1 (code conflict):** STOP and report to user. - -After cherry-picks succeed, ask user to confirm push: -```bash -git push origin staging -``` - -Then restore the previous branch. -</step> - -<step id="7" name="Publish"> +<step id="6" name="Publish"> **Gating:** Only non-GUI repos. Only when ALL approved PRs for the repo are merged. Skip if any were skipped/held back. - Ask for user confirmation: ``` Merged repos ready to publish (all PRs landed): @@ -215,11 +189,11 @@ After script completes: 2. If confirmed, push master and tag: `git push origin master && git push origin v<version>` 3. Prompt user to run `npm publish` in a real terminal (requires interactive 2FA) -**STOP HERE. Do NOT proceed to step 8 until the user confirms npm publish succeeded.** +**STOP HERE. Do NOT proceed to step 7 until the user confirms npm publish succeeded.** </step> -<step id="8" name="Update GUI Dependencies"> -**Trigger:** Only if non-`edge-react-gui` repos were merged and published in steps 5–7. +<step id="7" name="Update GUI Dependencies"> +**Trigger:** Only if non-`edge-react-gui` repos were merged and published in step 6. All non-GUI EdgeApp repos are GUI dependencies, so publishing always requires a GUI dep upgrade. Ask user to confirm `npm publish` completed, then: @@ -245,6 +219,33 @@ Ask user to confirm `npm publish` completed, then: If stash pop fails with conflicts, STOP and report. 
If "No stash entries", that's fine. </step> +<step id="8" name="Staging Cherry-Pick"> +**Trigger:** Only for `edge-react-gui` commits that target the `## X.Y.Z (staging)` CHANGELOG section (not `## Unreleased`). This includes both merged PR commits and GUI dependency upgrade commits from step 7. + +Check CHANGELOG diffs to determine which commits qualify — if the entry was added under a `(staging)` heading, it needs cherry-picking. + +**Skip** this step entirely if no commits have staging CHANGELOG entries. + +For qualifying PRs/commits, invoke the `/staging-cherry-pick` skill: + +```bash +echo '[{"repo":"edge-react-gui","prNumber":123,"mergeSha":"abc123"}]' | ~/.cursor/skills/staging-cherry-pick/scripts/staging-cherry-pick.sh +``` + +Pass the `mergeSha` from the merge step's JSON output. For dep upgrade commits, pass the commit SHA from step 7. The script cherry-picks individual (non-merge) commits onto the staging branch. + +**On exit 3 (CHANGELOG conflict):** Resolve semantically (existing staging entries first, then the new entry), then `git add CHANGELOG.md && GIT_EDITOR=true git cherry-pick --continue`. Re-run for remaining PRs. + +**On exit 1 (code conflict):** STOP and report to user. + +After cherry-picks succeed, ask user to confirm push: +```bash +git push origin staging +``` + +Then restore the previous branch. 
+</step> + <step id="9" name="Update Asana Tasks"> **Runs ONLY after ALL merges, cherry-picks, publishes, and GUI dep upgrades are complete.** diff --git a/.cursor/skills/pr-land/scripts/pr-land-discover.sh b/.cursor/skills/pr-land/scripts/pr-land-discover.sh index cc85e77..436071f 100755 --- a/.cursor/skills/pr-land/scripts/pr-land-discover.sh +++ b/.cursor/skills/pr-land/scripts/pr-land-discover.sh @@ -29,7 +29,10 @@ const edgeAppRepos = [ const PR_URL_RE = /^https:\/\/github\.com\/EdgeApp\/([^/]+)\/pull\/(\d+)/; const PR_SHORT_RE = /^([a-z][a-z0-9-]+)#(\d+)$/; -const ASANA_URL_RE = /^https:\/\/app\.asana\.com\/\d+\/\d+\/(?:task\/)?(\d+)/; +// Matches both old (app.asana.com/0/<project>/<taskGid>) and new +// (app.asana.com/1/<workspace>/project/<projectId>/task/<taskGid>) URL formats. +// Strips query params via the [^?]* fallback. +const ASANA_URL_RE = /^https:\/\/app\.asana\.com\/(?:\d+\/\d+\/(?:project\/\d+\/)?(?:task\/)?(\d+))/; const repoArgs = []; const explicitPrs = []; // {repo, prNumber} diff --git a/.cursor/skills/pr-land/scripts/pr-land-extract-asana-task.sh b/.cursor/skills/pr-land/scripts/pr-land-extract-asana-task.sh index d8dbc7e..6b8013f 100755 --- a/.cursor/skills/pr-land/scripts/pr-land-extract-asana-task.sh +++ b/.cursor/skills/pr-land/scripts/pr-land-extract-asana-task.sh @@ -43,7 +43,7 @@ async function main() { process.exit(2); } - const regex = /https:\/\/app\.asana\.com\/\d+\/\d+\/(?:task\/)?(\d+)/i; + const regex = /https:\/\/app\.asana\.com\/(?:\d+\/\d+\/(?:project\/\d+\/)?(?:task\/)?(\d+))/i; const tasks = []; const missing = []; From 67855c12f0cf483168cfe72034e6c909e84da3fb Mon Sep 17 00:00:00 2001 From: j0ntz <jnthntzng@gmail.com> Date: Fri, 27 Mar 2026 18:19:18 -0700 Subject: [PATCH 6/6] pr-land: prefer GitHub integration attachments over notes, clarify npm publish flow --- .cursor/skills/pr-land/SKILL.md | 2 +- .../pr-land/scripts/pr-land-discover.sh | 39 +++++++++++++++---- 2 files changed, 32 insertions(+), 9 deletions(-) diff 
--git a/.cursor/skills/pr-land/SKILL.md b/.cursor/skills/pr-land/SKILL.md index 2723eba..777d010 100644 --- a/.cursor/skills/pr-land/SKILL.md +++ b/.cursor/skills/pr-land/SKILL.md @@ -187,7 +187,7 @@ echo '[{"repo":"...","branch":"master"}]' | ~/.cursor/skills/pr-land/scripts/pr- After script completes: 1. Show version bump details to user 2. If confirmed, push master and tag: `git push origin master && git push origin v<version>` -3. Prompt user to run `npm publish` in a real terminal (requires interactive 2FA) +3. Prompt user to run `npm login` and `npm publish` in a real terminal. Both commands require browser-based authentication (npm opens a URL for web login + 2FA approval) that cannot be automated. **STOP HERE. Do NOT proceed to step 7 until the user confirms npm publish succeeded.** </step> diff --git a/.cursor/skills/pr-land/scripts/pr-land-discover.sh b/.cursor/skills/pr-land/scripts/pr-land-discover.sh index 436071f..bd869ba 100755 --- a/.cursor/skills/pr-land/scripts/pr-land-discover.sh +++ b/.cursor/skills/pr-land/scripts/pr-land-discover.sh @@ -145,23 +145,46 @@ async function main() { const results = { prs: [], errors: [] }; // 1. Resolve Asana tasks → explicit PRs + // GitHub integration attachments are the source of truth. + // Only fall back to scanning task notes if no attachments found. 
+ const ghPrRe = + /https:\/\/github\.com\/EdgeApp\/([^/]+)\/pull\/(\d+)/g; + for (const gid of asanaGids) { try { const task = await asanaGet( `/tasks/${gid}?opt_fields=name,notes,permalink_url` ); - // Look for GitHub PR URLs in task notes - const ghPrRe = - /https:\/\/github\.com\/EdgeApp\/([^/]+)\/pull\/(\d+)/g; - let match; let found = false; - while ((match = ghPrRe.exec(task.notes || "")) !== null) { - explicitPrs.push({ repo: match[1], prNumber: Number(match[2]) }); - found = true; + + // Check task attachments first (GitHub integration — authoritative) + const attachments = await asanaGet( + `/tasks/${gid}/attachments?opt_fields=resource_subtype,view_url` + ); + for (const att of attachments) { + if (att.resource_subtype !== "external" || !att.view_url) continue; + const m = att.view_url.match( + /^https:\/\/github\.com\/EdgeApp\/([^/]+)\/pull\/(\d+)/ + ); + if (m) { + explicitPrs.push({ repo: m[1], prNumber: Number(m[2]) }); + found = true; + } } + + // Fall back to task notes only if no attachments matched + if (!found) { + let match; + while ((match = ghPrRe.exec(task.notes || "")) !== null) { + explicitPrs.push({ repo: match[1], prNumber: Number(match[2]) }); + found = true; + } + ghPrRe.lastIndex = 0; + } + if (!found) { results.errors.push( - `Asana task ${gid} (${task.name}): no GitHub PR link found in description` + `Asana task ${gid} (${task.name}): no GitHub PR link found in attachments or description` ); } } catch (e) {