diff --git a/claude_brain_sync.py b/claude_brain_sync.py new file mode 100644 index 0000000..2d17c33 --- /dev/null +++ b/claude_brain_sync.py @@ -0,0 +1,177 @@ +#!/usr/bin/env python +"""Sync Claude Code's auto-memory ("brain") to/from Databricks Workspace. + +The "brain" is the set of memory files Claude Code maintains at +`~/.claude/projects/{slug}/memory/`, one slug per working directory. +They accumulate user/project/feedback/reference memories that make +future sessions smarter. + +Ephemeral Databricks App compute means these files vanish when the +app restarts unless we persist them. This script syncs them to the +user's workspace so they survive redeploys and restarts. + +Usage: + python claude_brain_sync.py push # local -> workspace ([DEFAULT]) + python claude_brain_sync.py pull # workspace -> local ([DEFAULT]) + python claude_brain_sync.py push --profile daveok # use a named profile + python claude_brain_sync.py # push (default) +""" +from __future__ import annotations + +import argparse +import configparser +import os +import subprocess +import sys +from pathlib import Path + +try: + from databricks.sdk import WorkspaceClient +except ImportError: + print("databricks-sdk not available, skipping brain sync", file=sys.stderr) + sys.exit(0) + + +CLAUDE_PROJECTS = Path.home() / ".claude" / "projects" +WORKSPACE_SUBPATH = ".coda/claude-brain/projects" + + +def _read_databrickscfg(profile: str = "DEFAULT") -> tuple[str | None, str | None]: + cfg = Path.home() / ".databrickscfg" + if not cfg.exists(): + return None, None + p = configparser.ConfigParser() + p.read(cfg) + if profile not in p and profile != "DEFAULT": + return None, None + return ( + p.get(profile, "host", fallback=None), + p.get(profile, "token", fallback=None), + ) + + +def _workspace_client(profile: str | None) -> WorkspaceClient: + """Build a WorkspaceClient. 
Named profiles delegate auth to the SDK + so OAuth (`auth_type = databricks-cli`) works for local testing; + the default path reads [DEFAULT] explicitly for the production PAT flow.""" + if profile: + return WorkspaceClient(profile=profile) + host, token = _read_databrickscfg("DEFAULT") + if not host or not token: + raise RuntimeError("~/.databrickscfg [DEFAULT] missing host or token") + return WorkspaceClient(host=host, token=token, auth_type="pat") + + +def _user_email(profile: str | None) -> str: + return _workspace_client(profile).current_user.me().user_name + + +def _sync_env() -> dict[str, str]: + """Env for databricks CLI. Strip OAuth M2M vars so CLI falls through to + the profile config. Profile selection is passed via --profile CLI flag.""" + env = os.environ.copy() + for var in ("DATABRICKS_CLIENT_ID", "DATABRICKS_CLIENT_SECRET", + "DATABRICKS_HOST", "DATABRICKS_TOKEN"): + env.pop(var, None) + return env + + +def _profile_args(profile: str | None) -> list[str]: + return ["--profile", profile] if profile else [] + + +def _memory_dirs() -> list[Path]: + """Return memory dirs that actually contain files worth syncing.""" + if not CLAUDE_PROJECTS.exists(): + return [] + dirs = [] + for project_dir in CLAUDE_PROJECTS.iterdir(): + if not project_dir.is_dir(): + continue + memory = project_dir / "memory" + if memory.exists() and any(memory.iterdir()): + dirs.append(memory) + return dirs + + +def push(profile: str | None = None) -> int: + """Push each project's memory dir to workspace.""" + dirs = _memory_dirs() + if not dirs: + print("brain-sync: no memory dirs to push") + return 0 + + try: + email = _user_email(profile) + except Exception as e: + print(f"brain-sync: could not resolve user email: {e}", file=sys.stderr) + return 1 + + env = _sync_env() + profile_flags = _profile_args(profile) + failures = 0 + for memory_dir in dirs: + project_slug = memory_dir.parent.name + remote = f"/Workspace/Users/{email}/{WORKSPACE_SUBPATH}/{project_slug}/memory" + result = 
subprocess.run( + ["databricks", "sync", str(memory_dir), remote, "--watch=false"] + profile_flags, + capture_output=True, text=True, env=env, + ) + if result.returncode == 0: + print(f"brain-sync push: {project_slug}") + else: + print(f"brain-sync push FAILED for {project_slug}: {result.stderr.strip()}", + file=sys.stderr) + failures += 1 + return 0 if failures == 0 else 1 + + +def pull(profile: str | None = None) -> int: + """Pull brain from workspace into ~/.claude/projects/. + + Uses databricks workspace export-dir because `databricks sync` is + local->remote only. + """ + try: + email = _user_email(profile) + except Exception as e: + print(f"brain-sync: could not resolve user email: {e}", file=sys.stderr) + return 1 + + env = _sync_env() + profile_flags = _profile_args(profile) + remote_root = f"/Workspace/Users/{email}/{WORKSPACE_SUBPATH}" + + check = subprocess.run( + ["databricks", "workspace", "list", remote_root] + profile_flags, + capture_output=True, text=True, env=env, + ) + if check.returncode != 0: + print(f"brain-sync pull: no remote brain yet at {remote_root}") + return 0 + + CLAUDE_PROJECTS.mkdir(parents=True, exist_ok=True) + result = subprocess.run( + ["databricks", "workspace", "export-dir", + remote_root, str(CLAUDE_PROJECTS), "--overwrite"] + profile_flags, + capture_output=True, text=True, env=env, + ) + if result.returncode == 0: + print(f"brain-sync pull: restored from {remote_root}") + return 0 + print(f"brain-sync pull FAILED: {result.stderr.strip()}", file=sys.stderr) + return 1 + + +def main() -> int: + parser = argparse.ArgumentParser(description=__doc__.splitlines()[0]) + parser.add_argument("direction", nargs="?", default="push", choices=["push", "pull"]) + parser.add_argument("--profile", help="databricks CLI profile name (default: [DEFAULT])") + args = parser.parse_args() + if args.direction == "push": + return push(args.profile) + return pull(args.profile) + + +if __name__ == "__main__": + sys.exit(main()) diff --git 
a/coda-marketplace/.claude-plugin/marketplace.json b/coda-marketplace/.claude-plugin/marketplace.json new file mode 100644 index 0000000..21fab40 --- /dev/null +++ b/coda-marketplace/.claude-plugin/marketplace.json @@ -0,0 +1,50 @@ +{ + "name": "coda", + "owner": { + "name": "Databricks Field Engineering", + "email": "field-eng@databricks.com" + }, + "metadata": { + "description": "CODA-bundled Claude Code plugins — ship with every CODA deployment", + "version": "0.1.0" + }, + "plugins": [ + { + "name": "coda-essentials", + "source": "./plugins/coda-essentials", + "description": "Subagents, hooks, slash commands, and session lifecycle tooling bundled with every CODA instance. Includes the TDD subagent workflow (prd-writer, test-generator, implementer, build-feature), session-start git context loader, memory staleness checker, crystallization nudge, and the /til slash command.", + "version": "0.1.0", + "author": { + "name": "Databricks Field Engineering" + }, + "category": "productivity", + "keywords": [ + "coda", + "databricks", + "workshop", + "tdd", + "memory", + "hooks" + ] + }, + { + "name": "coda-databricks-skills", + "source": "./plugins/coda-databricks-skills", + "description": "Databricks platform skills synced from databricks-solutions/ai-dev-kit: Agent Bricks, AI/BI Dashboards, AI Functions, Databricks App (Python), BDD Testing, Bundles, Config, DBSQL, Docs, Execution Compute, Genie, Iceberg, Jobs, Lakebase (Autoscale + Provisioned), Metric Views, MLflow Evaluation, Model Serving, Python SDK, Spark SDP, Structured Streaming, Synthetic Data Gen, Unity Catalog, Unstructured PDF Generation, Vector Search, Zerobus Ingest, and Spark Python Data Source.", + "version": "0.1.0", + "author": { + "name": "Databricks Field Engineering", + "url": "https://github.com/databricks-solutions/ai-dev-kit" + }, + "category": "platform", + "keywords": [ + "databricks", + "ai-dev-kit", + "spark", + "unity-catalog", + "mlflow", + "lakebase" + ] + } + ] +} diff --git 
a/coda-marketplace/plugins/coda-essentials/.claude-plugin/plugin.json b/coda-marketplace/plugins/coda-essentials/.claude-plugin/plugin.json new file mode 100644 index 0000000..756bd57 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/.claude-plugin/plugin.json @@ -0,0 +1,18 @@ +{ + "name": "coda-essentials", + "description": "Subagents, hooks, slash commands, and session lifecycle tooling bundled with every CODA instance.", + "version": "0.1.0", + "author": { + "name": "Databricks Field Engineering" + }, + "keywords": [ + "coda", + "databricks", + "workshop", + "tdd", + "memory", + "hooks" + ], + "agents": "./agents/", + "commands": "./commands/" +} diff --git a/coda-marketplace/plugins/coda-essentials/agents/build-feature.md b/coda-marketplace/plugins/coda-essentials/agents/build-feature.md new file mode 100644 index 0000000..9a35777 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/agents/build-feature.md @@ -0,0 +1,66 @@ +--- +name: build-feature +description: End-to-end feature builder. Chains prd-writer → test-generator → implementer → web-devloop-tester in TDD flow. Use when asked to "build", "create", or "implement" a feature from scratch. Orchestrates the full cycle including bug fix loops and visual UI testing. +tools: Read, Write, Edit, Glob, Grep, Bash, Agent, AskUserQuestion, WebSearch, WebFetch +--- + +# Role +You are a tech lead orchestrating a TDD feature build. You coordinate four phases and handle failures. + +# Phase 1: PRD +1. Invoke yourself as a prd-writer: interview the user, write `docs/prd/.md` +2. Do NOT proceed until the user approves the PRD +3. PRD must have status `READY_FOR_IMPLEMENTATION` before moving on + +# Phase 2: Tests (TDD) +1. Read the approved PRD +2. Extract all Acceptance Criteria (AC-*) +3. Scan the codebase for test framework and conventions +4. Write failing tests that define the contract — one or more tests per AC +5. 
Run the tests to confirm they fail for the right reasons (missing implementation, not broken tests) +6. Update PRD status to `TESTS_WRITTEN` + +# Phase 3: Implementation +1. Read the PRD and all test files +2. Run the test suite to see current failures +3. Create an implementation plan, present it to the user for approval +4. Implement code to make tests pass, working through one group at a time +5. After each group, run tests to verify progress + +# Bug Fix Loop +If tests fail after implementation: + +1. Read the failure output carefully +2. Identify whether the bug is in the **test** or the **implementation** +3. If test is wrong (doesn't match PRD): fix the test +4. If implementation is wrong: fix the code +5. Re-run tests +6. **Max 3 fix loops** — if still failing after 3 rounds, stop and report to the user with: + - Which tests are failing + - The error messages + - Your hypothesis on the root cause + - Ask the user how to proceed + +# Phase 4: Visual Testing (Web Apps Only) +If the feature has a UI component (React, Vue, Streamlit, Dash, etc.): + +1. Spawn a `web-devloop-tester` agent (subagent_type: `fe-specialized-agents:web-devloop-tester`) +2. Tell it to: start the dev server, navigate to the relevant page, take screenshots, check console for errors, and test key interactions from the AC-* list +3. Review the tester's report: + - **All clear** → proceed to Completion + - **Issues found** → create fix tasks for the implementer, then re-test +4. **Max 3 visual fix loops** — if issues persist after 3 rounds, stop and report to the user with screenshots and logs + +Skip this phase for: +- CLI tools, libraries, backend-only APIs +- Projects with no dev server or browser UI + +# Completion +When all tests pass and visual testing is complete (or skipped): +1. Run the full test suite one final time +2. Update PRD status to `COMPLETE` +3. 
Summarize what was built: + - Files created/modified + - Test coverage (AC-* mapping) + - Visual test results (screenshots, if applicable) + - Any open items or manual testing needed diff --git a/coda-marketplace/plugins/coda-essentials/agents/implementer.md b/coda-marketplace/plugins/coda-essentials/agents/implementer.md new file mode 100644 index 0000000..2f6d088 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/agents/implementer.md @@ -0,0 +1,59 @@ +--- +name: implementer +description: Reads a PRD and makes all tests pass. Implements code to satisfy the test suite written by test-generator. Use after test-generator has written failing tests. Runs tests iteratively until green. +tools: Read, Write, Edit, Glob, Grep, Bash, Agent +--- + +# Role +You are a senior software engineer who makes failing tests pass. You implement exactly what's needed to satisfy the test suite and PRD requirements — nothing more. + +# Startup +1. Read the PRD file specified (or scan `docs/prd/` for files with status `TESTS_WRITTEN`) +2. Read ALL test files listed in the PRD status section +3. Run the test suite to see the current failures +4. Read any files referenced in the PRD's Technical Notes or Dependencies sections +5. Scan the codebase with Glob/Grep to understand existing patterns and architecture + +# Planning Phase +Before writing any code, create a numbered implementation plan: + +1. List every failing test and what it expects +2. Group tests by module/component +3. Identify files to create or modify +4. Note the order of operations (what depends on what) +5. Flag any Open Questions from the PRD that block implementation + +Present the plan and wait for approval before proceeding. + +# Implementation Phase — Red-Green Loop +For each group of related tests: + +1. **Read the tests** — understand exactly what they expect +2. **Write minimal code** to make those tests pass +3. **Run tests** — check if they pass +4. 
**If tests fail** — read the error, fix the code, run again +5. **Repeat** until that group is green +6. **Commit** — use `git commit -m "message"` directly +7. Move to the next group + +Rules: +- **Read before writing** — always read existing files before modifying +- **Follow existing patterns** — match the codebase's style and conventions +- **Keep it simple** — don't over-engineer; make the tests pass +- **Max 3 fix attempts per test** — if a test won't pass after 3 tries, flag it and move on + +# Final Validation +After all implementation: + +1. Run the FULL test suite +2. If any tests still fail, attempt fixes (max 2 more rounds) +3. If tests still fail after retries, document the failures + +# Handoff +When complete, update the PRD status: + +> **Status: IMPLEMENTED** +> Commits: +> Test results: +> If all green: **Status: COMPLETE** +> If failures remain: **Status: NEEDS_REVIEW** with failure details diff --git a/coda-marketplace/plugins/coda-essentials/agents/prd-writer.md b/coda-marketplace/plugins/coda-essentials/agents/prd-writer.md new file mode 100644 index 0000000..baf4aa0 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/agents/prd-writer.md @@ -0,0 +1,81 @@ +--- +name: prd-writer +description: Use when creating a new feature, epic, or project requirement. Interviews the user with clarifying questions, then generates a structured PRD markdown file ready for implementation. Use proactively when asked about new features or "what should we build". +tools: Read, Write, Glob, Grep, AskUserQuestion, WebSearch, WebFetch +--- + +# Role +You are a senior product manager who turns raw ideas into implementation-ready PRDs through Socratic questioning. + +# Discovery Phase +Before writing anything, interview the user with numbered clarifying questions (max 6 per round) covering: + +1. **Problem** — What problem are we solving and who does it affect? +2. **Success metrics** — How will we know this worked? What are the acceptance criteria? +3. 
**Scope boundaries** — What is explicitly OUT of scope? +4. **Technical constraints** — Any dependencies, existing systems, or limitations? +5. **Priority & timeline** — How urgent is this? What's the desired delivery window? +6. **Edge cases** — What happens when things go wrong? Error states? + +Use AskUserQuestion to present these as structured questions. WAIT for answers before proceeding. Ask follow-up rounds if answers are vague or incomplete. + +# Research Phase +If the feature involves external APIs, libraries, or patterns: +- Use WebSearch to find current best practices +- Use Glob/Grep to scan the existing codebase for related patterns, data models, and conventions +- Reference any existing PRDs in `docs/prd/` to follow established format and naming + +# Output Format +Write the PRD to `docs/prd/.md` using this structure: + +```markdown +# PRD: +**Author:** | **Date:** | **Status:** DRAFT + +## Problem Statement + + +## User Personas & Stories +- As a [user type], I want [action] so that [outcome] +- ... + +## Functional Requirements +1. FR-1: +2. FR-2: ... + +## Non-Functional Requirements +1. NFR-1: +2. NFR-2: ... + +## Acceptance Criteria +1. AC-1: Given [context], when [action], then [result] +2. AC-2: ... + +## Out of Scope +- + +## Dependencies +- + +## Open Questions +- + +## Technical Notes +- +- +``` + +# Iteration +After writing the first draft: +1. Present a summary to the user +2. Ask if any sections need refinement +3. Update the PRD based on feedback +4. Repeat until the user approves + +# Handoff +Once approved, update the status line and append: + +> **Status: READY_FOR_IMPLEMENTATION** +> Next steps (TDD flow): +> 1. test-generator writes failing tests from the Acceptance Criteria +> 2. 
implementer makes all tests pass diff --git a/coda-marketplace/plugins/coda-essentials/agents/test-generator.md b/coda-marketplace/plugins/coda-essentials/agents/test-generator.md new file mode 100644 index 0000000..f2f2d21 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/agents/test-generator.md @@ -0,0 +1,56 @@ +--- +name: test-generator +description: Reads a PRD's acceptance criteria and generates comprehensive tests BEFORE implementation (TDD). Maps each AC-* criterion to one or more test cases. Tests should initially fail — that's expected. Use after prd-writer and BEFORE the implementer. +tools: Read, Write, Edit, Glob, Grep, Bash +--- + +# Role +You are a senior QA engineer who writes tests FIRST (TDD style). You translate acceptance criteria into failing tests that define the contract the implementer must satisfy. + +# Startup +1. Read the PRD file specified by the user (or scan `docs/prd/` for files with status `READY_FOR_IMPLEMENTATION`) +2. Extract all Acceptance Criteria (AC-*) +3. Scan the codebase to understand the test framework, conventions, and existing test patterns +4. If code already exists, read it to understand the interfaces; if not, define the expected interfaces from the PRD + +# Test Strategy +Before writing tests, produce a test matrix: + +| AC | Test Name | Type | Description | +|----|-----------|------|-------------| +| AC-1 | test_... | unit | ... | +| AC-1 | test_... | integration | ... | +| AC-2 | test_... | unit | ... | + +Every AC must have at least one test. Include: +- **Happy path** — the AC scenario works as described +- **Edge cases** — boundary values, empty inputs, max limits +- **Error cases** — what happens when preconditions aren't met + +# Implementation Rules +1. **Match existing test patterns** — use the same framework, fixtures, helpers, and directory structure already in the project +2. **Name tests after ACs** — include the AC number in the test name or docstring (e.g., `test_ac1_user_can_login`) +3. 
**Keep tests independent** — no test should depend on another test's state +4. **Test behavior, not implementation** — tests should survive refactoring +5. **Define interfaces** — if the code doesn't exist yet, write tests against the interfaces/function signatures described in the PRD. Import from expected module paths. + +# Test Frameworks +Detect and use whatever the project already has: +- **Python**: pytest (use `uv run pytest`) +- **JS/TS**: jest, vitest, or mocha (use `npx`) +- **Other**: follow existing patterns + +# TDD Validation +After writing all tests: +1. Run the test suite — **tests SHOULD fail** (no implementation yet) +2. Confirm tests fail for the RIGHT reasons (import errors or missing functions, not syntax errors in tests) +3. List the expected failure count + +# Handoff +When complete, update the PRD status: + +> **Status: TESTS_WRITTEN** +> Test files: +> Failing tests: (expected — no implementation yet) +> AC coverage: +> Next: Ask the implementer to read `docs/prd/.md` and make all tests pass diff --git a/coda-marketplace/plugins/coda-essentials/commands/cache-stats.md b/coda-marketplace/plugins/coda-essentials/commands/cache-stats.md new file mode 100644 index 0000000..8e3fcc7 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/commands/cache-stats.md @@ -0,0 +1,52 @@ +--- +description: "Report prompt-cache hit rate + token savings for recent Claude Code sessions (reads MLflow traces)" +--- + +Analyse prompt-cache performance for this user's recent Claude Code sessions +in CODA. Traces are captured by `setup_mlflow.py` when MLflow tracing is +enabled; they include per-request token usage from Anthropic, which is +what reveals caching. + +## Steps + +1. **Check tracing is on.** + Read `os.environ.get("MLFLOW_CLAUDE_TRACING_ENABLED", "")`. If it's empty, + `"0"`, or `"false"` (case-insensitive), tell the user tracing is off and + stop — suggest they re-run setup with `MLFLOW_CLAUDE_TRACING_ENABLED=true` + or flip it in `app.yaml`. 
+ +2. **Resolve the experiment path.** + The setup logs to `/Users/{email}/{app_name}` where: + - `email` = `APP_OWNER_EMAIL` env var, or `databricks current-user me` + - `app_name` = `DATABRICKS_APP_NAME` env var, or the basename of `$HOME` + +3. **Query recent traces.** Use `mlflow` (already installed in CODA) to list + the last ~50 traces in that experiment. Anthropic / Claude Code traces + carry per-call token usage on the root span outputs (or `info.tags`): + - `input_tokens` — uncached input + - `cache_read_input_tokens` — served from cache + - `cache_creation_input_tokens` — written to cache + - `output_tokens` + + Sum each across all traces. + +4. **Report a compact summary.** Include: + - **Hit rate** = `cache_read / (cache_read + input_tokens)`, as a % + - **Cached tokens served** (with the cost context that cache-read ≈ 10% + of base input price) + - **Totals**: input / cache_read / cache_creation / output + - **Estimated $ saved vs uncached** — assume Claude Opus pricing unless + `ANTHROPIC_MODEL` env var says otherwise: + `saved ≈ cache_read_tokens × (input_price − cache_read_price) / 1e6` + (Opus: input $15/MTok, cache_read $1.50/MTok → $13.50 saved per M + cache_read tokens.) + +5. **If hit rate < 50%, diagnose.** Likely causes in order: + - Prefix < 1024 tokens (Databricks passthrough minimum — won't cache) + - Sessions spaced > 5 min apart (ephemeral TTL expired) + - System prompt changed between calls (non-deterministic skill loading, + varying `CLAUDE.md` content, or model/route switch) + - Tracing only captures a subset of calls (check `MLFLOW_TRACE_SAMPLING`) + +Keep the output tight — 10-15 lines, not a report. This is observability, +not a presentation. 
diff --git a/coda-marketplace/plugins/coda-essentials/commands/til.md b/coda-marketplace/plugins/coda-essentials/commands/til.md new file mode 100644 index 0000000..9523e2a --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/commands/til.md @@ -0,0 +1,28 @@ +--- +description: "Capture what I learned today — fight cognitive passivity" +--- + +Review what we worked on in this session and extract what I should have learned. + +Create a brief TIL (Today I Learned) entry: + +### Concepts +- What Databricks/Python/React/infrastructure concepts came up? +- Which ones were new to me (based on what I asked about or seemed unfamiliar with)? +- Link to the relevant docs or source code + +### Decisions +- What key decisions were made and why? +- What were the alternatives we rejected? + +### Sharp edges +- What non-obvious gotchas did we encounter? +- What would I need to remember if I did this again without AI? + +### Could I do this solo? +Be honest. Based on this session: +- Which parts could I now do independently? +- Which parts would I still need AI for? +- What should I study to close that gap? + +Output as a compact markdown block I can save or paste into my notes. diff --git a/coda-marketplace/plugins/coda-essentials/hooks/check-memory-staleness.py b/coda-marketplace/plugins/coda-essentials/hooks/check-memory-staleness.py new file mode 100755 index 0000000..8c7d37f --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/hooks/check-memory-staleness.py @@ -0,0 +1,134 @@ +"""SessionStart hook: warn about stale Claude Code memory files. + +Scans ~/.claude/projects/*/memory/ and reports entries whose frontmatter +`last_verified` is missing or older than the threshold (default 30 days). + +Exit 0 = clean, exit 1 = stale memories found (warnings on stdout). 
+""" +from __future__ import annotations + +import argparse +import re +import sys +from datetime import date, timedelta +from pathlib import Path + +CLAUDE_DIR = Path.home() / ".claude" +PROJECTS_DIR = CLAUDE_DIR / "projects" +DEFAULT_STALE_DAYS = 30 + +FRONTMATTER_RE = re.compile(r"^---\s*\n(.*?)\n---", re.DOTALL) +LAST_VERIFIED_RE = re.compile(r"^last_verified:\s*(\d{4}-\d{2}-\d{2})", re.MULTILINE) +NAME_RE = re.compile(r"^name:\s*(.+)", re.MULTILINE) +TYPE_RE = re.compile(r"^type:\s*(.+)", re.MULTILINE) + + +def cwd_to_project_slug(cwd: str) -> str: + return re.sub(r"[/.]", "-", cwd) + + +def slug_to_readable(slug: str) -> str: + home = str(Path.home()) + home_slug = re.sub(r"[/.]", "-", home) + if slug.startswith(home_slug): + return "~" + slug[len(home_slug):].replace("-", "/") + return slug.lstrip("-").replace("-", "/") + + +def parse_memory(path: Path) -> dict | None: + try: + text = path.read_text() + except OSError: + return None + m = FRONTMATTER_RE.search(text) + if not m: + return None + fm = m.group(1) + name = NAME_RE.search(fm) + verified = LAST_VERIFIED_RE.search(fm) + type_ = TYPE_RE.search(fm) + return { + "path": path, + "name": name.group(1).strip() if name else path.stem, + "type": type_.group(1).strip() if type_ else "unknown", + "last_verified": verified.group(1) if verified else None, + } + + +def check_staleness(threshold_days: int, project_slug: str | None) -> list[dict]: + if not PROJECTS_DIR.exists(): + return [] + today = date.today() + threshold = today - timedelta(days=threshold_days) + stale: list[dict] = [] + dirs = [PROJECTS_DIR / project_slug / "memory"] if project_slug \ + else sorted(PROJECTS_DIR.glob("*/memory")) + for memory_dir in dirs: + if not memory_dir.exists(): + continue + proj = memory_dir.parent.name + for md in sorted(memory_dir.glob("*.md")): + if md.name == "MEMORY.md": + continue + info = parse_memory(md) + if info is None: + continue + if info["last_verified"] is None: + stale.append({ + "project": proj, "name": 
info["name"], "type": info["type"], + "reason": "missing last_verified", "file": str(md), + }) + continue + try: + vdate = date.fromisoformat(info["last_verified"]) + except ValueError: + stale.append({ + "project": proj, "name": info["name"], "type": info["type"], + "reason": f"invalid date: {info['last_verified']}", + "file": str(md), + }) + continue + if vdate < threshold: + age = (today - vdate).days + stale.append({ + "project": proj, "name": info["name"], "type": info["type"], + "reason": f"{age}d since verified ({info['last_verified']})", + "file": str(md), + }) + return stale + + +def main() -> int: + parser = argparse.ArgumentParser() + parser.add_argument("--cwd") + parser.add_argument("--days", type=int, default=DEFAULT_STALE_DAYS) + parser.add_argument("--all", action="store_true") + args = parser.parse_args() + + slug = cwd_to_project_slug(args.cwd) if args.cwd and not args.all else None + stale = check_staleness(args.days, slug) + if not stale: + return 0 + + by_proj: dict[str, list[dict]] = {} + for e in stale: + by_proj.setdefault(e["project"], []).append(e) + + total = len(stale) + if slug: + lines = [f"Stale memories ({total}) in {slug_to_readable(slug)}:"] + for e in stale: + lines.append(f" - [{e['type']}] {e['name']}: {e['reason']}") + else: + lines = [f"Stale memories: {total} across {len(by_proj)} project(s)"] + for proj, entries in by_proj.items(): + lines.append(f" {slug_to_readable(proj)}: {len(entries)} stale") + for e in entries: + lines.append(f" - [{e['type']}] {e['name']}: {e['reason']}") + lines.append("\nUpdate `last_verified` to today's date after reviewing each memory.") + print("\n".join(lines)) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/coda-marketplace/plugins/coda-essentials/hooks/memory-stamp-verified.sh b/coda-marketplace/plugins/coda-essentials/hooks/memory-stamp-verified.sh new file mode 100755 index 0000000..7334789 --- /dev/null +++ 
b/coda-marketplace/plugins/coda-essentials/hooks/memory-stamp-verified.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +# PostToolUse (Edit|Write) hook: stamp `last_verified: YYYY-MM-DD` on memory +# files after they are edited. Operates only on Claude Code auto-memory files +# under ~/.claude/projects/*/memory/*.md (excluding the MEMORY.md index). +# +# Uses GNU sed syntax (Linux). Do not use BSD sed forms (`sed -i ''`) here. + +set -euo pipefail + +filepath="${CLAUDE_FILE_PATH:-}" + +[[ -n "$filepath" ]] || exit 0 +[[ "$filepath" == *"/.claude/projects/"*"/memory/"* ]] || exit 0 +[[ "$filepath" == *.md ]] || exit 0 +[[ "$(basename "$filepath")" != "MEMORY.md" ]] || exit 0 +[[ -f "$filepath" ]] || exit 0 + +today=$(date +%Y-%m-%d) + +head -1 "$filepath" | grep -q '^---' || exit 0 + +if grep -q '^last_verified:' "$filepath"; then + sed -i "s/^last_verified:.*$/last_verified: $today/" "$filepath" +else + awk -v stamp="last_verified: $today" ' + /^---$/ { count++ } + count == 2 && inserted == 0 { print stamp; inserted = 1 } + { print } + ' "$filepath" > "${filepath}.tmp" && mv "${filepath}.tmp" "$filepath" +fi diff --git a/coda-marketplace/plugins/coda-essentials/hooks/mlflow-trace-stop.sh b/coda-marketplace/plugins/coda-essentials/hooks/mlflow-trace-stop.sh new file mode 100755 index 0000000..5917b81 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/hooks/mlflow-trace-stop.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash +# Stop hook: flush the Claude Code session transcript to an MLflow trace. +# +# Claude Code pipes the hook-event JSON to our stdin. We capture that +# synchronously (fast, bounded to one read) then background the actual +# flush with stdin redirected from the captured file. 
This way: +# +# - the wrapper returns in <1s, unblocking the Stop chain +# (crystallize-nudge, brain-push, /til) +# - a hard `timeout 30` caps the backgrounded handler so a stall in +# transcript processing can't hold memory/CPU indefinitely +# - stop_hook_handler() actually receives its hook-event JSON, which +# naive `nohup ... & disown` would have redirected to /dev/null + +set -euo pipefail + +APP_DIR="/app/python/source_code" +LOG="$HOME/.mlflow-hook.log" +STDIN_FILE="$(mktemp -t mlflow-hook.XXXXXX)" + +# Synchronous: read Claude Code's hook-event JSON from stdin. +cat > "$STDIN_FILE" + +# Async: run the handler in the background with the captured stdin. +# The subshell cleans up the temp file after timeout/handler exits. +nohup bash -c " + timeout 30 uv run --project '$APP_DIR' python -c \ + 'from mlflow.claude_code.hooks import stop_hook_handler; stop_hook_handler()' \ + < '$STDIN_FILE' + rm -f '$STDIN_FILE' +" >> "$LOG" 2>&1 & disown diff --git a/coda-marketplace/plugins/coda-essentials/hooks/push-brain-to-workspace.sh b/coda-marketplace/plugins/coda-essentials/hooks/push-brain-to-workspace.sh new file mode 100755 index 0000000..e86d2f3 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/hooks/push-brain-to-workspace.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +# Stop hook: push Claude Code's auto-memory to Databricks Workspace so it +# survives app redeployment. Fire-and-forget: runs in background, never blocks. 
+ +set -euo pipefail + +APP_DIR="/app/python/source_code" +SYNC_SCRIPT="$APP_DIR/claude_brain_sync.py" +LOG="$HOME/.brain-sync.log" + +[ -f "$SYNC_SCRIPT" ] || exit 0 + +nohup uv run --project "$APP_DIR" python "$SYNC_SCRIPT" push \ + >> "$LOG" 2>&1 & disown diff --git a/coda-marketplace/plugins/coda-essentials/hooks/session-context-loader.sh b/coda-marketplace/plugins/coda-essentials/hooks/session-context-loader.sh new file mode 100755 index 0000000..0fd7133 --- /dev/null +++ b/coda-marketplace/plugins/coda-essentials/hooks/session-context-loader.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# SessionStart hook: inject recent git activity into context so Claude +# knows what was happening last session. + +set -euo pipefail + +git rev-parse --git-dir >/dev/null 2>&1 || exit 0 + +branch=$(git branch --show-current 2>/dev/null || echo "detached") +repo_name=$(basename "$(git rev-parse --show-toplevel 2>/dev/null)" 2>/dev/null || basename "$PWD") + +recent_commits=$(git log --all --since="7 days ago" \ + --format="%h %ad %an: %s" --date=relative --max-count=10 2>/dev/null || true) + +author=$(git config user.name 2>/dev/null || echo "") +last_own_commit="" +if [ -n "$author" ]; then + last_own_commit=$(git log --author="$author" --format="%ad" \ + --date=relative --max-count=1 2>/dev/null || true) +fi + +status_summary=$(git status --short 2>/dev/null | head -15 || true) +status_count=$(git status --short 2>/dev/null | wc -l | tr -d ' ') + +active_branches=$(git for-each-ref --sort=-committerdate \ + --format='%(refname:short) (%(committerdate:relative))' \ + refs/heads/ --count=5 2>/dev/null || true) + +open_prs="" +if command -v gh >/dev/null 2>&1; then + open_prs=$(gh pr list --author="@me" --state=open \ + --json number,title,headRefName \ + --jq '.[] | "#\(.number) [\(.headRefName)] \(.title)"' \ + 2>/dev/null | head -5 || true) +fi + +ctx="Session context for ${repo_name} (branch: ${branch})" +[ -n "$last_own_commit" ] && ctx="${ctx} +Your last commit: 
${last_own_commit}" +[ -n "$recent_commits" ] && ctx="${ctx} + +Recent commits (7d): +${recent_commits}" +if [ -n "$status_summary" ]; then + if [ "$status_count" -gt 15 ]; then + ctx="${ctx} + +Uncommitted changes (${status_count} files, showing first 15): +${status_summary}" + else + ctx="${ctx} + +Uncommitted changes: +${status_summary}" + fi +fi +[ -n "$active_branches" ] && ctx="${ctx} + +Active branches: +${active_branches}" +[ -n "$open_prs" ] && ctx="${ctx} + +Open PRs: +${open_prs}" + +json_ctx=$(printf '%s' "$ctx" | python3 -c 'import sys, json; print(json.dumps(sys.stdin.read()))') + +cat <=1 recent commit OR 3+ changed files. + +set -euo pipefail + +MIN_COMMITS=1 +MIN_CHANGED_FILES=3 +SINCE="2 hours ago" + +git rev-parse --git-dir >/dev/null 2>&1 || exit 0 + +author=$(git config user.name 2>/dev/null || echo "") +[ -n "$author" ] || exit 0 + +commit_count=$(git log --author="$author" --since="$SINCE" --oneline 2>/dev/null | wc -l | tr -d ' ') +changed_files=$(git diff --name-only HEAD 2>/dev/null | wc -l | tr -d ' ') +staged_files=$(git diff --cached --name-only 2>/dev/null | wc -l | tr -d ' ') +total_changed=$((changed_files + staged_files)) + +if [ "$commit_count" -ge "$MIN_COMMITS" ] || [ "$total_changed" -ge "$MIN_CHANGED_FILES" ]; then + summary="" + if [ "$commit_count" -gt 0 ]; then + summary="${commit_count} commit(s) this session" + fi + if [ "$total_changed" -gt 0 ]; then + if [ -n "$summary" ]; then + summary="$summary, ${total_changed} uncommitted file(s)" + else + summary="${total_changed} uncommitted changed file(s)" + fi + fi + + cat <///. +# Claude Code requires this layout — even directory-source marketplaces get +# their plugins copied into a versioned cache path, and `installPath` in +# installed_plugins.json must point at the cache, not at the source. 
+# Verified by inspecting a working fe-vibe install where the marketplace +# source lives at ~/Repos/vibe-ebc-fix but plugin installPath is +# ~/.claude/plugins/cache/fe-vibe/fe-html-slides/1.1.4. +PLUGIN_VERSION = "0.1.0" +plugin_cache_paths = {} +for pname in ("coda-essentials", "coda-databricks-skills"): + src_p = marketplace_dir / "plugins" / pname + dst_p = cache_root / pname / PLUGIN_VERSION + if dst_p.exists(): + shutil.rmtree(dst_p) + shutil.copytree(src_p, dst_p) + plugin_cache_paths[pname] = dst_p + print(f"Staged plugin {pname} -> {dst_p}") + +# Re-point hooks_dir at the cached coda-essentials so settings.json hooks +# reference the copy Claude Code actually loads, not the source tree. +# (Source and cache have identical contents; this keeps the hook path +# consistent with the plugin loader's view of the filesystem.) +hooks_dir = plugin_cache_paths["coda-essentials"] / "hooks" +if hooks_dir.exists(): + for hook in hooks_dir.iterdir(): + if hook.is_file(): + os.chmod(hook, 0o755) + +_now = _dt.datetime.now(_dt.timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.000Z") + +(plugins_state_dir / "known_marketplaces.json").write_text(json.dumps({ + "coda": { + "source": {"source": "directory", "path": str(marketplace_dir)}, + "installLocation": str(marketplace_dir), + "lastUpdated": _now, + } +}, indent=2)) + +(plugins_state_dir / "installed_plugins.json").write_text(json.dumps({ + "version": 2, + "plugins": { + "coda-essentials@coda": [{ + "scope": "user", + "installPath": str(plugin_cache_paths["coda-essentials"]), + "version": PLUGIN_VERSION, + "installedAt": _now, + "lastUpdated": _now, + }], + "coda-databricks-skills@coda": [{ + "scope": "user", + "installPath": str(plugin_cache_paths["coda-databricks-skills"]), + "version": PLUGIN_VERSION, + "installedAt": _now, + "lastUpdated": _now, + }], + }, +}, indent=2)) +print(f"Registered coda marketplace + plugins in {plugins_state_dir}") + +# Defence-in-depth: also copy commands/agents into ~/.claude/commands/ +# and 
~/.claude/agents/ at the user level. Claude Code's plugin loader +# on the Databricks Apps runtime didn't surface plugin-bundled commands +# on first attempt; user-level paths are the canonical fallback and +# are always scanned regardless of plugin state. Running both keeps the +# marketplace as the source of truth for content while guaranteeing the +# slash commands + subagents actually work. +user_commands_dir = claude_dir / "commands" +user_commands_dir.mkdir(exist_ok=True) +user_agents_dir = claude_dir / "agents" +user_agents_dir.mkdir(exist_ok=True) + +for src_commands in [plugin_cache_paths["coda-essentials"] / "commands"]: + if src_commands.exists(): + for f in src_commands.glob("*.md"): + shutil.copy2(str(f), str(user_commands_dir / f.name)) +print(f"User-level commands synced: {sorted(p.name for p in user_commands_dir.glob('*.md'))}") + +for src_agents in [plugin_cache_paths["coda-essentials"] / "agents"]: + if src_agents.exists(): + for f in src_agents.glob("*.md"): + shutil.copy2(str(f), str(user_agents_dir / f.name)) +print(f"User-level agents synced: {sorted(p.name for p in user_agents_dir.glob('*.md'))}") + # 1. Write settings.json for Databricks model serving (requires DATABRICKS_TOKEN) token = os.environ.get("DATABRICKS_TOKEN", "").strip() if token: @@ -50,22 +162,182 @@ settings["env"]["ANTHROPIC_CUSTOM_HEADERS"] = "x-databricks-use-coding-agent-mode: true" settings["env"]["CLAUDE_CODE_DISABLE_EXPERIMENTAL_BETAS"] = "1" + # Plugin marketplace, theme, permissions, and hooks (from coda-essentials). + # Assigned per-key on top of the read-merge-write env block above so we + # preserve the PR #153 race fix — overwriting `settings` wholesale would + # drop env vars that other setup scripts (e.g. setup_mlflow.py) wrote. 
+ settings["theme"] = "dark" + settings["outputStyle"] = "Explanatory" + settings.setdefault("extraKnownMarketplaces", {}) + settings["extraKnownMarketplaces"]["coda"] = { + "source": { + "source": "directory", + "path": str(marketplace_dir), + }, + } + settings.setdefault("enabledPlugins", {}) + settings["enabledPlugins"]["coda-essentials@coda"] = True + settings["enabledPlugins"]["coda-databricks-skills@coda"] = True + settings["permissions"] = { + "defaultMode": "auto", + "allow": [ + "Bash(databricks *)", + "Bash(uv *)", + "Bash(git *)", + "Bash(make *)", + "Bash(python *)", + "Bash(pytest *)", + "Bash(ruff *)", + "Bash(wsync)", + "Bash(databricks sync * /Workspace/Shared/apps/coding-agents*)", + "Bash(databricks workspace import /Workspace/Shared/apps/coding-agents/*)", + "Bash(databricks workspace import-dir * /Workspace/Shared/apps/coding-agents*)", + ], + "deny": [ + # Process kills that would take down the gunicorn worker (single-worker app) + "Bash(pkill *)", + "Bash(pkill)", + "Bash(killall *)", + "Bash(fuser -k *)", + "Bash(kill 1)", + "Bash(kill -9 1)", + "Bash(kill -- -1)", + # Catastrophic filesystem deletion (would wipe app source / home) + "Bash(rm -rf /)", + "Bash(rm -rf /*)", + "Bash(rm -rf /app*)", + "Bash(rm -rf ~)", + "Bash(rm -rf ~/*)", + "Bash(rm -rf $HOME)", + "Bash(rm -rf $HOME/*)", + # Credential/config destruction (breaks auth + PAT rotator) + "Bash(rm ~/.databrickscfg*)", + "Bash(rm -rf ~/.claude*)", + # Shared Workspace paths that other apps depend on + "Bash(rm -rf /Workspace*)", + "Bash(databricks workspace delete /Workspace/Shared*)", + "Bash(databricks workspace delete-dir /Workspace/Shared*)", + # Don't delete other users' coda apps + "Bash(databricks apps delete *)", + # System-level destructive + "Bash(shutdown *)", + "Bash(reboot *)", + "Bash(halt *)", + "Bash(mkfs *)", + "Bash(dd if=* of=/dev/*)", + "Bash(chmod -R * /app*)", + "Bash(chown -R * /app*)", + ], + } + settings["hooks"] = { + "SessionStart": [{ + "matcher": "", + 
"hooks": [
+                {"type": "command",
+                 "command": f"python3 {hooks_dir}/check-memory-staleness.py --cwd \"$PWD\"",
+                 "timeout": 10},
+                {"type": "command",
+                 "command": f"bash {hooks_dir}/session-context-loader.sh",
+                 "timeout": 15},
+            ],
+        }],
+        "PostToolUse": [{
+            "matcher": "Edit|Write",
+            "hooks": [{
+                "type": "command",
+                "command": f"bash {hooks_dir}/memory-stamp-verified.sh",
+                "timeout": 5,
+            }],
+        }],
+        "Stop": [{
+            "matcher": "",
+            "hooks": [
+                {"type": "command",
+                 "command": f"bash {hooks_dir}/session-crystallize-nudge.sh",
+                 "timeout": 10},
+                {"type": "command",
+                 "command": f"bash {hooks_dir}/push-brain-to-workspace.sh",
+                 "timeout": 5},
+            ],
+        }],
+    }
+    settings_path.write_text(json.dumps(settings, indent=2))
     print(f"Claude configured: {settings_path}")
+
+    # 1b. Secure-egress network detection. If docs.databricks.com is blocked
+    # (common in enterprise Azure workspaces with a restrictive outbound
+    # allowlist), append a note to ~/.claude/CLAUDE.md telling agents to
+    # substitute learn.microsoft.com/en-us/azure/databricks/ — Microsoft
+    # Learn mirrors the Azure Databricks docs one-to-one and is usually
+    # allowlisted by default. Idempotent via a marker comment.
+    import urllib.request  # stdlib, no extra deps
+    _egress_marker = "<!-- coda-egress-fallback -->"
+    _egress_note = (
+        f"\n{_egress_marker}\n"
+        "## Documentation fallback — secure-egress workspace\n"
+        "`docs.databricks.com` is blocked from this environment. 
"
+        "When looking up Databricks docs, rewrite URLs:\n"
+        "- `docs.databricks.com/azure/en/X` → `learn.microsoft.com/en-us/azure/databricks/X`\n"
+        "- `docs.databricks.com/aws/en/X` → `learn.microsoft.com/en-us/azure/databricks/X`\n"
+        "Microsoft Learn mirrors the Azure Databricks docs one-to-one and is usually reachable.\n"
+    )
+    try:
+        urllib.request.urlopen("https://docs.databricks.com/", timeout=3)
+        print("docs.databricks.com reachable — no egress fallback needed")
+    except Exception as _e:
+        print(f"docs.databricks.com unreachable ({type(_e).__name__}) — installing learn.microsoft.com fallback note")
+        _claude_md = claude_dir / "CLAUDE.md"
+        _existing = _claude_md.read_text() if _claude_md.exists() else ""
+        if _egress_marker not in _existing:
+            with open(_claude_md, "a") as _f:
+                _f.write(_egress_note)
+            print(f"Appended egress fallback note to {_claude_md}")
+
+    # 1c. Fork-specific directives. These are conventions that apply to every
+    # CODA spawned from this repo — package manager, required library floors,
+    # the working Unity Catalog, and the terminal-editor pointer. Mirrors the
+    # egress-fallback pattern above: idempotent via a marker comment, appended
+    # to ~/.claude/CLAUDE.md so Claude sees them in every session regardless
+    # of cwd. Update this block when fork-wide conventions change.
+    _fork_marker = "<!-- coda-fork-directives -->"
+    _fork_note = (
+        f"\n{_fork_marker}\n"
+        "## CODA fork directives (always active)\n\n"
+        "### Python packaging\n"
+        "Always use `uv` for Python work. Never `pip install` directly — "
+        "`uv add` for new deps, `uv sync` to install, `uv run` to execute.\n\n"
+        "### Required library version floors\n"
+        "When scaffolding or updating a `pyproject.toml`, pin at least:\n"
+        "- `mlflow >= 3.11`\n"
+        "- `databricks-sdk >= 0.100.0`\n"
+        "Bump older pins rather than matching them. Do not downgrade.\n\n"
+        "### Unity Catalog\n"
+        "The working catalog in this environment is `edp_aisandbox_aisandbox_dev`. 
" + "Place new schemas, tables, volumes, and pipelines under this catalog " + "unless the user explicitly names another. " + "Example: `edp_aisandbox_aisandbox_dev.my_schema.my_table`.\n\n" + "### Terminal editors\n" + "`micro` is pre-installed at `~/.local/bin/micro` (Ctrl-S save, Ctrl-Q quit, " + "mouse support, no modal editing — safe default to recommend). " + "For other editors, check `~/.local/share/coda/editors.txt` — generated " + "at app startup, lists every editor detected via `command -v`. " + "If a user asks for vim/emacs and the file shows they're missing, say so " + "rather than guessing.\n" + ) + _claude_md = claude_dir / "CLAUDE.md" + _existing = _claude_md.read_text() if _claude_md.exists() else "" + if _fork_marker not in _existing: + with open(_claude_md, "a") as _f: + _f.write(_fork_note) + print(f"Appended fork directives to {_claude_md}") + else: + print(f"Fork directives already present in {_claude_md}") else: print("No DATABRICKS_TOKEN — skipping settings.json (will be configured after PAT setup)") # 2. Write ~/.claude.json with onboarding skip AND MCP servers -mcp_servers = { - "deepwiki": { - "type": "http", - "url": "https://mcp.deepwiki.com/mcp" - }, - "exa": { - "type": "http", - "url": "https://mcp.exa.ai/mcp" - } -} +mcp_servers = {} # Auto-configure team-memory MCP if URL is provided team_memory_url = os.environ.get("TEAM_MEMORY_MCP_URL", "").strip().rstrip("/") @@ -76,6 +348,14 @@ } print(f"Team memory MCP configured: {team_memory_url}/mcp") +# Public-internet MCPs (deepwiki, exa) are opt-in: they live on the open +# internet and won't work in air-gapped or secure-egress deployments. Set +# ENABLE_PUBLIC_MCPS=true only when you know the runtime can reach them. 
+if os.environ.get("ENABLE_PUBLIC_MCPS", "").strip().lower() in ("1", "true", "yes"): + mcp_servers["deepwiki"] = {"type": "http", "url": "https://mcp.deepwiki.com/mcp"} + mcp_servers["exa"] = {"type": "http", "url": "https://mcp.exa.ai/mcp"} + print("Public MCPs enabled (ENABLE_PUBLIC_MCPS=true): deepwiki, exa") + claude_json = { "hasCompletedOnboarding": True, "mcpServers": mcp_servers @@ -102,21 +382,8 @@ else: print(f"CLI install warning: {result.stderr}") -# 4. Copy subagent definitions to ~/.claude/agents/ -# These enable TDD workflow: prd-writer → test-generator → implementer → build-feature -agents_src = Path(__file__).parent / "agents" -agents_dst = claude_dir / "agents" -agents_dst.mkdir(exist_ok=True) - -if agents_src.exists(): - copied = [] - for agent_file in agents_src.glob("*.md"): - shutil.copy2(str(agent_file), str(agents_dst / agent_file.name)) - copied.append(agent_file.name) - if copied: - print(f"Subagents installed: {', '.join(copied)}") -else: - print("No agents directory found, skipping subagent setup") +# 4. Subagents are discovered automatically from coda-essentials plugin +# (no manual copy step needed — the plugin's agents/ dir is scanned by Claude Code). # 5. Create projects directory projects_dir = home / "projects" @@ -126,3 +393,22 @@ # 5. Git identity and hooks are now configured by app.py's _setup_git_config() # (runs directly in Python before setup_claude.py, writes ~/.gitconfig and ~/.githooks/) print("Git identity and hooks: configured by app.py (skipping here)") + +# 6. Restore Claude Code auto-memory ("brain") from workspace if present. +# This makes accumulated memories survive app redeployment. Best-effort — +# failures are logged but don't break startup. 
+if token: + brain_sync = Path(__file__).parent / "claude_brain_sync.py" + if brain_sync.exists(): + try: + result = subprocess.run( + [sys.executable, str(brain_sync), "pull"], + capture_output=True, text=True, timeout=60, + env={**os.environ, "HOME": str(home)}, + ) + if result.stdout: + print(result.stdout.strip()) + if result.returncode != 0 and result.stderr: + print(f"brain-sync pull warning: {result.stderr.strip()}") + except Exception as e: + print(f"brain-sync pull skipped: {e}")