diff --git a/.gitignore b/.gitignore index aa409ec..4e51137 100644 --- a/.gitignore +++ b/.gitignore @@ -53,5 +53,6 @@ Desktop.ini # ─── Internal / non-public ──────────────────────────────────────────────────── .specify/ .opencode/ +.claude/ specs/ TODO.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 847f7e5..e56c198 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,91 +7,71 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] -## [1.0.0] - 2026-03-20 +## [1.5.0] - 2026-04-01 ### Added -- Initial public release of Devvami -- DevEx CLI for developers and teams -- Commands: auth, costs, create, docs, pipeline, pr, repo, tasks -- Support for GitHub, AWS, and task management integrations -- Configuration wizard and environment diagnostics - - -### 🐛 Bug Fixes - -* **tasks:** correct today filter to use date range and exclude closed status ([#10](https://github.com/santagostino/cli-santagostino/issues/10)) ([36003e4](https://github.com/santagostino/cli-santagostino/commit/36003e4e2ace164b913ad9954794f5d3381e25a4)) - -## [1.7.0](https://github.com/santagostino/cli-santagostino/compare/v1.6.0...v1.7.0) (2026-03-19) - -### ✨ Features - -* **help:** add animated SNTG logo and categorized command layout ([#9](https://github.com/santagostino/cli-santagostino/issues/9)) ([58c5581](https://github.com/santagostino/cli-santagostino/commit/58c5581e99123b6ce66ebbdd2fd42a2990534a80)) - -## [1.6.0](https://github.com/santagostino/cli-santagostino/compare/v1.5.0...v1.6.0) (2026-03-19) +- `dvmi sync-config-ai` — interactive TUI to manage AI coding tool configurations (MCP servers, commands, skills, agents) across VS Code Copilot, Claude Code, OpenCode, Gemini CLI, and GitHub Copilot CLI +- 5-tab TUI layout: Environments (read-only) + dedicated tab per category type (MCPs | Commands | Skills | Agents) +- Inline forms with Environments multi-select filtered by compatibility matrix +- OpenCode global detection via `~/.config/opencode/` +- 
`--json` flag for non-interactive / CI use -### ✨ Features +## [1.4.2] - 2026-03-29 -* **docs:** add sntg docs commands (list, read, search, projects) ([#8](https://github.com/santagostino/cli-santagostino/issues/8)) ([be92259](https://github.com/santagostino/cli-santagostino/commit/be92259ffdcb6a82b6f6cc11df4c736f9c71828a)) +### Changed -## [1.5.0](https://github.com/santagostino/cli-santagostino/compare/v1.4.0...v1.5.0) (2026-03-19) +- **vuln:** extend navigable TUI table to `dvmi vuln scan` ([#10](https://github.com/savez/devvami/issues/10)) -### ✨ Features +## [1.4.1] - 2026-03-29 -* improve list tables with search, colors and remove repo limit ([#7](https://github.com/santagostino/cli-santagostino/issues/7)) ([4ad25fb](https://github.com/santagostino/cli-santagostino/commit/4ad25fbaa1d827d5ca41672b22c29c8507e410a6)) +### Changed -## [1.4.0](https://github.com/santagostino/cli-santagostino/compare/v1.3.0...v1.4.0) (2026-03-18) +- **vuln:** interactive CVE table with navigable TUI and modal overlay — spec 006 ([#9](https://github.com/savez/devvami/issues/9)) -### ✨ Features +## [1.4.0] - 2026-03-28 -* add link column to tasks, fix today filter timezone/status, add clickup to whoami ([#6](https://github.com/santagostino/cli-santagostino/issues/6)) ([2db8a41](https://github.com/santagostino/cli-santagostino/commit/2db8a41c74d85a84eb010c84dcc84d990798d4a6)) - -### 🐛 Bug Fixes +### Added -* **ci:** disable footer-max-line-length to allow semantic-release changelog URLs ([4a4b392](https://github.com/santagostino/cli-santagostino/commit/4a4b392ab6a880f28cf3c009508a72bf13466044)) +- **dotfiles:** `dvmi dotfiles` commands — setup, add, sync, status — with age encryption and chezmoi integration ([#8](https://github.com/savez/devvami/issues/8)) -## [1.3.0](https://github.com/santagostino/cli-santagostino/compare/v1.2.0...v1.3.0) (2026-03-18) +## [1.3.0] - 2026-03-27 -### ✨ Features +### Added -* add ClickUp configuration wizard & remove branch create command 
([b8c3634](https://github.com/santagostino/cli-santagostino/commit/b8c36348c45df904f02f06227e9ad210bc7a88ab)) -* add ClickUp configuration wizard to init command ([085defe](https://github.com/santagostino/cli-santagostino/commit/085defe562fd83bf699d2883ad535ddf9e321ec6)) +- **aws:** `dvmi costs trend`, CloudWatch logs (`dvmi logs`), and aws-vault integration ([#7](https://github.com/savez/devvami/issues/7)) -## [1.2.0](https://github.com/santagostino/cli-santagostino/compare/v1.1.0...v1.2.0) (2026-03-18) +## [1.2.0] - 2026-03-25 -### ✨ Features +### Added -* aggiunge pr detail con QA steps/comments e pr review dedicato ([66b8f76](https://github.com/santagostino/cli-santagostino/commit/66b8f764b7d2a474b913d6c1e2a69d35cb16f26c)) -* sntg pr detail + sntg pr review ([bcf9da1](https://github.com/santagostino/cli-santagostino/commit/bcf9da113ab8881e21c12766f292d1f90443e03e)) +- **security:** `dvmi security setup` wizard with automated security checks ([#6](https://github.com/savez/devvami/issues/6)) +- Welcome message on first run -## [1.1.0](https://github.com/santagostino/cli-santagostino/compare/v1.0.0...v1.1.0) (2026-03-18) +## [1.1.2] - 2026-03-24 -### ✨ Features +### Fixed -* **cli:** redesign visivo con gradient animato, emoji e spinner brand-styled ([e452f25](https://github.com/santagostino/cli-santagostino/commit/e452f25f85523a4a34a4e8a79bfd16c123cdef25)) -* **cli:** redesign visivo con gradient animato, emoji e spinner brand-styled ([313d718](https://github.com/santagostino/cli-santagostino/commit/313d71893229af4d14bde051f8610c48d2bc984f)) +- **init:** stop ora spinner before interactive prompts to prevent TTY freeze on macOS ([#5](https://github.com/savez/devvami/issues/5)) -### 🐛 Bug Fixes +## [1.1.1] - 2026-03-23 -* **cli:** rimuovi --registry da npm install in upgrade per evitare 404 sulle dipendenze ([0fe4395](https://github.com/santagostino/cli-santagostino/commit/0fe4395b6a2c42fa93917bd4a8132927f33e4c40)) -* **cli:** version check usa gh releases API 
invece di npm view ([0e78496](https://github.com/santagostino/cli-santagostino/commit/0e7849652d5606113838862f80e5ca594b3ca116)) +### Fixed -## 1.0.0 (2026-03-18) +- Apply security fixes and add pre-push version sync hook ([#4](https://github.com/savez/devvami/issues/4)) +- Apply 7 security fixes from ZeroTrustino audit ([#3](https://github.com/savez/devvami/issues/3)) -### ✨ Features +## [1.1.0] - 2026-03-23 -* refactor commands as pure topics and improve help discoverability ([0dda165](https://github.com/santagostino/cli-santagostino/commit/0dda165385f4df665148dba19af24b238a527e93)) +### Added -### 🐛 Bug Fixes +- **prompts:** AI prompt hub — `dvmi prompts browse`, `download`, `run`, `list` ([#2](https://github.com/savez/devvami/issues/2)) -* **ci:** aggiungi pnpm-lock.yaml al repo e rimuovilo dal gitignore ([77abeca](https://github.com/santagostino/cli-santagostino/commit/77abeca996e9339b844940089794e87b14e5432d)) -* correggere sntg upgrade che non rilevava nuove versioni ([a922655](https://github.com/santagostino/cli-santagostino/commit/a922655bcda93dff89026dab786b1a2affb9c850)) -* **ci:** disabilita body-max-line-length e rimuovi eslint-disable sec… ([4c99d98](https://github.com/santagostino/cli-santagostino/commit/4c99d9856800d4e7070d15545770b42af4b63980)) -* **ci:** disabilita body-max-line-length e rimuovi eslint-disable security non valido ([b75f872](https://github.com/santagostino/cli-santagostino/commit/b75f872eacf038b921cbfbc981065a0401650c87)) -* **ci:** rimuovi import readFile inutilizzato nel test version-check ([a630458](https://github.com/santagostino/cli-santagostino/commit/a630458a1f1d97b9f961fd1411c81dd1a0f564b9)) +## [1.0.0] - 2026-03-20 -### 📚 Documentation +### Added -* aggiungi README con istruzioni installazione e aggiornamento CLI ([4ecb16b](https://github.com/santagostino/cli-santagostino/commit/4ecb16bc7b7780732077ba1c5b899947383c3f77)) -* migliora README e aggiungi TODO.md al gitignore 
([5116dc9](https://github.com/santagostino/cli-santagostino/commit/5116dc9f37f542843b47a07afe6daaf341e805eb)) -* README in stile opensource con badge, emoji e sezione sviluppo locale ([ad720b5](https://github.com/santagostino/cli-santagostino/commit/ad720b59cbf700986861ab074da70ba4eccf8166)) +- Initial public open-source release of Devvami +- Commands: `auth`, `costs`, `create`, `docs`, `pipeline`, `pr`, `repo`, `tasks`, `branch`, `doctor`, `init`, `upgrade`, `whoami` +- GitHub, AWS, and ClickUp integrations +- Configuration wizard (`dvmi init`) and environment diagnostics (`dvmi doctor`) diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..518c44e --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,29 @@ +# devvami Development Guidelines + +Auto-generated from all feature plans. Last updated: 2026-04-01 + +## Active Technologies + +- JavaScript (ESM, `.js`) with JSDoc — Node.js >= 24 + `@oclif/core` v4, `chalk` v5, `ora` v8, `execa` v9 — zero new TUI dependencies (007-sync-ai-config-tui) + +## Project Structure + +```text +src/ +tests/ +``` + +## Commands + +npm test && npm run lint + +## Code Style + +JavaScript (ESM, `.js`) with JSDoc — Node.js >= 24: Follow standard conventions + +## Recent Changes + +- 007-sync-ai-config-tui: Added JavaScript (ESM, `.js`) with JSDoc — Node.js >= 24 + `@oclif/core` v4, `chalk` v5, `ora` v8, `execa` v9 — zero new TUI dependencies + + + diff --git a/package.json b/package.json index 84cdda4..8f2d1a6 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "devvami", "description": "DevEx CLI for developers and teams — manage repos, PRs, pipelines, tasks, and costs from the terminal", - "version": "1.4.2", + "version": "1.5.0", "author": "", "type": "module", "bin": { @@ -139,6 +139,7 @@ "test:integration": "vitest run --project integration", "test:watch": "vitest", "test:coverage": "vitest run --coverage", + "pretest": "oclif manifest", "prepack": "oclif manifest", "postpack": "shx rm -f oclif.manifest.json", 
"prepare": "lefthook install" diff --git a/src/commands/auth/login.js b/src/commands/auth/login.js index 5f88392..377f397 100644 --- a/src/commands/auth/login.js +++ b/src/commands/auth/login.js @@ -1,8 +1,8 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { checkGitHubAuth, loginGitHub, checkAWSAuth, loginAWS } from '../../services/auth.js' -import { loadConfig } from '../../services/config.js' +import {checkGitHubAuth, loginGitHub, checkAWSAuth, loginAWS} from '../../services/auth.js' +import {loadConfig} from '../../services/config.js' export default class AuthLogin extends Command { static description = 'Autenticazione centralizzata GitHub + AWS' @@ -16,36 +16,38 @@ export default class AuthLogin extends Command { static enableJsonFlag = true static flags = { - github: Flags.boolean({ description: 'Solo autenticazione GitHub', default: false }), - aws: Flags.boolean({ description: 'Solo autenticazione AWS', default: false }), - verbose: Flags.boolean({ description: 'Output dettagliato', default: false }), + github: Flags.boolean({description: 'Solo autenticazione GitHub', default: false}), + aws: Flags.boolean({description: 'Solo autenticazione AWS', default: false}), + verbose: Flags.boolean({description: 'Output dettagliato', default: false}), } async run() { - const { flags } = await this.parse(AuthLogin) + const {flags} = await this.parse(AuthLogin) const isJson = flags.json const doGitHub = !flags.aws || flags.github const doAWS = !flags.github || flags.aws - const result = { github: null, aws: null } + const result = {github: null, aws: null} // GitHub auth if (doGitHub) { - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking GitHub auth...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking GitHub auth...')}).start() let ghStatus = await checkGitHubAuth() if (ghStatus.authenticated) { spinner?.succeed(`GitHub: already authenticated as @${ghStatus.username}`) - result.github = { status: 'ok', username: ghStatus.username, org: '' } + result.github = {status: 'ok', username: ghStatus.username, org: ''} } else { if (spinner) spinner.text = 'Logging in to GitHub...' ghStatus = await loginGitHub() if (ghStatus.authenticated) { spinner?.succeed(`GitHub: authenticated as @${ghStatus.username}`) - result.github = { status: 'ok', username: ghStatus.username, org: '' } + result.github = {status: 'ok', username: ghStatus.username, org: ''} } else { spinner?.fail('GitHub authentication failed') - result.github = { status: 'error', error: ghStatus.error } + result.github = {status: 'error', error: ghStatus.error} } } } @@ -53,21 +55,23 @@ export default class AuthLogin extends Command { // AWS auth if (doAWS) { const config = await loadConfig() - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking AWS auth...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking AWS auth...')}).start() let awsStatus = await checkAWSAuth() if (awsStatus.authenticated) { spinner?.succeed(`AWS: session active for account ${awsStatus.account}`) - result.aws = { status: 'ok', account: awsStatus.account, role: awsStatus.role } + result.aws = {status: 'ok', account: awsStatus.account, role: awsStatus.role} } else { if (spinner) spinner.text = 'Logging in to AWS via aws-vault...' 
awsStatus = await loginAWS(config.awsProfile || 'default') if (awsStatus.authenticated) { spinner?.succeed(`AWS: logged in to account ${awsStatus.account}`) - result.aws = { status: 'ok', account: awsStatus.account, role: awsStatus.role } + result.aws = {status: 'ok', account: awsStatus.account, role: awsStatus.role} } else { spinner?.fail('AWS authentication failed') - result.aws = { status: 'error', error: awsStatus.error } + result.aws = {status: 'error', error: awsStatus.error} } } } diff --git a/src/commands/changelog.js b/src/commands/changelog.js index c7a675b..42cd27c 100644 --- a/src/commands/changelog.js +++ b/src/commands/changelog.js @@ -1,6 +1,6 @@ -import { Command, Flags } from '@oclif/core' -import { writeFile } from 'node:fs/promises' -import { exec } from '../services/shell.js' +import {Command, Flags} from '@oclif/core' +import {writeFile} from 'node:fs/promises' +import {exec} from '../services/shell.js' /** * Parse a conventional commit message. @@ -10,7 +10,7 @@ import { exec } from '../services/shell.js' function parseConventionalCommit(message) { const match = message.match(/^(\w+)(?:\(([^)]+)\))?!?: (.+)/) if (!match) return null - return { type: match[1], scope: match[2] ?? '', description: match[3] } + return {type: match[1], scope: match[2] ?? 
'', description: match[3]} } export default class Changelog extends Command { @@ -25,13 +25,13 @@ export default class Changelog extends Command { static enableJsonFlag = true static flags = { - from: Flags.string({ description: 'Tag o commit di partenza (default: ultimo tag)' }), - to: Flags.string({ description: 'Commit finale (default: HEAD)', default: 'HEAD' }), - output: Flags.string({ description: 'Scrivi su file (default: stdout)' }), + from: Flags.string({description: 'Tag o commit di partenza (default: ultimo tag)'}), + to: Flags.string({description: 'Commit finale (default: HEAD)', default: 'HEAD'}), + output: Flags.string({description: 'Scrivi su file (default: stdout)'}), } async run() { - const { flags } = await this.parse(Changelog) + const {flags} = await this.parse(Changelog) const isJson = flags.json // Determine from ref @@ -51,13 +51,13 @@ export default class Changelog extends Command { const lines = logResult.stdout.split('\n').filter(Boolean) /** @type {Record>} */ - const sections = { feat: [], fix: [], chore: [], docs: [], refactor: [], test: [], other: [] } + const sections = {feat: [], fix: [], chore: [], docs: [], refactor: [], test: [], other: []} for (const line of lines) { const [message, hash] = line.split('|') const parsed = parseConventionalCommit(message) const type = parsed?.type ?? 'other' - const entry = { message: message.trim(), hash: hash?.slice(0, 7) ?? '' } + const entry = {message: message.trim(), hash: hash?.slice(0, 7) ?? ''} if (type in sections) { sections[type].push(entry) } else { @@ -65,7 +65,7 @@ export default class Changelog extends Command { } } - if (isJson) return { from: from || 'beginning', to: flags.to, sections } + if (isJson) return {from: from || 'beginning', to: flags.to, sections} // Build markdown const title = `## [Unreleased]${from ? 
` (since ${from})` : ''}` @@ -97,6 +97,6 @@ export default class Changelog extends Command { this.log(output) } - return { from: from || 'beginning', to: flags.to, sections } + return {from: from || 'beginning', to: flags.to, sections} } } diff --git a/src/commands/costs/get.js b/src/commands/costs/get.js index c741717..e0cc332 100644 --- a/src/commands/costs/get.js +++ b/src/commands/costs/get.js @@ -1,10 +1,10 @@ -import { Command, Args, Flags } from '@oclif/core' -import { input } from '@inquirer/prompts' +import {Command, Args, Flags} from '@oclif/core' +import {input} from '@inquirer/prompts' import ora from 'ora' -import { getServiceCosts } from '../../services/aws-costs.js' -import { loadConfig } from '../../services/config.js' -import { formatCostTable, calculateTotal } from '../../formatters/cost.js' -import { DvmiError } from '../../utils/errors.js' +import {getServiceCosts} from '../../services/aws-costs.js' +import {loadConfig} from '../../services/config.js' +import {formatCostTable, calculateTotal} from '../../formatters/cost.js' +import {DvmiError} from '../../utils/errors.js' import { awsVaultPrefix, isAwsVaultSession, @@ -28,7 +28,7 @@ export default class CostsGet extends Command { static enableJsonFlag = true static args = { - service: Args.string({ description: 'Service name (used to derive tag filter from config)', required: false }), + service: Args.string({description: 'Service name (used to derive tag filter from config)', required: false}), } static flags = { @@ -48,18 +48,14 @@ export default class CostsGet extends Command { } async run() { - const { args, flags } = await this.parse(CostsGet) + const {args, flags} = await this.parse(CostsGet) const isJson = flags.json const isInteractive = !isJson && process.stdout.isTTY && process.env.CI !== 'true' const groupBy = /** @type {'service'|'tag'|'both'} */ (flags['group-by']) const config = await loadConfig() - if ( - isInteractive && - !isAwsVaultSession() && - 
process.env.DVMI_AWS_VAULT_REEXEC !== '1' - ) { + if (isInteractive && !isAwsVaultSession() && process.env.DVMI_AWS_VAULT_REEXEC !== '1') { const profile = await input({ message: 'AWS profile (aws-vault):', default: config.awsProfile || process.env.AWS_VAULT || 'default', @@ -91,19 +87,16 @@ export default class CostsGet extends Command { // Validate: tag key required when grouping by tag or both if ((groupBy === 'tag' || groupBy === 'both') && !tagKey) { - throw new DvmiError( - 'No tag key available.', - 'Pass --tag-key or configure projectTags in dvmi config.', - ) + throw new DvmiError('No tag key available.', 'Pass --tag-key or configure projectTags in dvmi config.') } const serviceArg = args.service ?? 'all' - const tags = config.projectTags ?? (args.service ? { project: args.service } : {}) + const tags = config.projectTags ?? (args.service ? {project: args.service} : {}) const spinner = isJson ? null : ora(`Fetching costs...`).start() try { - const { entries, period } = await getServiceCosts( + const {entries, period} = await getServiceCosts( serviceArg, tags, /** @type {any} */ (flags.period), @@ -119,7 +112,7 @@ export default class CostsGet extends Command { tagKey: tagKey ?? null, period, items: entries, - total: { amount: total, unit: 'USD' }, + total: {amount: total, unit: 'USD'}, } if (isJson) return result @@ -156,10 +149,7 @@ export default class CostsGet extends Command { } const prefix = awsVaultPrefix(config) - this.error( - `No AWS credentials. Use: ${prefix}dvmi costs get` + - (args.service ? ` ${args.service}` : ''), - ) + this.error(`No AWS credentials. Use: ${prefix}dvmi costs get` + (args.service ? 
` ${args.service}` : '')) } throw err } diff --git a/src/commands/costs/trend.js b/src/commands/costs/trend.js index 4b85bc2..3c0ea24 100644 --- a/src/commands/costs/trend.js +++ b/src/commands/costs/trend.js @@ -1,10 +1,10 @@ -import { Command, Flags } from '@oclif/core' -import { input } from '@inquirer/prompts' +import {Command, Flags} from '@oclif/core' +import {input} from '@inquirer/prompts' import ora from 'ora' -import { getTrendCosts, getTwoMonthPeriod } from '../../services/aws-costs.js' -import { loadConfig } from '../../services/config.js' -import { barChart, lineChart } from '../../formatters/charts.js' -import { DvmiError } from '../../utils/errors.js' +import {getTrendCosts, getTwoMonthPeriod} from '../../services/aws-costs.js' +import {loadConfig} from '../../services/config.js' +import {barChart, lineChart} from '../../formatters/charts.js' +import {DvmiError} from '../../utils/errors.js' import { awsVaultPrefix, isAwsVaultSession, @@ -42,18 +42,14 @@ export default class CostsTrend extends Command { } async run() { - const { flags } = await this.parse(CostsTrend) + const {flags} = await this.parse(CostsTrend) const isJson = flags.json const isInteractive = !isJson && process.stdout.isTTY && process.env.CI !== 'true' const groupBy = /** @type {'service'|'tag'|'both'} */ (flags['group-by']) const config = await loadConfig() - if ( - isInteractive && - !isAwsVaultSession() && - process.env.DVMI_AWS_VAULT_REEXEC !== '1' - ) { + if (isInteractive && !isAwsVaultSession() && process.env.DVMI_AWS_VAULT_REEXEC !== '1') { const profile = await input({ message: 'AWS profile (aws-vault):', default: config.awsProfile || process.env.AWS_VAULT || 'default', @@ -83,10 +79,7 @@ export default class CostsTrend extends Command { const tagKey = flags['tag-key'] ?? 
configTagKey if ((groupBy === 'tag' || groupBy === 'both') && !tagKey) { - throw new DvmiError( - 'No tag key available.', - 'Pass --tag-key or configure projectTags in dvmi config.', - ) + throw new DvmiError('No tag key available.', 'Pass --tag-key or configure projectTags in dvmi config.') } const spinner = isJson ? null : ora('Fetching cost trend data...').start() @@ -95,13 +88,13 @@ export default class CostsTrend extends Command { const trendSeries = await getTrendCosts(groupBy, tagKey) spinner?.stop() - const { start, end } = getTwoMonthPeriod() + const {start, end} = getTwoMonthPeriod() if (isJson) { return { groupBy, tagKey: tagKey ?? null, - period: { start, end }, + period: {start, end}, series: trendSeries, } } @@ -113,9 +106,7 @@ export default class CostsTrend extends Command { // Convert CostTrendSeries[] → ChartSeries[] // All series must share the same label (date) axis — use the union of all dates - const allDates = Array.from( - new Set(trendSeries.flatMap((s) => s.points.map((p) => p.date))), - ).sort() + const allDates = Array.from(new Set(trendSeries.flatMap((s) => s.points.map((p) => p.date)))).sort() /** @type {import('../../formatters/charts.js').ChartSeries[]} */ const chartSeries = trendSeries.map((s) => { @@ -128,9 +119,7 @@ export default class CostsTrend extends Command { }) const title = `AWS Cost Trend — last 2 months (${start} → ${end})` - const rendered = flags.line - ? lineChart(chartSeries, { title }) - : barChart(chartSeries, { title }) + const rendered = flags.line ? 
lineChart(chartSeries, {title}) : barChart(chartSeries, {title}) this.log(rendered) } catch (err) { diff --git a/src/commands/create/repo.js b/src/commands/create/repo.js index e4b9ace..e4a062d 100644 --- a/src/commands/create/repo.js +++ b/src/commands/create/repo.js @@ -1,12 +1,12 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { input, confirm } from '@inquirer/prompts' -import { listTemplates, createFromTemplate, setBranchProtection, enableDependabot } from '../../services/github.js' -import { loadConfig } from '../../services/config.js' -import { validateRepoName } from '../../validators/repo-name.js' -import { renderTable } from '../../formatters/table.js' -import { exec } from '../../services/shell.js' +import {input, confirm} from '@inquirer/prompts' +import {listTemplates, createFromTemplate, setBranchProtection, enableDependabot} from '../../services/github.js' +import {loadConfig} from '../../services/config.js' +import {validateRepoName} from '../../validators/repo-name.js' +import {renderTable} from '../../formatters/table.js' +import {exec} from '../../services/shell.js' /** * @param {string} lang @@ -16,15 +16,15 @@ function langColor(lang) { const map = { javascript: chalk.yellow, typescript: chalk.blue, - python: chalk.green, - java: chalk.red, - go: chalk.cyan, - ruby: chalk.magenta, - rust: chalk.hex('#CE422B'), - kotlin: chalk.hex('#7F52FF'), - swift: chalk.hex('#F05138'), - php: chalk.hex('#777BB4'), - shell: chalk.greenBright, + python: chalk.green, + java: chalk.red, + go: chalk.cyan, + ruby: chalk.magenta, + rust: chalk.hex('#CE422B'), + kotlin: chalk.hex('#7F52FF'), + swift: chalk.hex('#F05138'), + php: chalk.hex('#777BB4'), + shell: chalk.greenBright, } const fn = map[lang.toLowerCase()] return fn ? 
fn(lang) : chalk.dim(lang) @@ -43,21 +43,21 @@ export default class CreateRepo extends Command { static enableJsonFlag = true static args = { - template: Args.string({ description: 'Nome del template', required: false }), + template: Args.string({description: 'Nome del template', required: false}), } static flags = { - list: Flags.boolean({ description: 'Lista template disponibili', default: false }), - search: Flags.string({ char: 's', description: 'Cerca in nome e descrizione dei template (case-insensitive)' }), - name: Flags.string({ description: 'Nome del nuovo repository' }), - description: Flags.string({ description: 'Descrizione del repository', default: '' }), - private: Flags.boolean({ description: 'Repository privato (default)', default: true }), - public: Flags.boolean({ description: 'Repository pubblico', default: false }), - 'dry-run': Flags.boolean({ description: 'Preview senza eseguire', default: false }), + list: Flags.boolean({description: 'Lista template disponibili', default: false}), + search: Flags.string({char: 's', description: 'Cerca in nome e descrizione dei template (case-insensitive)'}), + name: Flags.string({description: 'Nome del nuovo repository'}), + description: Flags.string({description: 'Descrizione del repository', default: ''}), + private: Flags.boolean({description: 'Repository privato (default)', default: true}), + public: Flags.boolean({description: 'Repository pubblico', default: false}), + 'dry-run': Flags.boolean({description: 'Preview senza eseguire', default: false}), } async run() { - const { args, flags } = await this.parse(CreateRepo) + const {args, flags} = await this.parse(CreateRepo) const isJson = flags.json const isDryRun = flags['dry-run'] const config = await loadConfig() @@ -68,51 +68,58 @@ export default class CreateRepo extends Command { // --list mode if (flags.list || !args.template) { - const spinner = isJson ? 
null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching templates...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching templates...')}).start() const templates = await listTemplates(config.org) spinner?.stop() // Search filter const searchQuery = flags.search?.toLowerCase() const filtered = searchQuery - ? templates.filter((t) => - t.name.toLowerCase().includes(searchQuery) || - t.description.toLowerCase().includes(searchQuery), + ? templates.filter( + (t) => t.name.toLowerCase().includes(searchQuery) || t.description.toLowerCase().includes(searchQuery), ) : templates - if (isJson) return { templates: filtered } + if (isJson) return {templates: filtered} if (templates.length === 0) { this.log(chalk.yellow('No templates found in the organization.')) this.log(chalk.dim('Templates are GitHub repos marked as "Template repository".')) - return { templates: [] } + return {templates: []} } if (filtered.length === 0) { this.log(chalk.dim(`No templates matching "${flags.search}".`)) - return { templates: [] } + return {templates: []} } - const filterInfo = flags.search - ? chalk.dim(' — search: ') + chalk.white(`"${flags.search}"`) - : '' + const filterInfo = flags.search ? chalk.dim(' — search: ') + chalk.white(`"${flags.search}"`) : '' this.log( chalk.bold('\nAvailable templates') + - filterInfo + - chalk.dim(` (${filtered.length}${filtered.length < templates.length ? `/${templates.length}` : ''})`) + - '\n', + filterInfo + + chalk.dim(` (${filtered.length}${filtered.length < templates.length ? `/${templates.length}` : ''})`) + + '\n', ) - this.log(renderTable(filtered, [ - { header: 'Name', key: 'name', width: 35 }, - { header: 'Language', key: 'language', width: 14, format: (v) => v || '—', colorize: (v) => v === '—' ? 
chalk.dim(v) : langColor(v) }, - { header: 'Description', key: 'description', width: 60, format: (v) => String(v || '—') }, - ])) + this.log( + renderTable(filtered, [ + {header: 'Name', key: 'name', width: 35}, + { + header: 'Language', + key: 'language', + width: 14, + format: (v) => v || '—', + colorize: (v) => (v === '—' ? chalk.dim(v) : langColor(v)), + }, + {header: 'Description', key: 'description', width: 60, format: (v) => String(v || '—')}, + ]), + ) this.log('') - return { templates: filtered } + return {templates: filtered} } // Create mode @@ -126,7 +133,7 @@ export default class CreateRepo extends Command { // Get repo name let repoName = flags.name if (!repoName && !isJson) { - repoName = await input({ message: 'Repository name:' }) + repoName = await input({message: 'Repository name:'}) } else if (!repoName) { this.error('--name is required in non-interactive mode') } @@ -142,13 +149,16 @@ export default class CreateRepo extends Command { const ok = await confirm({ message: `Create ${isPrivate ? 'private' : 'public'} repo "${config.org}/${repoName}" from "${args.template}"?`, }) - if (!ok) { this.log('Aborted.'); return } + if (!ok) { + this.log('Aborted.') + return + } } if (isDryRun) { const preview = { - repository: { name: repoName, org: config.org, template: args.template, private: isPrivate }, - postScaffolding: { branchProtection: 'would configure', dependabot: 'would enable', codeowners: 'would create' }, + repository: {name: repoName, org: config.org, template: args.template, private: isPrivate}, + postScaffolding: {branchProtection: 'would configure', dependabot: 'would enable', codeowners: 'would create'}, } if (isJson) return preview this.log(chalk.bold('\nDry run preview:')) @@ -157,7 +167,9 @@ export default class CreateRepo extends Command { } // Create repo - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Creating repository...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Creating repository...')}).start() const repo = await createFromTemplate({ templateOwner: config.org, templateRepo: args.template, @@ -169,22 +181,28 @@ export default class CreateRepo extends Command { spinner?.succeed(`Repository created: ${repo.htmlUrl}`) // Post-scaffolding - const bpSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Configuring branch protection...') }).start() + const bpSpinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Configuring branch protection...')}).start() await setBranchProtection(config.org, repoName).catch(() => null) bpSpinner?.succeed('Branch protection configured') - const depSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Enabling Dependabot...') }).start() + const depSpinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Enabling Dependabot...')}).start() await enableDependabot(config.org, repoName).catch(() => null) depSpinner?.succeed('Dependabot enabled') // Clone - const cloneSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Cloning repository...') }).start() + const cloneSpinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Cloning repository...')}).start() await exec('gh', ['repo', 'clone', `${config.org}/${repoName}`]) cloneSpinner?.succeed(`Cloned to ./${repoName}`) const result = { - repository: { name: repoName, url: repo.htmlUrl, localPath: `./${repoName}` }, - postScaffolding: { branchProtection: 'ok', dependabot: 'ok', codeowners: 'ok' }, + repository: {name: repoName, url: repo.htmlUrl, localPath: `./${repoName}`}, + postScaffolding: {branchProtection: 'ok', dependabot: 'ok', codeowners: 'ok'}, } if (!isJson) { diff --git a/src/commands/docs/list.js b/src/commands/docs/list.js index 67e0f0e..7fc7778 100644 --- a/src/commands/docs/list.js +++ b/src/commands/docs/list.js @@ -1,17 +1,17 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { loadConfig } from '../../services/config.js' -import { listDocs, detectCurrentRepo } from '../../services/docs.js' -import { renderTable } from '../../formatters/table.js' +import {loadConfig} from '../../services/config.js' +import {listDocs, detectCurrentRepo} from '../../services/docs.js' +import {renderTable} from '../../formatters/table.js' /** * @param {string} type * @returns {string} */ function typeColor(type) { - if (type === 'readme') return chalk.cyan(type) - if (type === 'swagger') return chalk.yellow(type) + if (type === 'readme') return chalk.cyan(type) + if (type === 'swagger') return chalk.yellow(type) if (type === 'asyncapi') return chalk.green(type) return chalk.dim(type) } @@ -38,12 +38,12 @@ export default class DocsList extends Command { static enableJsonFlag = true static flags = { - repo: Flags.string({ char: 'r', description: 'Nome del repository (default: repo nella directory corrente)' }), - search: Flags.string({ char: 's', description: 'Filtra per nome o percorso (case-insensitive)' }), + repo: Flags.string({char: 'r', description: 'Nome del 
repository (default: repo nella directory corrente)'}), + search: Flags.string({char: 's', description: 'Filtra per nome o percorso (case-insensitive)'}), } async run() { - const { flags } = await this.parse(DocsList) + const {flags} = await this.parse(DocsList) const isJson = flags.json const config = await loadConfig() @@ -55,13 +55,15 @@ export default class DocsList extends Command { repo = flags.repo } else { try { - ;({ owner, repo } = await detectCurrentRepo()) + ;({owner, repo} = await detectCurrentRepo()) } catch (err) { this.error(/** @type {Error} */ (err).message) } } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching documentation...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching documentation...')}).start() let entries try { entries = await listDocs(owner, repo) @@ -77,34 +79,36 @@ export default class DocsList extends Command { ? entries.filter((e) => e.name.toLowerCase().includes(q) || e.path.toLowerCase().includes(q)) : entries - if (isJson) return { repo, owner, entries: filtered, total: filtered.length } + if (isJson) return {repo, owner, entries: filtered, total: filtered.length} if (entries.length === 0) { this.log(chalk.dim(`No documentation found in ${owner}/${repo}.`)) - return { repo, owner, entries: [], total: 0 } + return {repo, owner, entries: [], total: 0} } if (filtered.length === 0) { this.log(chalk.dim(`No documentation matching "${flags.search}" in ${owner}/${repo}.`)) - return { repo, owner, entries: [], total: 0 } + return {repo, owner, entries: [], total: 0} } - const filterInfo = q ? chalk.dim(` — search: ${chalk.white(`"${flags.search}"`)}`): '' + const filterInfo = q ? chalk.dim(` — search: ${chalk.white(`"${flags.search}"`)}`) : '' this.log( chalk.bold(`\nDocumentation in ${owner}/${repo}`) + - filterInfo + - chalk.dim(` (${filtered.length}${filtered.length < entries.length ? 
`/${entries.length}` : ''})`) + - '\n', + filterInfo + + chalk.dim(` (${filtered.length}${filtered.length < entries.length ? `/${entries.length}` : ''})`) + + '\n', ) - this.log(renderTable(filtered, [ - { header: 'Type', key: 'type', width: 10, colorize: typeColor }, - { header: 'Name', key: 'name', width: 30 }, - { header: 'Path', key: 'path', width: 50 }, - { header: 'Size', key: 'size', width: 8, format: (v) => formatSize(Number(v)) }, - ])) + this.log( + renderTable(filtered, [ + {header: 'Type', key: 'type', width: 10, colorize: typeColor}, + {header: 'Name', key: 'name', width: 30}, + {header: 'Path', key: 'path', width: 50}, + {header: 'Size', key: 'size', width: 8, format: (v) => formatSize(Number(v))}, + ]), + ) this.log('') - return { repo, owner, entries: filtered, total: filtered.length } + return {repo, owner, entries: filtered, total: filtered.length} } } diff --git a/src/commands/docs/projects.js b/src/commands/docs/projects.js index fa1cb3d..04aa260 100644 --- a/src/commands/docs/projects.js +++ b/src/commands/docs/projects.js @@ -1,13 +1,13 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { loadConfig } from '../../services/config.js' -import { listRepos } from '../../services/github.js' -import { listProjectsDocs } from '../../services/docs.js' -import { renderTable } from '../../formatters/table.js' +import {loadConfig} from '../../services/config.js' +import {listRepos} from '../../services/github.js' +import {listProjectsDocs} from '../../services/docs.js' +import {renderTable} from '../../formatters/table.js' export default class DocsProjects extends Command { - static description = 'Mostra la documentazione disponibile per ogni repository dell\'organizzazione' + static description = "Mostra la documentazione disponibile per ogni repository dell'organizzazione" static examples = [ '<%= config.bin %> docs projects', @@ -18,11 +18,11 @@ export 
default class DocsProjects extends Command { static enableJsonFlag = true static flags = { - search: Flags.string({ char: 's', description: 'Filtra per nome repository (case-insensitive)' }), + search: Flags.string({char: 's', description: 'Filtra per nome repository (case-insensitive)'}), } async run() { - const { flags } = await this.parse(DocsProjects) + const {flags} = await this.parse(DocsProjects) const isJson = flags.json const config = await loadConfig() @@ -31,7 +31,9 @@ export default class DocsProjects extends Command { } // 1. Fetch all repos - const repoSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching repositories...') }).start() + const repoSpinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching repositories...')}).start() let repos try { repos = await listRepos(config.org) @@ -43,7 +45,7 @@ export default class DocsProjects extends Command { if (repos.length === 0) { this.log(chalk.dim(`No repositories found in organization "${config.org}".`)) - return { org: config.org, projects: [], total: 0 } + return {org: config.org, projects: [], total: 0} } // 2. Filter by search @@ -52,11 +54,17 @@ export default class DocsProjects extends Command { if (filteredRepos.length === 0) { this.log(chalk.dim(`No repositories matching "${flags.search}" in ${config.org}.`)) - return { org: config.org, projects: [], total: 0 } + return {org: config.org, projects: [], total: 0} } // 3. Scan each repo for docs - const scanSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')(`Scanning docs in ${filteredRepos.length} repositories...`) }).start() + const scanSpinner = isJson + ? 
null + : ora({ + spinner: 'arc', + color: false, + text: chalk.hex('#FF6B2B')(`Scanning docs in ${filteredRepos.length} repositories...`), + }).start() const repoNames = filteredRepos.map((r) => r.name) let projects @@ -68,25 +76,51 @@ export default class DocsProjects extends Command { } scanSpinner?.stop() - if (isJson) return { org: config.org, projects, total: projects.length } + if (isJson) return {org: config.org, projects, total: projects.length} const filterInfo = q ? chalk.dim(` — search: ${chalk.white(`"${flags.search}"`)}`) : '' this.log( chalk.bold(`\nDocumentation overview for ${config.org}`) + - filterInfo + - chalk.dim(` (${projects.length}${projects.length < repos.length ? `/${repos.length}` : ''})`) + - '\n', + filterInfo + + chalk.dim(` (${projects.length}${projects.length < repos.length ? `/${repos.length}` : ''})`) + + '\n', ) - this.log(renderTable(projects, [ - { header: 'Repository', key: 'repo', width: 40 }, - { header: 'README', key: 'hasReadme', width: 8, format: (v) => v ? '✓' : '—', colorize: (v) => v === '✓' ? chalk.green(v) : chalk.dim(v) }, - { header: 'Docs', key: 'docsCount', width: 6, format: (v) => Number(v) > 0 ? String(v) : '—', colorize: (v) => v !== '—' ? chalk.cyan(v) : chalk.dim(v) }, - { header: 'Swagger', key: 'hasSwagger', width: 9, format: (v) => v ? '✓' : '—', colorize: (v) => v === '✓' ? chalk.yellow(v) : chalk.dim(v) }, - { header: 'AsyncAPI', key: 'hasAsyncApi', width: 10, format: (v) => v ? '✓' : '—', colorize: (v) => v === '✓' ? chalk.green(v) : chalk.dim(v) }, - ])) + this.log( + renderTable(projects, [ + {header: 'Repository', key: 'repo', width: 40}, + { + header: 'README', + key: 'hasReadme', + width: 8, + format: (v) => (v ? '✓' : '—'), + colorize: (v) => (v === '✓' ? chalk.green(v) : chalk.dim(v)), + }, + { + header: 'Docs', + key: 'docsCount', + width: 6, + format: (v) => (Number(v) > 0 ? String(v) : '—'), + colorize: (v) => (v !== '—' ? 
chalk.cyan(v) : chalk.dim(v)), + }, + { + header: 'Swagger', + key: 'hasSwagger', + width: 9, + format: (v) => (v ? '✓' : '—'), + colorize: (v) => (v === '✓' ? chalk.yellow(v) : chalk.dim(v)), + }, + { + header: 'AsyncAPI', + key: 'hasAsyncApi', + width: 10, + format: (v) => (v ? '✓' : '—'), + colorize: (v) => (v === '✓' ? chalk.green(v) : chalk.dim(v)), + }, + ]), + ) this.log('') - return { org: config.org, projects, total: projects.length } + return {org: config.org, projects, total: projects.length} } } diff --git a/src/commands/docs/read.js b/src/commands/docs/read.js index 28a231b..7ed9578 100644 --- a/src/commands/docs/read.js +++ b/src/commands/docs/read.js @@ -1,12 +1,12 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { loadConfig } from '../../services/config.js' -import { listDocs, readFile, detectCurrentRepo, detectApiSpecType } from '../../services/docs.js' -import { renderMarkdown, extractMermaidBlocks, toMermaidLiveUrl } from '../../formatters/markdown.js' -import { parseOpenApi, parseAsyncApi } from '../../formatters/openapi.js' -import { renderTable } from '../../formatters/table.js' -import { openBrowser } from '../../utils/open-browser.js' +import {loadConfig} from '../../services/config.js' +import {listDocs, readFile, detectCurrentRepo, detectApiSpecType} from '../../services/docs.js' +import {renderMarkdown, extractMermaidBlocks, toMermaidLiveUrl} from '../../formatters/markdown.js' +import {parseOpenApi, parseAsyncApi} from '../../formatters/openapi.js' +import {renderTable} from '../../formatters/table.js' +import {openBrowser} from '../../utils/open-browser.js' /** * @param {string} method @@ -52,17 +52,17 @@ export default class DocsRead extends Command { static enableJsonFlag = true static args = { - file: Args.string({ description: 'Percorso del file da leggere (default: README)', required: false }), + file: 
Args.string({description: 'Percorso del file da leggere (default: README)', required: false}), } static flags = { - repo: Flags.string({ char: 'r', description: 'Nome del repository (default: repo nella directory corrente)' }), - raw: Flags.boolean({ description: 'Mostra contenuto grezzo senza parsing speciale', default: false }), - render: Flags.boolean({ description: 'Apri i diagrammi Mermaid nel browser via mermaid.live', default: false }), + repo: Flags.string({char: 'r', description: 'Nome del repository (default: repo nella directory corrente)'}), + raw: Flags.boolean({description: 'Mostra contenuto grezzo senza parsing speciale', default: false}), + render: Flags.boolean({description: 'Apri i diagrammi Mermaid nel browser via mermaid.live', default: false}), } async run() { - const { args, flags } = await this.parse(DocsRead) + const {args, flags} = await this.parse(DocsRead) const isJson = flags.json const config = await loadConfig() @@ -74,7 +74,7 @@ export default class DocsRead extends Command { repo = flags.repo } else { try { - ;({ owner, repo } = await detectCurrentRepo()) + ;({owner, repo} = await detectCurrentRepo()) } catch (err) { this.error(/** @type {Error} */ (err).message) } @@ -83,7 +83,9 @@ export default class DocsRead extends Command { // Resolve file path let filePath = args.file if (!filePath) { - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Looking for README...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Looking for README...')}).start() let entries try { entries = await listDocs(owner, repo) @@ -95,24 +97,35 @@ export default class DocsRead extends Command { const readme = entries.find((e) => e.type === 'readme') if (!readme) { this.log(chalk.dim(`No README found in ${owner}/${repo}.`)) - return { repo, owner, path: null, type: null, content: null, size: 0 } + return {repo, owner, path: null, type: null, content: null, size: 0} } filePath = readme.path } // Fetch content - const spinner2 = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')(`Reading ${filePath}...`) }).start() - let content - try { - content = await readFile(owner, repo, filePath) - } catch { - spinner2?.stop() - this.error(`File "${filePath}" not found in ${owner}/${repo}. Run \`dvmi docs list\` to see available documentation.`) - } + const spinner2 = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')(`Reading ${filePath}...`)}).start() + let content + try { + content = await readFile(owner, repo, filePath) + } catch { + spinner2?.stop() + this.error( + `File "${filePath}" not found in ${owner}/${repo}. Run \`dvmi docs list\` to see available documentation.`, + ) + } spinner2?.stop() if (isJson) { - return { repo, owner, path: filePath, type: detectApiSpecType(filePath, content) ?? 'doc', content, size: content.length } + return { + repo, + owner, + path: filePath, + type: detectApiSpecType(filePath, content) ?? 'doc', + content, + size: content.length, + } } // Handle --render (Mermaid) @@ -133,33 +146,37 @@ export default class DocsRead extends Command { // Render if (!flags.raw && specType === 'swagger') { - const { endpoints, error } = parseOpenApi(content) + const {endpoints, error} = parseOpenApi(content) if (error || endpoints.length === 0) { this.log(chalk.yellow(`⚠ Could not parse "${filePath}" as OpenAPI spec (showing raw content). ${error ?? 
''}`)) this.log(content) } else { this.log(chalk.bold(`\nAPI Endpoints — ${filePath}\n`)) - this.log(renderTable(endpoints, [ - { header: 'Method', key: 'method', width: 8, colorize: methodColor }, - { header: 'Path', key: 'path', width: 45 }, - { header: 'Summary', key: 'summary', width: 40 }, - { header: 'Parameters', key: 'parameters', width: 30, format: (v) => v || '—' }, - ])) + this.log( + renderTable(endpoints, [ + {header: 'Method', key: 'method', width: 8, colorize: methodColor}, + {header: 'Path', key: 'path', width: 45}, + {header: 'Summary', key: 'summary', width: 40}, + {header: 'Parameters', key: 'parameters', width: 30, format: (v) => v || '—'}, + ]), + ) this.log('') } } else if (!flags.raw && specType === 'asyncapi') { - const { channels, error } = parseAsyncApi(content) + const {channels, error} = parseAsyncApi(content) if (error || channels.length === 0) { this.log(chalk.yellow(`⚠ Could not parse "${filePath}" as AsyncAPI spec (showing raw content). ${error ?? ''}`)) this.log(content) } else { this.log(chalk.bold(`\nAsyncAPI Channels — ${filePath}\n`)) - this.log(renderTable(channels, [ - { header: 'Channel', key: 'channel', width: 35 }, - { header: 'Operation', key: 'operation', width: 12, colorize: opColor }, - { header: 'Summary', key: 'summary', width: 40 }, - { header: 'Message', key: 'message', width: 25, format: (v) => v || '—' }, - ])) + this.log( + renderTable(channels, [ + {header: 'Channel', key: 'channel', width: 35}, + {header: 'Operation', key: 'operation', width: 12, colorize: opColor}, + {header: 'Summary', key: 'summary', width: 40}, + {header: 'Message', key: 'message', width: 25, format: (v) => v || '—'}, + ]), + ) this.log('') } } else { @@ -167,6 +184,6 @@ export default class DocsRead extends Command { this.log(renderMarkdown(content)) } - return { repo, owner, path: filePath, type: specType ?? 'doc', content, size: content.length } + return {repo, owner, path: filePath, type: specType ?? 
'doc', content, size: content.length} } } diff --git a/src/commands/docs/search.js b/src/commands/docs/search.js index 248ceea..0b2e83b 100644 --- a/src/commands/docs/search.js +++ b/src/commands/docs/search.js @@ -1,9 +1,9 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { loadConfig } from '../../services/config.js' -import { searchDocs, detectCurrentRepo } from '../../services/docs.js' -import { renderTable } from '../../formatters/table.js' +import {loadConfig} from '../../services/config.js' +import {searchDocs, detectCurrentRepo} from '../../services/docs.js' +import {renderTable} from '../../formatters/table.js' const MAX_MATCHES_PER_FILE = 3 @@ -19,15 +19,15 @@ export default class DocsSearch extends Command { static enableJsonFlag = true static args = { - term: Args.string({ description: 'Termine di ricerca (case-insensitive)', required: true }), + term: Args.string({description: 'Termine di ricerca (case-insensitive)', required: true}), } static flags = { - repo: Flags.string({ char: 'r', description: 'Nome del repository (default: repo nella directory corrente)' }), + repo: Flags.string({char: 'r', description: 'Nome del repository (default: repo nella directory corrente)'}), } async run() { - const { args, flags } = await this.parse(DocsSearch) + const {args, flags} = await this.parse(DocsSearch) const isJson = flags.json const config = await loadConfig() @@ -39,13 +39,15 @@ export default class DocsSearch extends Command { repo = flags.repo } else { try { - ;({ owner, repo } = await detectCurrentRepo()) + ;({owner, repo} = await detectCurrentRepo()) } catch (err) { this.error(/** @type {Error} */ (err).message) } } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')(`Searching "${args.term}" in docs...`) }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')(`Searching "${args.term}" in docs...`)}).start() let matches try { matches = await searchDocs(owner, repo, args.term) @@ -55,14 +57,17 @@ export default class DocsSearch extends Command { } spinner?.stop() - if (isJson) return { repo, owner, term: args.term, matches, total: matches.length } + if (isJson) return {repo, owner, term: args.term, matches, total: matches.length} if (matches.length === 0) { this.log(chalk.dim(`No matches found for "${args.term}" in ${owner}/${repo} documentation.`)) - return { repo, owner, term: args.term, matches: [], total: 0 } + return {repo, owner, term: args.term, matches: [], total: 0} } - this.log(chalk.bold(`\nSearch results for "${args.term}" in ${owner}/${repo}`) + chalk.dim(` (${matches.length} match${matches.length === 1 ? '' : 'es'})\n`)) + this.log( + chalk.bold(`\nSearch results for "${args.term}" in ${owner}/${repo}`) + + chalk.dim(` (${matches.length} match${matches.length === 1 ? '' : 'es'})\n`), + ) // Group by file and limit rows /** @type {Map} */ @@ -80,24 +85,31 @@ export default class DocsSearch extends Command { rows.push(...shown) const extra = fileMatches.length - shown.length if (extra > 0) { - rows.push({ file: '', line: 0, context: chalk.dim(`(+${extra} more in this file)`), occurrences: 0 }) + rows.push({file: '', line: 0, context: chalk.dim(`(+${extra} more in this file)`), occurrences: 0}) } } const q = args.term.toLowerCase() - this.log(renderTable(rows, [ - { header: 'File', key: 'file', width: 35 }, - { header: 'Line', key: 'line', width: 5, format: (v) => Number(v) === 0 ? '' : String(v) }, - { header: 'Context', key: 'context', width: 65, format: (v) => { - const s = String(v) - // highlight term - const re = new RegExp(q.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'gi') - return s.replace(re, (m) => chalk.yellow.bold(m)) - }}, - { header: 'Matches', key: 'occurrences', width: 8, format: (v) => Number(v) === 0 ? 
'' : `${v}` }, - ])) + this.log( + renderTable(rows, [ + {header: 'File', key: 'file', width: 35}, + {header: 'Line', key: 'line', width: 5, format: (v) => (Number(v) === 0 ? '' : String(v))}, + { + header: 'Context', + key: 'context', + width: 65, + format: (v) => { + const s = String(v) + // highlight term + const re = new RegExp(q.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'), 'gi') + return s.replace(re, (m) => chalk.yellow.bold(m)) + }, + }, + {header: 'Matches', key: 'occurrences', width: 8, format: (v) => (Number(v) === 0 ? '' : `${v}`)}, + ]), + ) this.log('') - return { repo, owner, term: args.term, matches, total: matches.length } + return {repo, owner, term: args.term, matches, total: matches.length} } } diff --git a/src/commands/doctor.js b/src/commands/doctor.js index 03f763d..c517c27 100644 --- a/src/commands/doctor.js +++ b/src/commands/doctor.js @@ -1,10 +1,10 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { typewriterLine } from '../utils/typewriter.js' -import { which, exec } from '../services/shell.js' -import { checkGitHubAuth, checkAWSAuth } from '../services/auth.js' -import { formatDoctorCheck, formatDoctorSummary } from '../formatters/status.js' +import {typewriterLine} from '../utils/typewriter.js' +import {which, exec} from '../services/shell.js' +import {checkGitHubAuth, checkAWSAuth} from '../services/auth.js' +import {formatDoctorCheck, formatDoctorSummary} from '../formatters/status.js' /** @import { DoctorCheck } from '../types.js' */ @@ -20,28 +20,30 @@ export default class Doctor extends Command { static enableJsonFlag = true static flags = { - verbose: Flags.boolean({ description: 'Mostra dettagli aggiuntivi', default: false }), + verbose: Flags.boolean({description: 'Mostra dettagli aggiuntivi', default: false}), } async run() { - const { flags } = await this.parse(Doctor) + const {flags} = await this.parse(Doctor) const isJson = 
flags.json - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Running diagnostics...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Running diagnostics...')}).start() /** @type {DoctorCheck[]} */ const checks = [] // Software checks const softwareChecks = [ - { name: 'Node.js', cmd: 'node', args: ['--version'], required: '>=24' }, - { name: 'nvm', cmd: 'nvm', args: ['--version'], required: null }, - { name: 'npm', cmd: 'npm', args: ['--version'], required: null }, - { name: 'Git', cmd: 'git', args: ['--version'], required: null }, - { name: 'gh CLI', cmd: 'gh', args: ['--version'], required: null }, - { name: 'Docker', cmd: 'docker', args: ['--version'], required: null }, - { name: 'AWS CLI', cmd: 'aws', args: ['--version'], required: null }, - { name: 'aws-vault', cmd: 'aws-vault', args: ['--version'], required: null }, + {name: 'Node.js', cmd: 'node', args: ['--version'], required: '>=24'}, + {name: 'nvm', cmd: 'nvm', args: ['--version'], required: null}, + {name: 'npm', cmd: 'npm', args: ['--version'], required: null}, + {name: 'Git', cmd: 'git', args: ['--version'], required: null}, + {name: 'gh CLI', cmd: 'gh', args: ['--version'], required: null}, + {name: 'Docker', cmd: 'docker', args: ['--version'], required: null}, + {name: 'AWS CLI', cmd: 'aws', args: ['--version'], required: null}, + {name: 'aws-vault', cmd: 'aws-vault', args: ['--version'], required: null}, ] for (const check of softwareChecks) { @@ -68,23 +70,23 @@ export default class Doctor extends Command { } // Auth checks - const ghAuth = await checkGitHubAuth() - checks.push({ - name: 'GitHub auth', - status: ghAuth.authenticated ? 'ok' : 'fail', - version: ghAuth.authenticated ? ghAuth.username ?? null : null, - required: null, - hint: ghAuth.authenticated ? 
null : 'Run `dvmi auth login`', - }) - - const awsAuth = await checkAWSAuth() - checks.push({ - name: 'AWS auth', - status: awsAuth.authenticated ? 'ok' : 'warn', - version: awsAuth.authenticated ? awsAuth.account ?? null : null, - required: null, - hint: awsAuth.authenticated ? null : 'Run `dvmi auth login --aws`', - }) + const ghAuth = await checkGitHubAuth() + checks.push({ + name: 'GitHub auth', + status: ghAuth.authenticated ? 'ok' : 'fail', + version: ghAuth.authenticated ? (ghAuth.username ?? null) : null, + required: null, + hint: ghAuth.authenticated ? null : 'Run `dvmi auth login`', + }) + + const awsAuth = await checkAWSAuth() + checks.push({ + name: 'AWS auth', + status: awsAuth.authenticated ? 'ok' : 'warn', + version: awsAuth.authenticated ? (awsAuth.account ?? null) : null, + required: null, + hint: awsAuth.authenticated ? null : 'Run `dvmi auth login --aws`', + }) spinner?.stop() @@ -94,7 +96,7 @@ export default class Doctor extends Command { fail: checks.filter((c) => c.status === 'fail').length, } - if (isJson) return { checks, summary } + if (isJson) return {checks, summary} await typewriterLine('Environment Diagnostics') for (const check of checks) { @@ -110,6 +112,6 @@ export default class Doctor extends Command { } } - return { checks, summary } + return {checks, summary} } } diff --git a/src/commands/dotfiles/add.js b/src/commands/dotfiles/add.js index 198c977..bbffb82 100644 --- a/src/commands/dotfiles/add.js +++ b/src/commands/dotfiles/add.js @@ -1,8 +1,8 @@ -import { Command, Flags, Args } from '@oclif/core' +import {Command, Flags, Args} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { checkbox, confirm, input } from '@inquirer/prompts' -import { detectPlatform } from '../../services/platform.js' +import {checkbox, confirm, input} from '@inquirer/prompts' +import {detectPlatform} from '../../services/platform.js' import { isChezmoiInstalled, getManagedFiles, @@ -11,13 +11,13 @@ import { isPathSensitive, 
isWSLWindowsPath, } from '../../services/dotfiles.js' -import { loadConfig } from '../../services/config.js' -import { execOrThrow } from '../../services/shell.js' -import { formatDotfilesAdd } from '../../formatters/dotfiles.js' -import { DvmiError } from '../../utils/errors.js' -import { homedir } from 'node:os' -import { join } from 'node:path' -import { existsSync } from 'node:fs' +import {loadConfig} from '../../services/config.js' +import {execOrThrow} from '../../services/shell.js' +import {formatDotfilesAdd} from '../../formatters/dotfiles.js' +import {DvmiError} from '../../utils/errors.js' +import {homedir} from 'node:os' +import {join} from 'node:path' +import {existsSync} from 'node:fs' /** @import { DotfilesAddResult } from '../../types.js' */ @@ -47,13 +47,13 @@ export default class DotfilesAdd extends Command { static enableJsonFlag = true static flags = { - help: Flags.help({ char: 'h' }), - encrypt: Flags.boolean({ char: 'e', description: 'Force encryption for all files being added', default: false }), - 'no-encrypt': Flags.boolean({ description: 'Disable auto-encryption (add all as plaintext)', default: false }), + help: Flags.help({char: 'h'}), + encrypt: Flags.boolean({char: 'e', description: 'Force encryption for all files being added', default: false}), + 'no-encrypt': Flags.boolean({description: 'Disable auto-encryption (add all as plaintext)', default: false}), } static args = { - files: Args.string({ description: 'File paths to add', required: false }), + files: Args.string({description: 'File paths to add', required: false}), } // oclif does not support variadic args natively via Args.string for multiple values; @@ -61,7 +61,7 @@ export default class DotfilesAdd extends Command { static strict = false async run() { - const { flags } = await this.parse(DotfilesAdd) + const {flags} = await this.parse(DotfilesAdd) const isJson = flags.json const forceEncrypt = flags.encrypt const forceNoEncrypt = flags['no-encrypt'] @@ -73,23 +73,21 @@ export 
default class DotfilesAdd extends Command { // Pre-checks const config = await loadConfig() if (!config.dotfiles?.enabled) { - throw new DvmiError( - 'Chezmoi dotfiles management is not configured', - 'Run `dvmi dotfiles setup` first', - ) + throw new DvmiError('Chezmoi dotfiles management is not configured', 'Run `dvmi dotfiles setup` first') } const chezmoiInstalled = await isChezmoiInstalled() if (!chezmoiInstalled) { const platformInfo = await detectPlatform() - const hint = platformInfo.platform === 'macos' - ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' - : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' + const hint = + platformInfo.platform === 'macos' + ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' + : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' throw new DvmiError('chezmoi is not installed', hint) } const platformInfo = await detectPlatform() - const { platform } = platformInfo + const {platform} = platformInfo const sensitivePatterns = getSensitivePatterns(config) // Get already-managed files for V-007 check @@ -97,7 +95,7 @@ export default class DotfilesAdd extends Command { const managedPaths = new Set(managedFiles.map((f) => f.path)) /** @type {DotfilesAddResult} */ - const result = { added: [], skipped: [], rejected: [] } + const result = {added: [], skipped: [], rejected: []} if (fileArgs.length > 0) { // Direct mode — files provided as arguments @@ -107,19 +105,22 @@ export default class DotfilesAdd extends Command { // V-002: WSL2 Windows path rejection if (platform === 'wsl2' && isWSLWindowsPath(absPath)) { - result.rejected.push({ path: displayPath, reason: 'Windows filesystem paths not supported on WSL2. Use Linux-native paths (~/) instead.' }) + result.rejected.push({ + path: displayPath, + reason: 'Windows filesystem paths not supported on WSL2. 
Use Linux-native paths (~/) instead.', + }) continue } // V-001: file must exist if (!existsSync(absPath)) { - result.skipped.push({ path: displayPath, reason: 'File not found' }) + result.skipped.push({path: displayPath, reason: 'File not found'}) continue } // V-007: not already managed if (managedPaths.has(absPath)) { - result.skipped.push({ path: displayPath, reason: 'Already managed by chezmoi' }) + result.skipped.push({path: displayPath, reason: 'Already managed by chezmoi'}) continue } @@ -138,9 +139,12 @@ export default class DotfilesAdd extends Command { if (encrypt) args.push('--encrypt') args.push(absPath) await execOrThrow('chezmoi', args) - result.added.push({ path: displayPath, encrypted: encrypt }) + result.added.push({path: displayPath, encrypted: encrypt}) } catch { - result.skipped.push({ path: displayPath, reason: `Failed to add to chezmoi. Run \`chezmoi doctor\` to verify your setup.` }) + result.skipped.push({ + path: displayPath, + reason: `Failed to add to chezmoi. Run \`chezmoi doctor\` to verify your setup.`, + }) } } @@ -161,11 +165,15 @@ export default class DotfilesAdd extends Command { if (isCI || isNonInteractive) { this.error( 'This command requires an interactive terminal (TTY) when no files are specified. 
Provide file paths as arguments or run with --json.', - { exit: 1 }, + {exit: 1}, ) } - const spinner = ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Loading recommended files...') }).start() + const spinner = ora({ + spinner: 'arc', + color: false, + text: chalk.hex('#FF6B2B')('Loading recommended files...'), + }).start() const recommended = getDefaultFileList(platform) spinner.stop() @@ -191,9 +199,9 @@ export default class DotfilesAdd extends Command { }) // Offer custom file - const addCustom = await confirm({ message: 'Add a custom file path?', default: false }) + const addCustom = await confirm({message: 'Add a custom file path?', default: false}) if (addCustom) { - const customPath = await input({ message: 'Enter file path:' }) + const customPath = await input({message: 'Enter file path:'}) if (customPath.trim()) selected.push(customPath.trim()) } @@ -202,24 +210,28 @@ export default class DotfilesAdd extends Command { return result } - const addSpinner = ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Adding files to chezmoi...') }).start() + const addSpinner = ora({ + spinner: 'arc', + color: false, + text: chalk.hex('#FF6B2B')('Adding files to chezmoi...'), + }).start() addSpinner.stop() for (const rawPath of selected) { const absPath = expandTilde(rawPath) if (platform === 'wsl2' && isWSLWindowsPath(absPath)) { - result.rejected.push({ path: rawPath, reason: 'Windows filesystem paths not supported on WSL2' }) + result.rejected.push({path: rawPath, reason: 'Windows filesystem paths not supported on WSL2'}) continue } if (!existsSync(absPath)) { - result.skipped.push({ path: rawPath, reason: 'File not found' }) + result.skipped.push({path: rawPath, reason: 'File not found'}) continue } if (managedPaths.has(absPath)) { - result.skipped.push({ path: rawPath, reason: 'Already managed by chezmoi' }) + result.skipped.push({path: rawPath, reason: 'Already managed by chezmoi'}) continue } @@ -237,9 +249,9 @@ export default class 
DotfilesAdd extends Command { if (encrypt) args.push('--encrypt') args.push(absPath) await execOrThrow('chezmoi', args) - result.added.push({ path: rawPath, encrypted: encrypt }) + result.added.push({path: rawPath, encrypted: encrypt}) } catch { - result.skipped.push({ path: rawPath, reason: `Failed to add. Run \`chezmoi doctor\` to verify your setup.` }) + result.skipped.push({path: rawPath, reason: `Failed to add. Run \`chezmoi doctor\` to verify your setup.`}) } } diff --git a/src/commands/dotfiles/setup.js b/src/commands/dotfiles/setup.js index eeddbf7..0351484 100644 --- a/src/commands/dotfiles/setup.js +++ b/src/commands/dotfiles/setup.js @@ -1,30 +1,27 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { confirm } from '@inquirer/prompts' -import { detectPlatform } from '../../services/platform.js' -import { isChezmoiInstalled, getChezmoiConfig, buildSetupSteps } from '../../services/dotfiles.js' -import { formatDotfilesSetup } from '../../formatters/dotfiles.js' -import { DvmiError } from '../../utils/errors.js' +import {confirm} from '@inquirer/prompts' +import {detectPlatform} from '../../services/platform.js' +import {isChezmoiInstalled, getChezmoiConfig, buildSetupSteps} from '../../services/dotfiles.js' +import {formatDotfilesSetup} from '../../formatters/dotfiles.js' +import {DvmiError} from '../../utils/errors.js' /** @import { DotfilesSetupResult, SetupStep, StepResult } from '../../types.js' */ export default class DotfilesSetup extends Command { static description = 'Interactive wizard to configure chezmoi with age encryption for dotfile management' - static examples = [ - '<%= config.bin %> dotfiles setup', - '<%= config.bin %> dotfiles setup --json', - ] + static examples = ['<%= config.bin %> dotfiles setup', '<%= config.bin %> dotfiles setup --json'] static enableJsonFlag = true static flags = { - help: Flags.help({ char: 'h' }), + help: 
Flags.help({char: 'h'}), } async run() { - const { flags } = await this.parse(DotfilesSetup) + const {flags} = await this.parse(DotfilesSetup) const isJson = flags.json // Non-interactive guard @@ -33,12 +30,12 @@ export default class DotfilesSetup extends Command { if ((isCI || isNonInteractive) && !isJson) { this.error( 'This command requires an interactive terminal (TTY). Run with --json for a non-interactive status check.', - { exit: 1 }, + {exit: 1}, ) } const platformInfo = await detectPlatform() - const { platform } = platformInfo + const {platform} = platformInfo // --json branch: non-interactive setup attempt if (isJson) { @@ -52,9 +49,10 @@ export default class DotfilesSetup extends Command { sourceDir: null, publicKey: null, status: 'failed', - message: platform === 'macos' - ? 'chezmoi is not installed. Run `brew install chezmoi` or visit https://chezmoi.io/install' - : 'chezmoi is not installed. Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install', + message: + platform === 'macos' + ? 'chezmoi is not installed. Run `brew install chezmoi` or visit https://chezmoi.io/install' + : 'chezmoi is not installed. Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install', } } @@ -76,15 +74,20 @@ export default class DotfilesSetup extends Command { // --------------------------------------------------------------------------- // Interactive mode // --------------------------------------------------------------------------- - const preSpinner = ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking chezmoi status...') }).start() + const preSpinner = ora({ + spinner: 'arc', + color: false, + text: chalk.hex('#FF6B2B')('Checking chezmoi status...'), + }).start() const chezmoiInstalled = await isChezmoiInstalled() const existingConfig = await getChezmoiConfig() preSpinner.stop() if (!chezmoiInstalled) { - const hint = platform === 'macos' - ? 
'Run `brew install chezmoi` or visit https://chezmoi.io/install' - : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' + const hint = + platform === 'macos' + ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' + : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' throw new DvmiError('chezmoi is not installed', hint) } @@ -92,18 +95,26 @@ export default class DotfilesSetup extends Command { const hasEncryption = existingConfig?.encryption?.tool === 'age' || !!existingConfig?.age?.identity if (existingConfig && hasEncryption) { this.log(chalk.green(' ✔ chezmoi is already configured with age encryption')) - const reconfigure = await confirm({ message: 'Reconfigure encryption (regenerate age key)?', default: false }) + const reconfigure = await confirm({message: 'Reconfigure encryption (regenerate age key)?', default: false}) if (!reconfigure) { const sourceDir = existingConfig?.sourceDir ?? existingConfig?.sourcePath ?? null this.log(chalk.dim(' Skipped. 
Existing encryption configuration kept.')) - return { platform, chezmoiInstalled: true, encryptionConfigured: true, sourceDir, publicKey: null, status: 'skipped', message: 'Existing encryption configuration kept' } + return { + platform, + chezmoiInstalled: true, + encryptionConfigured: true, + sourceDir, + publicKey: null, + status: 'skipped', + message: 'Existing encryption configuration kept', + } } } else if (existingConfig) { this.log(chalk.yellow(' chezmoi is initialised but encryption is not configured — adding age encryption')) } // Build and run steps - const steps = buildSetupSteps(platform, { existingConfig }) + const steps = buildSetupSteps(platform, {existingConfig}) this.log('') @@ -111,24 +122,24 @@ export default class DotfilesSetup extends Command { let sourceDir = null for (const step of steps) { - const typeColor = { check: chalk.blue, install: chalk.yellow, configure: chalk.cyan, verify: chalk.green } + const typeColor = {check: chalk.blue, install: chalk.yellow, configure: chalk.cyan, verify: chalk.green} const colorFn = typeColor[step.type] ?? chalk.white this.log(` ${colorFn(`[${step.type}]`)} ${step.label}`) if (step.requiresConfirmation) { - const proceed = await confirm({ message: `Proceed with: ${step.label}?`, default: true }) + const proceed = await confirm({message: `Proceed with: ${step.label}?`, default: true}) if (!proceed) { this.log(chalk.dim(' Skipped.')) continue } } - const stepSpinner = ora({ spinner: 'arc', color: false, text: chalk.dim(step.label) }).start() + const stepSpinner = ora({spinner: 'arc', color: false, text: chalk.dim(step.label)}).start() let result try { result = await step.run() } catch (err) { - result = { status: /** @type {'failed'} */ ('failed'), hint: err instanceof Error ? err.message : String(err) } + result = {status: /** @type {'failed'} */ ('failed'), hint: err instanceof Error ? 
err.message : String(err)} } if (result.status === 'success') { @@ -147,8 +158,26 @@ export default class DotfilesSetup extends Command { } else { stepSpinner.fail(chalk.red(`${step.label} — failed`)) if (result.hint) this.log(chalk.dim(` → ${result.hint}`)) - this.log(formatDotfilesSetup({ platform, chezmoiInstalled: true, encryptionConfigured: false, sourceDir: null, publicKey: null, status: 'failed', message: result.hint })) - return { platform, chezmoiInstalled: true, encryptionConfigured: false, sourceDir: null, publicKey: null, status: 'failed', message: result.hint } + this.log( + formatDotfilesSetup({ + platform, + chezmoiInstalled: true, + encryptionConfigured: false, + sourceDir: null, + publicKey: null, + status: 'failed', + message: result.hint, + }), + ) + return { + platform, + chezmoiInstalled: true, + encryptionConfigured: false, + sourceDir: null, + publicKey: null, + status: 'failed', + message: result.hint, + } } } @@ -161,9 +190,9 @@ export default class DotfilesSetup extends Command { // Try to get public key from key file if (!publicKey) { try { - const { homedir } = await import('node:os') - const { join } = await import('node:path') - const { readFile } = await import('node:fs/promises') + const {homedir} = await import('node:os') + const {join} = await import('node:path') + const {readFile} = await import('node:fs/promises') const keyPath = join(homedir(), '.config', 'chezmoi', 'key.txt') const keyContent = await readFile(keyPath, 'utf8').catch(() => '') const match = keyContent.match(/# public key: (age1[a-z0-9]+)/i) diff --git a/src/commands/dotfiles/status.js b/src/commands/dotfiles/status.js index 82d26bc..c78e374 100644 --- a/src/commands/dotfiles/status.js +++ b/src/commands/dotfiles/status.js @@ -1,34 +1,31 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { detectPlatform } from '../../services/platform.js' -import { 
isChezmoiInstalled, getChezmoiConfig, getManagedFiles, getChezmoiRemote } from '../../services/dotfiles.js' -import { loadConfig } from '../../services/config.js' -import { formatDotfilesStatus } from '../../formatters/dotfiles.js' -import { DvmiError } from '../../utils/errors.js' +import {detectPlatform} from '../../services/platform.js' +import {isChezmoiInstalled, getChezmoiConfig, getManagedFiles, getChezmoiRemote} from '../../services/dotfiles.js' +import {loadConfig} from '../../services/config.js' +import {formatDotfilesStatus} from '../../formatters/dotfiles.js' +import {DvmiError} from '../../utils/errors.js' /** @import { DotfilesStatusResult } from '../../types.js' */ export default class DotfilesStatus extends Command { static description = 'Show chezmoi dotfiles status: managed files, encryption state, and sync health' - static examples = [ - '<%= config.bin %> dotfiles status', - '<%= config.bin %> dotfiles status --json', - ] + static examples = ['<%= config.bin %> dotfiles status', '<%= config.bin %> dotfiles status --json'] static enableJsonFlag = true static flags = { - help: Flags.help({ char: 'h' }), + help: Flags.help({char: 'h'}), } async run() { - const { flags } = await this.parse(DotfilesStatus) + const {flags} = await this.parse(DotfilesStatus) const isJson = flags.json const platformInfo = await detectPlatform() - const { platform } = platformInfo + const {platform} = platformInfo const config = await loadConfig() const enabled = config.dotfiles?.enabled === true @@ -47,7 +44,7 @@ export default class DotfilesStatus extends Command { repo: null, sourceDir: null, files: [], - summary: { total: 0, encrypted: 0, plaintext: 0 }, + summary: {total: 0, encrypted: 0, plaintext: 0}, } if (isJson) return notConfiguredResult @@ -56,14 +53,17 @@ export default class DotfilesStatus extends Command { } if (!chezmoiInstalled) { - const hint = platform === 'macos' - ? 
'Run `brew install chezmoi` or visit https://chezmoi.io/install' - : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' + const hint = + platform === 'macos' + ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' + : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' throw new DvmiError('chezmoi is not installed', hint) } // Gather data - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Gathering dotfiles status...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Gathering dotfiles status...')}).start() const [chezmoiConfig, files, remote] = await Promise.all([ getChezmoiConfig(), diff --git a/src/commands/dotfiles/sync.js b/src/commands/dotfiles/sync.js index 47681d5..8996b93 100644 --- a/src/commands/dotfiles/sync.js +++ b/src/commands/dotfiles/sync.js @@ -1,13 +1,13 @@ -import { Command, Flags, Args } from '@oclif/core' +import {Command, Flags, Args} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { confirm, input, select } from '@inquirer/prompts' -import { detectPlatform } from '../../services/platform.js' -import { isChezmoiInstalled, getChezmoiRemote, hasLocalChanges } from '../../services/dotfiles.js' -import { loadConfig, saveConfig } from '../../services/config.js' -import { exec, execOrThrow } from '../../services/shell.js' -import { formatDotfilesSync } from '../../formatters/dotfiles.js' -import { DvmiError } from '../../utils/errors.js' +import {confirm, input, select} from '@inquirer/prompts' +import {detectPlatform} from '../../services/platform.js' +import {isChezmoiInstalled, getChezmoiRemote, hasLocalChanges} from '../../services/dotfiles.js' +import {loadConfig, saveConfig} from '../../services/config.js' +import {exec, execOrThrow} from '../../services/shell.js' +import {formatDotfilesSync} from '../../formatters/dotfiles.js' +import 
{DvmiError} from '../../utils/errors.js' /** @import { DotfilesSyncResult } from '../../types.js' */ @@ -26,18 +26,18 @@ export default class DotfilesSync extends Command { static enableJsonFlag = true static flags = { - help: Flags.help({ char: 'h' }), - push: Flags.boolean({ description: 'Push local changes to remote', default: false }), - pull: Flags.boolean({ description: 'Pull remote changes and apply', default: false }), - 'dry-run': Flags.boolean({ description: 'Show what would change without applying', default: false }), + help: Flags.help({char: 'h'}), + push: Flags.boolean({description: 'Push local changes to remote', default: false}), + pull: Flags.boolean({description: 'Pull remote changes and apply', default: false}), + 'dry-run': Flags.boolean({description: 'Show what would change without applying', default: false}), } static args = { - repo: Args.string({ description: 'Remote repository URL (for initial remote setup)', required: false }), + repo: Args.string({description: 'Remote repository URL (for initial remote setup)', required: false}), } async run() { - const { flags, args } = await this.parse(DotfilesSync) + const {flags, args} = await this.parse(DotfilesSync) const isJson = flags.json const isPush = flags.push const isPull = flags.pull @@ -56,30 +56,27 @@ export default class DotfilesSync extends Command { const isCI = process.env.CI === 'true' const isNonInteractive = !process.stdout.isTTY if ((isCI || isNonInteractive) && !isJson) { - this.error( - 'This command requires an interactive terminal (TTY). Run with --json for a non-interactive sync.', - { exit: 1 }, - ) + this.error('This command requires an interactive terminal (TTY). 
Run with --json for a non-interactive sync.', { + exit: 1, + }) } const config = await loadConfig() if (!config.dotfiles?.enabled) { - throw new DvmiError( - 'Chezmoi dotfiles management is not configured', - 'Run `dvmi dotfiles setup` first', - ) + throw new DvmiError('Chezmoi dotfiles management is not configured', 'Run `dvmi dotfiles setup` first') } const chezmoiInstalled = await isChezmoiInstalled() if (!chezmoiInstalled) { const platformInfo = await detectPlatform() - const hint = platformInfo.platform === 'macos' - ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' - : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' + const hint = + platformInfo.platform === 'macos' + ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' + : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' throw new DvmiError('chezmoi is not installed', hint) } - const remote = config.dotfiles?.repo ?? await getChezmoiRemote() + const remote = config.dotfiles?.repo ?? (await getChezmoiRemote()) // --json mode: attempt push/pull or report status if (isJson) { @@ -105,7 +102,13 @@ export default class DotfilesSync extends Command { } /** @type {DotfilesSyncResult} */ - return { action: 'skipped', repo: effectiveRemote ?? null, status: 'skipped', message: 'No action specified', conflicts: [] } + return { + action: 'skipped', + repo: effectiveRemote ?? 
null, + status: 'skipped', + message: 'No action specified', + conflicts: [], + } } // --------------------------------------------------------------------------- @@ -140,22 +143,29 @@ export default class DotfilesSync extends Command { const action = await select({ message: 'What would you like to do?', choices: [ - { name: 'Push local changes to remote', value: 'push' }, - { name: 'Pull remote changes and apply', value: 'pull' }, - { name: 'Cancel', value: 'cancel' }, + {name: 'Push local changes to remote', value: 'push'}, + {name: 'Pull remote changes and apply', value: 'pull'}, + {name: 'Cancel', value: 'cancel'}, ], }) if (action === 'cancel') { /** @type {DotfilesSyncResult} */ - const cancelResult = { action: 'skipped', repo: effectiveRemote ?? null, status: 'skipped', message: 'Cancelled by user', conflicts: [] } + const cancelResult = { + action: 'skipped', + repo: effectiveRemote ?? null, + status: 'skipped', + message: 'Cancelled by user', + conflicts: [], + } this.log(formatDotfilesSync(cancelResult)) return cancelResult } - const result = action === 'push' - ? await this._push(effectiveRemote, isDryRun, false) - : await this._pull(effectiveRemote, isDryRun, false) + const result = + action === 'push' + ? 
await this._push(effectiveRemote, isDryRun, false) + : await this._pull(effectiveRemote, isDryRun, false) this.log(formatDotfilesSync(result)) return result @@ -174,25 +184,25 @@ export default class DotfilesSync extends Command { const choice = await select({ message: 'Connect to an existing dotfiles repository or create a new one?', choices: [ - { name: 'Connect to existing repository', value: 'existing' }, - { name: 'Create new repository on GitHub', value: 'new' }, + {name: 'Connect to existing repository', value: 'existing'}, + {name: 'Create new repository on GitHub', value: 'new'}, ], }) let repoUrl = '' if (choice === 'existing') { - repoUrl = await input({ message: 'Repository URL (SSH or HTTPS):' }) + repoUrl = await input({message: 'Repository URL (SSH or HTTPS):'}) } else { - const repoName = await input({ message: 'Repository name:', default: 'dotfiles' }) - const isPrivate = await confirm({ message: 'Make repository private?', default: true }) + const repoName = await input({message: 'Repository name:', default: 'dotfiles'}) + const isPrivate = await confirm({message: 'Make repository private?', default: true}) if (!isDryRun) { try { const visFlag = isPrivate ? '--private' : '--public' await execOrThrow('gh', ['repo', 'create', repoName, visFlag, '--confirm']) // Get the SSH URL from the created repo - const { exec } = await import('../../services/shell.js') + const {exec} = await import('../../services/shell.js') const result = await exec('gh', ['repo', 'view', repoName, '--json', 'sshUrl', '--jq', '.sshUrl']) repoUrl = result.stdout.trim() || `git@github.com:${repoName}.git` } catch { @@ -211,7 +221,7 @@ export default class DotfilesSync extends Command { await execOrThrow('chezmoi', ['git', '--', 'remote', 'add', 'origin', repoUrl]) await execOrThrow('chezmoi', ['git', '--', 'push', '-u', 'origin', 'main']) // Save repo to dvmi config - config.dotfiles = { ...(config.dotfiles ?? 
{ enabled: true }), repo: repoUrl } + config.dotfiles = {...(config.dotfiles ?? {enabled: true}), repo: repoUrl} await saveConfig(config) } catch (err) { /** @type {DotfilesSyncResult} */ @@ -232,7 +242,9 @@ export default class DotfilesSync extends Command { action: 'init-remote', repo: repoUrl, status: isDryRun ? 'skipped' : 'success', - message: isDryRun ? `Would configure remote: ${repoUrl}` : 'Remote repository configured and initial push completed', + message: isDryRun + ? `Would configure remote: ${repoUrl}` + : 'Remote repository configured and initial push completed', conflicts: [], } this.log(formatDotfilesSync(result)) @@ -271,7 +283,9 @@ export default class DotfilesSync extends Command { } } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Pushing to remote...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Pushing to remote...')}).start() try { // Stage all changes @@ -282,7 +296,7 @@ export default class DotfilesSync extends Command { await execOrThrow('chezmoi', ['git', '--', 'push', 'origin', 'HEAD']) spinner?.succeed(chalk.green('Pushed to remote')) - return { action: 'push', repo: remote, status: 'success', message: 'Changes pushed to remote', conflicts: [] } + return {action: 'push', repo: remote, status: 'success', message: 'Changes pushed to remote', conflicts: []} } catch (err) { spinner?.fail(chalk.red('Push failed')) return { @@ -327,7 +341,9 @@ export default class DotfilesSync extends Command { } } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Pulling from remote...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Pulling from remote...')}).start() try { // Check if chezmoi init was done with this remote (first-time pull) @@ -360,7 +376,7 @@ export default class DotfilesSync extends Command { } } - return { action: 'pull', repo: remote, status: 'success', message: 'Remote changes applied', conflicts: [] } + return {action: 'pull', repo: remote, status: 'success', message: 'Remote changes applied', conflicts: []} } catch (err) { spinner?.fail(chalk.red('Pull failed')) return { diff --git a/src/commands/init.js b/src/commands/init.js index 056b08f..a4f1f4b 100644 --- a/src/commands/init.js +++ b/src/commands/init.js @@ -1,34 +1,30 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { confirm, input, select } from '@inquirer/prompts' -import { printWelcomeScreen } from '../utils/welcome.js' -import { typewriterLine } from '../utils/typewriter.js' -import { detectPlatform } from '../services/platform.js' -import { exec, which } from '../services/shell.js' -import { configExists, loadConfig, saveConfig, CONFIG_PATH } from '../services/config.js' -import { oauthFlow, storeToken, validateToken, getTeams } from '../services/clickup.js' -import { SUPPORTED_TOOLS } from '../services/prompts.js' -import { isChezmoiInstalled, setupChezmoiInline } from '../services/dotfiles.js' +import {confirm, input, select} from '@inquirer/prompts' +import {printWelcomeScreen} from '../utils/welcome.js' +import {typewriterLine} from '../utils/typewriter.js' +import {detectPlatform} from '../services/platform.js' +import {exec, which} from '../services/shell.js' +import {configExists, loadConfig, saveConfig, CONFIG_PATH} from '../services/config.js' +import {oauthFlow, storeToken, validateToken, getTeams} from '../services/clickup.js' +import {SUPPORTED_TOOLS} from '../services/prompts.js' +import {isChezmoiInstalled, 
setupChezmoiInline} from '../services/dotfiles.js' export default class Init extends Command { static description = 'Setup completo ambiente di sviluppo locale' - static examples = [ - '<%= config.bin %> init', - '<%= config.bin %> init --dry-run', - '<%= config.bin %> init --verbose', - ] + static examples = ['<%= config.bin %> init', '<%= config.bin %> init --dry-run', '<%= config.bin %> init --verbose'] static enableJsonFlag = true static flags = { - verbose: Flags.boolean({ description: 'Mostra output dettagliato', default: false }), - 'dry-run': Flags.boolean({ description: 'Mostra cosa farebbe senza eseguire', default: false }), + verbose: Flags.boolean({description: 'Mostra output dettagliato', default: false}), + 'dry-run': Flags.boolean({description: 'Mostra cosa farebbe senza eseguire', default: false}), } async run() { - const { flags } = await this.parse(Init) + const {flags} = await this.parse(Init) const isDryRun = flags['dry-run'] const isJson = flags.json @@ -38,75 +34,82 @@ export default class Init extends Command { const steps = [] // 1. Check prerequisites - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking prerequisites...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking prerequisites...')}).start() const prerequisites = [ - { name: 'Node.js', cmd: 'node', args: ['--version'], required: true }, - { name: 'nvm', cmd: 'nvm', args: ['--version'], required: false }, - { name: 'npm', cmd: 'npm', args: ['--version'], required: true }, - { name: 'Git', cmd: 'git', args: ['--version'], required: true }, - { name: 'gh CLI', cmd: 'gh', args: ['--version'], required: true }, - { name: 'Docker', cmd: 'docker', args: ['--version'], required: false }, - { name: 'AWS CLI', cmd: 'aws', args: ['--version'], required: false }, - { name: 'aws-vault', cmd: 'aws-vault', args: ['--version'], required: false }, + {name: 'Node.js', cmd: 'node', args: ['--version'], required: true}, + {name: 'nvm', cmd: 'nvm', args: ['--version'], required: false}, + {name: 'npm', cmd: 'npm', args: ['--version'], required: true}, + {name: 'Git', cmd: 'git', args: ['--version'], required: true}, + {name: 'gh CLI', cmd: 'gh', args: ['--version'], required: true}, + {name: 'Docker', cmd: 'docker', args: ['--version'], required: false}, + {name: 'AWS CLI', cmd: 'aws', args: ['--version'], required: false}, + {name: 'aws-vault', cmd: 'aws-vault', args: ['--version'], required: false}, ] for (const prereq of prerequisites) { const path = await which(prereq.cmd) const status = path ? 'ok' : prereq.required ? 'fail' : 'warn' - steps.push({ name: prereq.name, status, action: path ? 'found' : 'missing' }) + steps.push({name: prereq.name, status, action: path ? 'found' : 'missing'}) if (flags.verbose && !isJson) this.log(` ${prereq.name}: ${path ?? 'not found'}`) } spinner?.succeed('Prerequisites checked') // 2. Configure Git credential helper - const gitCredSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Configuring Git credential helper...') }).start() + const gitCredSpinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Configuring Git credential helper...')}).start() if (!isDryRun) { await exec('git', ['config', '--global', 'credential.helper', platform.credentialHelper]) } - steps.push({ name: 'git-credential', status: 'ok', action: isDryRun ? 'would configure' : 'configured' }) + steps.push({name: 'git-credential', status: 'ok', action: isDryRun ? 'would configure' : 'configured'}) gitCredSpinner?.succeed(`Git credential helper: ${platform.credentialHelper}`) // 3. Configure aws-vault (interactive if not configured) const awsVaultInstalled = await which('aws-vault') if (awsVaultInstalled) { - steps.push({ name: 'aws-vault', status: 'ok', action: 'found' }) + steps.push({name: 'aws-vault', status: 'ok', action: 'found'}) } else { - steps.push({ name: 'aws-vault', status: 'warn', action: 'not installed' }) + steps.push({name: 'aws-vault', status: 'warn', action: 'not installed'}) if (!isJson) { - const installHint = platform.platform === 'macos' - ? 'brew install aws-vault' - : 'run `dvmi security setup` (Debian/Ubuntu/WSL2) or install aws-vault manually' + const installHint = + platform.platform === 'macos' + ? 'brew install aws-vault' + : 'run `dvmi security setup` (Debian/Ubuntu/WSL2) or install aws-vault manually' this.log(chalk.yellow(` aws-vault not found. Install: ${installHint}`)) } } // 4. Create/update config - const configSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Creating config...') }).start() + const configSpinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Creating config...')}).start() let config = await loadConfig() - if (!configExists() && !isDryRun && !isJson) { - // Stop the spinner before interactive prompts to avoid TTY contention on macOS - configSpinner?.stop() - const useOrg = await confirm({ message: 'Do you use a GitHub organization? 
(y/n)', default: true }) - let org = '' - if (useOrg) { - org = await input({ message: 'GitHub org name:', default: config.org || '' }) - } - const awsProfile = await input({ message: 'AWS profile name:', default: config.awsProfile || 'default' }) - const awsRegion = await input({ message: 'AWS region:', default: config.awsRegion || 'eu-west-1' }) - config = { ...config, org, awsProfile, awsRegion, shell: platform.credentialHelper } - } + if (!configExists() && !isDryRun && !isJson) { + // Stop the spinner before interactive prompts to avoid TTY contention on macOS + configSpinner?.stop() + const useOrg = await confirm({message: 'Do you use a GitHub organization? (y/n)', default: true}) + let org = '' + if (useOrg) { + org = await input({message: 'GitHub org name:', default: config.org || ''}) + } + const awsProfile = await input({message: 'AWS profile name:', default: config.awsProfile || 'default'}) + const awsRegion = await input({message: 'AWS region:', default: config.awsRegion || 'eu-west-1'}) + config = {...config, org, awsProfile, awsRegion, shell: platform.credentialHelper} + } if (!isDryRun) { await saveConfig(config) } - steps.push({ name: 'config', status: 'ok', action: isDryRun ? 'would create' : 'created' }) + steps.push({name: 'config', status: 'ok', action: isDryRun ? 'would create' : 'created'}) configSpinner?.succeed(`Config: ${CONFIG_PATH}`) // 5. 
ClickUp wizard (T008: interactive, T009: dry-run, T010: json) if (isDryRun) { // T009: In dry-run mode report what would happen without any network calls - steps.push({ name: 'clickup', status: 'would configure' }) + steps.push({name: 'clickup', status: 'would configure'}) } else if (isJson) { // T010: In JSON mode skip wizard, report current ClickUp config status config = await loadConfig() @@ -119,11 +122,11 @@ export default class Init extends Command { }) } else { // T008: Full interactive wizard - const configureClickUp = await confirm({ message: 'Configure ClickUp integration?', default: true }) - if (!configureClickUp) { - steps.push({ name: 'clickup', status: 'skipped' }) - this.log(chalk.dim(' Skipped. Run `dvmi init` again to configure ClickUp later.')) - } else { + const configureClickUp = await confirm({message: 'Configure ClickUp integration?', default: true}) + if (!configureClickUp) { + steps.push({name: 'clickup', status: 'skipped'}) + this.log(chalk.dim(' Skipped. Run `dvmi init` again to configure ClickUp later.')) + } else { // Determine auth method const clientId = process.env.CLICKUP_CLIENT_ID const clientSecret = process.env.CLICKUP_CLIENT_SECRET @@ -133,8 +136,8 @@ export default class Init extends Command { const choice = await select({ message: 'Select ClickUp authentication method:', choices: [ - { name: 'Personal API Token (paste from ClickUp Settings > Apps)', value: 'personal_token' }, - { name: 'OAuth (opens browser)', value: 'oauth' }, + {name: 'Personal API Token (paste from ClickUp Settings > Apps)', value: 'personal_token'}, + {name: 'OAuth (opens browser)', value: 'oauth'}, ], }) authMethod = /** @type {'oauth'|'personal_token'} */ (choice) @@ -152,12 +155,16 @@ export default class Init extends Command { } if (authMethod === 'personal_token') { - const token = await input({ message: 'Paste your ClickUp Personal API Token:' }) + const token = await input({message: 'Paste your ClickUp Personal API Token:'}) await storeToken(token) 
} // Validate token - const validateSpinner = ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Validating ClickUp credentials...') }).start() + const validateSpinner = ora({ + spinner: 'arc', + color: false, + text: chalk.hex('#FF6B2B')('Validating ClickUp credentials...'), + }).start() let tokenValid = false try { const result = await validateToken() @@ -169,11 +176,11 @@ export default class Init extends Command { if (!tokenValid) { this.log(chalk.yellow(' Invalid token. Check your ClickUp Personal API Token and try again.')) - const retry = await confirm({ message: 'Retry ClickUp configuration?', default: false }) + const retry = await confirm({message: 'Retry ClickUp configuration?', default: false}) if (!retry) { - steps.push({ name: 'clickup', status: 'skipped' }) + steps.push({name: 'clickup', status: 'skipped'}) } else { - const token = await input({ message: 'Paste your ClickUp Personal API Token:' }) + const token = await input({message: 'Paste your ClickUp Personal API Token:'}) await storeToken(token) tokenValid = (await validateToken()).valid } @@ -183,7 +190,11 @@ export default class Init extends Command { // Fetch teams let teamId = '' let teamName = '' - const teamsSpinner = ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching available teams...') }).start() + const teamsSpinner = ora({ + spinner: 'arc', + color: false, + text: chalk.hex('#FF6B2B')('Fetching available teams...'), + }).start() try { const teams = await getTeams() teamsSpinner.stop() @@ -194,97 +205,97 @@ export default class Init extends Command { } else if (teams.length > 1) { const selected = await select({ message: 'Select your ClickUp team:', - choices: teams.map((t) => ({ name: `${t.name} (${t.id})`, value: t.id })), + choices: teams.map((t) => ({name: `${t.name} (${t.id})`, value: t.id})), }) teamId = selected teamName = teams.find((t) => t.id === selected)?.name ?? 
'' } else { - teamId = await input({ message: 'Enter ClickUp team ID:' }) + teamId = await input({message: 'Enter ClickUp team ID:'}) } } catch { teamsSpinner.fail('Could not fetch teams') - teamId = await input({ message: 'Enter ClickUp team ID (find in ClickUp Settings > Spaces):' }) + teamId = await input({message: 'Enter ClickUp team ID (find in ClickUp Settings > Spaces):'}) } // Save ClickUp config config = await loadConfig() - config = { ...config, clickup: { ...config.clickup, teamId, teamName, authMethod } } + config = {...config, clickup: {...config.clickup, teamId, teamName, authMethod}} await saveConfig(config) this.log(chalk.green('✓') + ' ClickUp configured successfully!') - steps.push({ name: 'clickup', status: 'configured', teamId, teamName, authMethod }) + steps.push({name: 'clickup', status: 'configured', teamId, teamName, authMethod}) } } } - // 6. AI tool selection - if (isDryRun) { - steps.push({ name: 'ai-tool', status: 'would configure' }) - } else if (isJson) { - config = await loadConfig() - steps.push({ - name: 'ai-tool', - status: config.aiTool ? 'configured' : 'not_configured', - aiTool: config.aiTool, - }) - } else { - const aiToolChoices = Object.keys(SUPPORTED_TOOLS).map((t) => ({ name: t, value: t })) - aiToolChoices.push({ name: 'none / skip', value: '' }) - const aiTool = await select({ - message: 'Select your preferred AI tool for `dvmi prompts run`:', - choices: aiToolChoices, - }) - if (aiTool) { - config = { ...config, aiTool } - await saveConfig(config) - this.log(chalk.green(`✓ AI tool set to: ${aiTool}`)) - steps.push({ name: 'ai-tool', status: 'configured', aiTool }) - } else { - steps.push({ name: 'ai-tool', status: 'skipped' }) - } - } + // 6. AI tool selection + if (isDryRun) { + steps.push({name: 'ai-tool', status: 'would configure'}) + } else if (isJson) { + config = await loadConfig() + steps.push({ + name: 'ai-tool', + status: config.aiTool ? 
'configured' : 'not_configured', + aiTool: config.aiTool, + }) + } else { + const aiToolChoices = Object.keys(SUPPORTED_TOOLS).map((t) => ({name: t, value: t})) + aiToolChoices.push({name: 'none / skip', value: ''}) + const aiTool = await select({ + message: 'Select your preferred AI tool for `dvmi prompts run`:', + choices: aiToolChoices, + }) + if (aiTool) { + config = {...config, aiTool} + await saveConfig(config) + this.log(chalk.green(`✓ AI tool set to: ${aiTool}`)) + steps.push({name: 'ai-tool', status: 'configured', aiTool}) + } else { + steps.push({name: 'ai-tool', status: 'skipped'}) + } + } - // 7. Chezmoi dotfiles setup - if (isDryRun) { - steps.push({ name: 'dotfiles', status: 'would configure' }) - } else if (isJson) { - config = await loadConfig() - steps.push({ - name: 'dotfiles', - status: config.dotfiles?.enabled ? 'configured' : 'not_configured', - enabled: config.dotfiles?.enabled ?? false, - }) - } else { - const chezmoiInstalled = await isChezmoiInstalled() - if (!chezmoiInstalled) { - steps.push({ name: 'dotfiles', status: 'skipped', reason: 'chezmoi not installed' }) - } else { - const setupDotfiles = await confirm({ - message: 'Set up chezmoi dotfiles management with age encryption?', - default: false, - }) - if (setupDotfiles) { - try { - const dotfilesResult = await setupChezmoiInline(platform.platform) - config = await loadConfig() - steps.push({ name: 'dotfiles', status: dotfilesResult.status, sourceDir: dotfilesResult.sourceDir }) - } catch (err) { - steps.push({ name: 'dotfiles', status: 'failed', reason: err instanceof Error ? err.message : String(err) }) - } - } else { - steps.push({ name: 'dotfiles', status: 'skipped', hint: 'Run `dvmi dotfiles setup` anytime to enable' }) - } - } - } + // 7. Chezmoi dotfiles setup + if (isDryRun) { + steps.push({name: 'dotfiles', status: 'would configure'}) + } else if (isJson) { + config = await loadConfig() + steps.push({ + name: 'dotfiles', + status: config.dotfiles?.enabled ? 
'configured' : 'not_configured', + enabled: config.dotfiles?.enabled ?? false, + }) + } else { + const chezmoiInstalled = await isChezmoiInstalled() + if (!chezmoiInstalled) { + steps.push({name: 'dotfiles', status: 'skipped', reason: 'chezmoi not installed'}) + } else { + const setupDotfiles = await confirm({ + message: 'Set up chezmoi dotfiles management with age encryption?', + default: false, + }) + if (setupDotfiles) { + try { + const dotfilesResult = await setupChezmoiInline(platform.platform) + config = await loadConfig() + steps.push({name: 'dotfiles', status: dotfilesResult.status, sourceDir: dotfilesResult.sourceDir}) + } catch (err) { + steps.push({name: 'dotfiles', status: 'failed', reason: err instanceof Error ? err.message : String(err)}) + } + } else { + steps.push({name: 'dotfiles', status: 'skipped', hint: 'Run `dvmi dotfiles setup` anytime to enable'}) + } + } + } - // 8. Shell completions - steps.push({ name: 'shell-completions', status: 'ok', action: 'install via: dvmi autocomplete' }) + // 8. 
Shell completions + steps.push({name: 'shell-completions', status: 'ok', action: 'install via: dvmi autocomplete'}) - const result = { steps, configPath: CONFIG_PATH } + const result = {steps, configPath: CONFIG_PATH} - if (isJson) return result + if (isJson) return result - await typewriterLine('✓ Setup complete!') - this.log(chalk.dim(' Run `dvmi doctor` to verify your environment')) + await typewriterLine('✓ Setup complete!') + this.log(chalk.dim(' Run `dvmi doctor` to verify your environment')) return result } diff --git a/src/commands/logs/index.js b/src/commands/logs/index.js index 0add538..69f8edf 100644 --- a/src/commands/logs/index.js +++ b/src/commands/logs/index.js @@ -1,9 +1,9 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import ora from 'ora' -import { search, input } from '@inquirer/prompts' -import { listLogGroups, filterLogEvents, sinceToEpochMs } from '../../services/cloudwatch-logs.js' -import { loadConfig } from '../../services/config.js' -import { DvmiError } from '../../utils/errors.js' +import {search, input} from '@inquirer/prompts' +import {listLogGroups, filterLogEvents, sinceToEpochMs} from '../../services/cloudwatch-logs.js' +import {loadConfig} from '../../services/config.js' +import {DvmiError} from '../../utils/errors.js' const SINCE_OPTIONS = ['1h', '24h', '7d'] @@ -44,7 +44,7 @@ export default class Logs extends Command { } async run() { - const { flags } = await this.parse(Logs) + const {flags} = await this.parse(Logs) const isJson = flags.json // Validate --limit @@ -86,9 +86,7 @@ export default class Logs extends Command { message: 'Select a log group', source: async (input) => { const term = (input ?? 
'').toLowerCase() - return groups - .filter((g) => g.name.toLowerCase().includes(term)) - .map((g) => ({ name: g.name, value: g.name })) + return groups.filter((g) => g.name.toLowerCase().includes(term)).map((g) => ({name: g.name, value: g.name})) }, }) @@ -102,7 +100,7 @@ export default class Logs extends Command { } } - const { startTime, endTime } = sinceToEpochMs(/** @type {'1h'|'24h'|'7d'} */ (flags.since)) + const {startTime, endTime} = sinceToEpochMs(/** @type {'1h'|'24h'|'7d'} */ (flags.since)) const fetchSpinner = isJson ? null : ora('Fetching log events...').start() @@ -171,14 +169,10 @@ export default class Logs extends Command { _handleAwsError(err, _region, _logGroupName) { const msg = String(err) if (msg.includes('AccessDenied') || msg.includes('UnauthorizedAccess')) { - this.error( - 'Access denied. Ensure your role has logs:DescribeLogGroups and logs:FilterLogEvents permissions.', - ) + this.error('Access denied. Ensure your role has logs:DescribeLogGroups and logs:FilterLogEvents permissions.') } if (msg.includes('ResourceNotFoundException')) { - this.error( - `Log group not found. Check the name and confirm you are using the correct region (--region).`, - ) + this.error(`Log group not found. Check the name and confirm you are using the correct region (--region).`) } if (msg.includes('InvalidParameterException')) { this.error('Invalid filter pattern or parameter. 
Check the pattern syntax and time range.') diff --git a/src/commands/open.js b/src/commands/open.js index c1c5eea..1d43415 100644 --- a/src/commands/open.js +++ b/src/commands/open.js @@ -1,8 +1,8 @@ -import { Command, Args } from '@oclif/core' +import {Command, Args} from '@oclif/core' import chalk from 'chalk' -import { exec } from '../services/shell.js' -import { openBrowser } from '../utils/open-browser.js' -import { loadConfig } from '../services/config.js' +import {exec} from '../services/shell.js' +import {openBrowser} from '../utils/open-browser.js' +import {loadConfig} from '../services/config.js' const VALID_TARGETS = ['repo', 'pr', 'actions', 'aws'] @@ -20,11 +20,11 @@ export default class Open extends Command { static enableJsonFlag = true static args = { - target: Args.string({ description: 'Target: repo, pr, actions, aws', required: true }), + target: Args.string({description: 'Target: repo, pr, actions, aws', required: true}), } async run() { - const { args, flags } = await this.parse(Open) + const {args, flags} = await this.parse(Open) const isJson = flags.json if (!VALID_TARGETS.includes(args.target)) { @@ -55,15 +55,15 @@ export default class Open extends Command { const branch = branchResult.stdout // Try to find open PR for current branch const prResult = await exec('gh', ['pr', 'view', '--json', 'url', '-H', branch]) - if (prResult.exitCode === 0) { - url = JSON.parse(prResult.stdout).url - } else { - this.error(`No PR found for branch "${branch}". Create one with \`dvmi pr create\``) - } + if (prResult.exitCode === 0) { + url = JSON.parse(prResult.stdout).url + } else { + this.error(`No PR found for branch "${branch}". 
Create one with \`dvmi pr create\``) + } } } - const result = { target: args.target, url, opened: !isJson } + const result = {target: args.target, url, opened: !isJson} if (isJson) return result diff --git a/src/commands/pipeline/logs.js b/src/commands/pipeline/logs.js index 08d7720..3bcc106 100644 --- a/src/commands/pipeline/logs.js +++ b/src/commands/pipeline/logs.js @@ -1,26 +1,23 @@ -import { Command, Args, Flags } from '@oclif/core' -import { exec } from '../../services/shell.js' +import {Command, Args, Flags} from '@oclif/core' +import {exec} from '../../services/shell.js' export default class PipelineLogs extends Command { static description = 'Log di un workflow run specifico' - static examples = [ - '<%= config.bin %> pipeline logs 12345', - '<%= config.bin %> pipeline logs 12345 --job test', - ] + static examples = ['<%= config.bin %> pipeline logs 12345', '<%= config.bin %> pipeline logs 12345 --job test'] static enableJsonFlag = true static args = { - 'run-id': Args.integer({ description: 'ID del workflow run', required: true }), + 'run-id': Args.integer({description: 'ID del workflow run', required: true}), } static flags = { - job: Flags.string({ description: 'Filtra per job name' }), + job: Flags.string({description: 'Filtra per job name'}), } async run() { - const { args, flags } = await this.parse(PipelineLogs) + const {args, flags} = await this.parse(PipelineLogs) const isJson = flags.json const ghArgs = ['run', 'view', String(args['run-id']), '--log'] @@ -32,10 +29,10 @@ export default class PipelineLogs extends Command { } if (isJson) { - return { runId: args['run-id'], log: result.stdout } + return {runId: args['run-id'], log: result.stdout} } this.log(result.stdout) - return { runId: args['run-id'], log: result.stdout } + return {runId: args['run-id'], log: result.stdout} } } diff --git a/src/commands/pipeline/rerun.js b/src/commands/pipeline/rerun.js index 7036cd1..acf3403 100644 --- a/src/commands/pipeline/rerun.js +++ 
b/src/commands/pipeline/rerun.js @@ -1,12 +1,12 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { confirm } from '@inquirer/prompts' -import { listWorkflowRuns, rerunWorkflow } from '../../services/github.js' -import { exec } from '../../services/shell.js' +import {confirm} from '@inquirer/prompts' +import {listWorkflowRuns, rerunWorkflow} from '../../services/github.js' +import {exec} from '../../services/shell.js' export default class PipelineRerun extends Command { - static description = 'Rilancia l\'ultimo workflow fallito' + static description = "Rilancia l'ultimo workflow fallito" static examples = [ '<%= config.bin %> pipeline rerun', @@ -17,12 +17,12 @@ export default class PipelineRerun extends Command { static enableJsonFlag = true static flags = { - 'run-id': Flags.integer({ description: 'ID specifico del run' }), - 'failed-only': Flags.boolean({ description: 'Rilancia solo i job falliti', default: false }), + 'run-id': Flags.integer({description: 'ID specifico del run'}), + 'failed-only': Flags.boolean({description: 'Rilancia solo i job falliti', default: false}), } async run() { - const { flags } = await this.parse(PipelineRerun) + const {flags} = await this.parse(PipelineRerun) const isJson = flags.json const remoteResult = await exec('git', ['remote', 'get-url', 'origin']) @@ -34,7 +34,7 @@ export default class PipelineRerun extends Command { let runId = flags['run-id'] if (!runId) { - const runs = await listWorkflowRuns(owner, repo, { limit: 10 }) + const runs = await listWorkflowRuns(owner, repo, {limit: 10}) const failed = runs.find((r) => r.conclusion === 'failure') if (!failed) { this.log(chalk.green('No failed runs found.')) @@ -47,19 +47,24 @@ export default class PipelineRerun extends Command { } if (!isJson) { - const ok = await confirm({ message: `Rerun workflow #${runId}?` }) - if (!ok) { this.log('Aborted.'); return } + const ok = await 
confirm({message: `Rerun workflow #${runId}?`}) + if (!ok) { + this.log('Aborted.') + return + } } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Relaunching workflow...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Relaunching workflow...')}).start() await rerunWorkflow(owner, repo, runId, flags['failed-only']) spinner?.succeed(`Workflow #${runId} rerun started`) - const result = { rerun: { id: runId, failedOnly: flags['failed-only'], status: 'queued' } } + const result = {rerun: {id: runId, failedOnly: flags['failed-only'], status: 'queued'}} - if (!isJson) { - this.log(chalk.dim('Track with `dvmi pipeline status`')) - } + if (!isJson) { + this.log(chalk.dim('Track with `dvmi pipeline status`')) + } return result } diff --git a/src/commands/pipeline/status.js b/src/commands/pipeline/status.js index 79aa323..930d7ce 100644 --- a/src/commands/pipeline/status.js +++ b/src/commands/pipeline/status.js @@ -1,9 +1,9 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { listWorkflowRuns } from '../../services/github.js' -import { exec } from '../../services/shell.js' -import { renderTable, colorStatus } from '../../formatters/table.js' +import {listWorkflowRuns} from '../../services/github.js' +import {exec} from '../../services/shell.js' +import {renderTable, colorStatus} from '../../formatters/table.js' export default class PipelineStatus extends Command { static description = 'Stato GitHub Actions per il repo corrente' @@ -17,46 +17,50 @@ export default class PipelineStatus extends Command { static enableJsonFlag = true static flags = { - branch: Flags.string({ description: 'Filtra per branch' }), - limit: Flags.integer({ description: 'Numero di run da mostrare', default: 10 }), + branch: Flags.string({description: 'Filtra per branch'}), + limit: 
Flags.integer({description: 'Numero di run da mostrare', default: 10}), } async run() { - const { flags } = await this.parse(PipelineStatus) + const {flags} = await this.parse(PipelineStatus) const isJson = flags.json - // Detect repo from git remote - const remoteResult = await exec('git', ['remote', 'get-url', 'origin']) - if (remoteResult.exitCode !== 0) { - this.error('Not in a Git repository. Navigate to a repo or use `dvmi repo list`') - } + // Detect repo from git remote + const remoteResult = await exec('git', ['remote', 'get-url', 'origin']) + if (remoteResult.exitCode !== 0) { + this.error('Not in a Git repository. Navigate to a repo or use `dvmi repo list`') + } const match = remoteResult.stdout.match(/github\.com[:/]([^/]+)\/([^/.]+)/) if (!match) this.error('Could not detect GitHub repository.') const [, owner, repo] = match - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching pipeline runs...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching pipeline runs...')}).start() const runs = await listWorkflowRuns(owner, repo, { branch: flags.branch, limit: flags.limit, }) spinner?.stop() - if (isJson) return { runs } + if (isJson) return {runs} if (runs.length === 0) { this.log(chalk.dim('No workflow runs found.')) - return { runs: [] } + return {runs: []} } this.log(chalk.bold('\nGitHub Actions runs:\n')) - this.log(renderTable(runs, [ - { header: 'Status', key: 'conclusion', width: 10, format: (v) => colorStatus(v ? String(v) : 'pending') }, - { header: 'Workflow', key: 'name', width: 25 }, - { header: 'Branch', key: 'branch', width: 20 }, - { header: 'Duration', key: 'duration', width: 10, format: (v) => `${v}s` }, - { header: 'Actor', key: 'actor', width: 15 }, - ])) - - return { runs } + this.log( + renderTable(runs, [ + {header: 'Status', key: 'conclusion', width: 10, format: (v) => colorStatus(v ? 
String(v) : 'pending')}, + {header: 'Workflow', key: 'name', width: 25}, + {header: 'Branch', key: 'branch', width: 20}, + {header: 'Duration', key: 'duration', width: 10, format: (v) => `${v}s`}, + {header: 'Actor', key: 'actor', width: 15}, + ]), + ) + + return {runs} } } diff --git a/src/commands/pr/create.js b/src/commands/pr/create.js index 970fe93..3b3ae35 100644 --- a/src/commands/pr/create.js +++ b/src/commands/pr/create.js @@ -1,11 +1,11 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { confirm, input } from '@inquirer/prompts' -import { createPR } from '../../services/github.js' -import { exec } from '../../services/shell.js' -import { readFile } from 'node:fs/promises' -import { existsSync } from 'node:fs' +import {confirm, input} from '@inquirer/prompts' +import {createPR} from '../../services/github.js' +import {exec} from '../../services/shell.js' +import {readFile} from 'node:fs/promises' +import {existsSync} from 'node:fs' /** * @param {string} branchName @@ -14,7 +14,7 @@ import { existsSync } from 'node:fs' function titleFromBranch(branchName) { const [type, ...rest] = branchName.split('/') const desc = rest.join('/').replace(/-/g, ' ') - const typeMap = { feature: 'Feature', fix: 'Fix', chore: 'Chore', hotfix: 'Hotfix' } + const typeMap = {feature: 'Feature', fix: 'Fix', chore: 'Chore', hotfix: 'Hotfix'} return `${typeMap[type] ?? type}: ${desc}` } @@ -23,7 +23,7 @@ function titleFromBranch(branchName) { * @returns {string[]} */ function labelFromType(branchType) { - const map = { feature: ['feature'], fix: ['bug'], chore: ['chore'], hotfix: ['critical'] } + const map = {feature: ['feature'], fix: ['bug'], chore: ['chore'], hotfix: ['critical']} return map[branchType] ?? 
[] } @@ -39,13 +39,13 @@ export default class PRCreate extends Command { static enableJsonFlag = true static flags = { - title: Flags.string({ description: 'Titolo PR (default: auto-generated)' }), - draft: Flags.boolean({ description: 'Crea come draft', default: false }), - 'dry-run': Flags.boolean({ description: 'Preview senza eseguire', default: false }), + title: Flags.string({description: 'Titolo PR (default: auto-generated)'}), + draft: Flags.boolean({description: 'Crea come draft', default: false}), + 'dry-run': Flags.boolean({description: 'Preview senza eseguire', default: false}), } async run() { - const { flags } = await this.parse(PRCreate) + const {flags} = await this.parse(PRCreate) const isJson = flags.json const isDryRun = flags['dry-run'] // Get current branch @@ -53,9 +53,9 @@ export default class PRCreate extends Command { if (branchResult.exitCode !== 0) this.error('Not in a Git repository.') const branch = branchResult.stdout - if (['main', 'master', 'develop'].includes(branch)) { - this.error(`You're on the default branch "${branch}". Create a feature branch first with \`dvmi branch create\``) - } + if (['main', 'master', 'develop'].includes(branch)) { + this.error(`You're on the default branch "${branch}". Create a feature branch first with \`dvmi branch create\``) + } // Check for commits const repoUrl = await exec('git', ['remote', 'get-url', 'origin']) @@ -64,7 +64,9 @@ export default class PRCreate extends Command { const [, owner, repo] = repoMatch // Push branch if needed - const pushSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Pushing branch...') }).start() + const pushSpinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Pushing branch...')}).start() await exec('git', ['push', '-u', 'origin', branch]) pushSpinner?.stop() @@ -80,32 +82,43 @@ export default class PRCreate extends Command { // Generate title const autoTitle = titleFromBranch(branch) - const title = flags.title ?? (isJson ? autoTitle : await input({ message: 'PR title:', default: autoTitle })) + const title = flags.title ?? (isJson ? autoTitle : await input({message: 'PR title:', default: autoTitle})) const branchType = branch.split('/')[0] const labels = labelFromType(branchType) - const preview = { branch, base: 'main', title, labels, draft: flags.draft } + const preview = {branch, base: 'main', title, labels, draft: flags.draft} if (isDryRun) { - if (isJson) return { pr: preview } + if (isJson) return {pr: preview} this.log(chalk.bold('Dry run — would create PR:')) this.log(JSON.stringify(preview, null, 2)) - return { pr: preview } + return {pr: preview} } if (!isJson) { - const ok = await confirm({ message: `Create PR "${title}"?` }) - if (!ok) { this.log('Aborted.'); return } + const ok = await confirm({message: `Create PR "${title}"?`}) + if (!ok) { + this.log('Aborted.') + return + } } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Creating PR...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Creating PR...')}).start() const pr = await createPR({ - owner, repo, title, body, - head: branch, base: 'main', - draft: flags.draft, labels, reviewers: [], + owner, + repo, + title, + body, + head: branch, + base: 'main', + draft: flags.draft, + labels, + reviewers: [], }) spinner?.succeed(`PR created: ${pr.htmlUrl}`) - const result = { pr: { number: pr.number, title, url: pr.htmlUrl, labels, draft: flags.draft } } + const result = {pr: {number: pr.number, title, url: pr.htmlUrl, labels, draft: flags.draft}} if (isJson) return result this.log(chalk.green('✓') + ' ' + pr.htmlUrl) diff --git a/src/commands/pr/detail.js b/src/commands/pr/detail.js index 6d02d6c..111bfb0 100644 --- a/src/commands/pr/detail.js +++ b/src/commands/pr/detail.js @@ -1,8 +1,8 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { getPRDetail } from '../../services/github.js' -import { exec } from '../../services/shell.js' +import {getPRDetail} from '../../services/github.js' +import {exec} from '../../services/shell.js' export default class PRDetail extends Command { static description = 'Dettaglio PR con commenti QA e checklist degli step' @@ -16,15 +16,15 @@ export default class PRDetail extends Command { static enableJsonFlag = true static args = { - number: Args.integer({ description: 'Numero della PR', required: true }), + number: Args.integer({description: 'Numero della PR', required: true}), } static flags = { - repo: Flags.string({ description: 'Repository nel formato owner/repo (default: rilevato da git remote)' }), + repo: Flags.string({description: 'Repository nel formato owner/repo (default: rilevato da git remote)'}), } async run() { - const { args, flags } = await this.parse(PRDetail) + const {args, flags} = await this.parse(PRDetail) const isJson = flags.json let owner, repo @@ -39,7 +39,9 @@ 
export default class PRDetail extends Command { ;[, owner, repo] = match } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Caricamento PR...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Caricamento PR...')}).start() const detail = await getPRDetail(owner, repo, args.number) spinner?.stop() diff --git a/src/commands/pr/review.js b/src/commands/pr/review.js index 4b5cdfa..e70c4c7 100644 --- a/src/commands/pr/review.js +++ b/src/commands/pr/review.js @@ -1,22 +1,19 @@ -import { Command } from '@oclif/core' +import {Command} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { listMyPRs } from '../../services/github.js' -import { loadConfig } from '../../services/config.js' -import { renderTable, colorStatus } from '../../formatters/table.js' +import {listMyPRs} from '../../services/github.js' +import {loadConfig} from '../../services/config.js' +import {renderTable, colorStatus} from '../../formatters/table.js' export default class PRReview extends Command { static description = 'Lista PR assegnate a te per la code review' - static examples = [ - '<%= config.bin %> pr review', - '<%= config.bin %> pr review --json', - ] + static examples = ['<%= config.bin %> pr review', '<%= config.bin %> pr review --json'] static enableJsonFlag = true async run() { - const { flags } = await this.parse(PRReview) + const {flags} = await this.parse(PRReview) const isJson = flags.json const config = await loadConfig() @@ -24,28 +21,30 @@ export default class PRReview extends Command { this.error("GitHub org non configurata. Esegui `dvmi init` per configurare l'ambiente.") } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Caricamento PR in review...') }).start() - const { reviewing } = await listMyPRs(config.org) + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Caricamento PR in review...')}).start() + const {reviewing} = await listMyPRs(config.org) spinner?.stop() - if (isJson) return { reviewing } + if (isJson) return {reviewing} if (reviewing.length === 0) { this.log(chalk.dim('Nessuna PR assegnata per review.')) - return { reviewing } + return {reviewing} } this.log(chalk.bold(`\nPR ASSEGNATE PER REVIEW (${reviewing.length}):`)) this.log( renderTable(reviewing, [ - { header: '#', key: 'number', width: 6 }, - { header: 'Titolo', key: 'title', width: 45 }, - { header: 'Autore', key: 'author', width: 20 }, - { header: 'Branch', key: 'headBranch', width: 30 }, - { header: 'CI', key: 'ciStatus', width: 10, format: (v) => colorStatus(String(v)) }, + {header: '#', key: 'number', width: 6}, + {header: 'Titolo', key: 'title', width: 45}, + {header: 'Autore', key: 'author', width: 20}, + {header: 'Branch', key: 'headBranch', width: 30}, + {header: 'CI', key: 'ciStatus', width: 10, format: (v) => colorStatus(String(v))}, ]), ) - return { reviewing } + return {reviewing} } } diff --git a/src/commands/pr/status.js b/src/commands/pr/status.js index 1b1f3a9..9cb5d50 100644 --- a/src/commands/pr/status.js +++ b/src/commands/pr/status.js @@ -1,9 +1,9 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { listMyPRs } from '../../services/github.js' -import { loadConfig } from '../../services/config.js' -import { renderTable, colorStatus } from '../../formatters/table.js' +import {listMyPRs} from '../../services/github.js' +import {loadConfig} from '../../services/config.js' +import {renderTable, colorStatus} from '../../formatters/table.js' export default class PRStatus extends Command { static description = 'Stato delle tue PR aperte (come autore e come reviewer)' @@ -17,12 +17,12 @@ export default class PRStatus extends Command { static enableJsonFlag = true static 
flags = { - author: Flags.boolean({ description: 'Solo PR dove sei autore', default: false }), - reviewer: Flags.boolean({ description: 'Solo PR dove sei reviewer', default: false }), + author: Flags.boolean({description: 'Solo PR dove sei autore', default: false}), + reviewer: Flags.boolean({description: 'Solo PR dove sei reviewer', default: false}), } async run() { - const { flags } = await this.parse(PRStatus) + const {flags} = await this.parse(PRStatus) const isJson = flags.json const config = await loadConfig() @@ -30,8 +30,10 @@ export default class PRStatus extends Command { this.error('GitHub org not configured. Run `dvmi init` to set up your environment.') } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching PRs...') }).start() - const { authored, reviewing } = await listMyPRs(config.org) + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching PRs...')}).start() + const {authored, reviewing} = await listMyPRs(config.org) spinner?.stop() const showAuthored = !flags.reviewer || flags.author @@ -46,25 +48,29 @@ export default class PRStatus extends Command { if (showAuthored && authored.length > 0) { this.log(chalk.bold('\nYOUR PRS:')) - this.log(renderTable(authored, [ - { header: 'Repo', key: 'headBranch', width: 30, format: (v) => String(v).split('/')[0] }, - { header: 'Title', key: 'title', width: 40 }, - { header: 'CI', key: 'ciStatus', width: 10, format: (v) => colorStatus(String(v)) }, - { header: 'Review', key: 'reviewStatus', width: 20, format: (v) => colorStatus(String(v)) }, - ])) + this.log( + renderTable(authored, [ + {header: 'Repo', key: 'headBranch', width: 30, format: (v) => String(v).split('/')[0]}, + {header: 'Title', key: 'title', width: 40}, + {header: 'CI', key: 'ciStatus', width: 10, format: (v) => colorStatus(String(v))}, + {header: 'Review', key: 'reviewStatus', width: 20, format: (v) => colorStatus(String(v))}, + ]), + ) } 
else if (showAuthored) { this.log(chalk.dim('No authored PRs found.')) } if (showReviewing && reviewing.length > 0) { this.log(chalk.bold('\nREVIEW REQUESTED:')) - this.log(renderTable(reviewing, [ - { header: 'Title', key: 'title', width: 40 }, - { header: 'Author', key: 'author', width: 20 }, - { header: 'CI', key: 'ciStatus', width: 10, format: (v) => colorStatus(String(v)) }, - ])) + this.log( + renderTable(reviewing, [ + {header: 'Title', key: 'title', width: 40}, + {header: 'Author', key: 'author', width: 20}, + {header: 'CI', key: 'ciStatus', width: 10, format: (v) => colorStatus(String(v))}, + ]), + ) } - return { authored, reviewing } + return {authored, reviewing} } } diff --git a/src/commands/prompts/browse.js b/src/commands/prompts/browse.js index 6ac1d5f..9a176e1 100644 --- a/src/commands/prompts/browse.js +++ b/src/commands/prompts/browse.js @@ -1,11 +1,11 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { select } from '@inquirer/prompts' -import { searchSkills } from '../../services/skills-sh.js' -import { fetchAwesomeEntries, AWESOME_CATEGORIES } from '../../services/awesome-copilot.js' -import { formatSkillTable, formatAwesomeTable } from '../../formatters/prompts.js' -import { DvmiError } from '../../utils/errors.js' +import {select} from '@inquirer/prompts' +import {searchSkills} from '../../services/skills-sh.js' +import {fetchAwesomeEntries, AWESOME_CATEGORIES} from '../../services/awesome-copilot.js' +import {formatSkillTable, formatAwesomeTable} from '../../formatters/prompts.js' +import {DvmiError} from '../../utils/errors.js' /** @import { Skill, AwesomeEntry } from '../../types.js' */ @@ -45,7 +45,7 @@ export default class PromptsBrowse extends Command { } async run() { - const { args, flags } = await this.parse(PromptsBrowse) + const {args, flags} = await this.parse(PromptsBrowse) const isJson = flags.json const source = 
args.source @@ -81,7 +81,7 @@ export default class PromptsBrowse extends Command { } catch (err) { spinner?.fail() if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } @@ -89,7 +89,7 @@ export default class PromptsBrowse extends Command { spinner?.stop() if (isJson) { - return { skills, total: skills.length } + return {skills, total: skills.length} } this.log( @@ -99,7 +99,7 @@ export default class PromptsBrowse extends Command { ) this.log(formatSkillTable(skills)) - return { skills, total: skills.length } + return {skills, total: skills.length} } // source === 'awesome' @@ -118,7 +118,7 @@ export default class PromptsBrowse extends Command { } catch (err) { spinner?.fail() if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } @@ -126,7 +126,7 @@ export default class PromptsBrowse extends Command { spinner?.stop() if (isJson) { - return { entries, total: entries.length, category } + return {entries, total: entries.length, category} } this.log( @@ -140,8 +140,8 @@ export default class PromptsBrowse extends Command { if (entries.length > 0) { try { - const choices = entries.map((e) => ({ name: `${e.name} ${chalk.dim(e.url)}`, value: e })) - choices.push({ name: chalk.dim('← Exit'), value: /** @type {AwesomeEntry} */ (null) }) + const choices = entries.map((e) => ({name: `${e.name} ${chalk.dim(e.url)}`, value: e})) + choices.push({name: chalk.dim('← Exit'), value: /** @type {AwesomeEntry} */ (null)}) const selected = await select({ message: 'Select an entry to view its URL (or Exit):', @@ -159,6 +159,6 @@ export default class PromptsBrowse extends Command { } } - return { entries, total: entries.length, category } + return {entries, total: entries.length, category} } } diff --git 
a/src/commands/prompts/download.js b/src/commands/prompts/download.js index f44ea5d..c3b2562 100644 --- a/src/commands/prompts/download.js +++ b/src/commands/prompts/download.js @@ -1,11 +1,11 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { select, confirm } from '@inquirer/prompts' -import { join } from 'node:path' -import { listPrompts, downloadPrompt } from '../../services/prompts.js' -import { loadConfig } from '../../services/config.js' -import { DvmiError } from '../../utils/errors.js' +import {select, confirm} from '@inquirer/prompts' +import {join} from 'node:path' +import {listPrompts, downloadPrompt} from '../../services/prompts.js' +import {loadConfig} from '../../services/config.js' +import {DvmiError} from '../../utils/errors.js' /** @import { Prompt } from '../../types.js' */ @@ -38,7 +38,7 @@ export default class PromptsDownload extends Command { } async run() { - const { args, flags } = await this.parse(PromptsDownload) + const {args, flags} = await this.parse(PromptsDownload) const isJson = flags.json // Determine local prompts directory from config or default to cwd/.prompts @@ -48,8 +48,7 @@ export default class PromptsDownload extends Command { } catch { /* use defaults */ } - const localDir = - process.env.DVMI_PROMPTS_DIR ?? config.promptsDir ?? join(process.cwd(), DEFAULT_PROMPTS_DIR) + const localDir = process.env.DVMI_PROMPTS_DIR ?? config.promptsDir ?? 
join(process.cwd(), DEFAULT_PROMPTS_DIR) // Resolve path interactively if not provided (only in interactive mode) let relativePath = args.path @@ -74,7 +73,7 @@ export default class PromptsDownload extends Command { } catch (err) { spinner.fail() if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } @@ -82,14 +81,14 @@ export default class PromptsDownload extends Command { if (prompts.length === 0) { this.log(chalk.yellow('No prompts found in the repository.')) - return { downloaded: [], skipped: [] } + return {downloaded: [], skipped: []} } const choices = prompts.map((p) => ({ name: `${p.path} ${chalk.dim(p.title)}`, value: p.path, })) - relativePath = await select({ message: 'Select a prompt to download:', choices }) + relativePath = await select({message: 'Select a prompt to download:', choices}) } // Attempt download (skips automatically if file exists and --overwrite not set) @@ -103,11 +102,11 @@ export default class PromptsDownload extends Command { let result try { - result = await downloadPrompt(relativePath, localDir, { overwrite: flags.overwrite }) + result = await downloadPrompt(relativePath, localDir, {overwrite: flags.overwrite}) } catch (err) { spinner?.fail() if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } @@ -122,10 +121,10 @@ export default class PromptsDownload extends Command { }) if (shouldOverwrite) { try { - result = await downloadPrompt(relativePath, localDir, { overwrite: true }) + result = await downloadPrompt(relativePath, localDir, {overwrite: true}) } catch (err) { if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } 
diff --git a/src/commands/prompts/install-speckit.js b/src/commands/prompts/install-speckit.js index 8571789..c086a63 100644 --- a/src/commands/prompts/install-speckit.js +++ b/src/commands/prompts/install-speckit.js @@ -1,9 +1,9 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { loadConfig } from '../../services/config.js' -import { isUvInstalled, isSpecifyInstalled, installSpecifyCli, runSpecifyInit } from '../../services/speckit.js' -import { DvmiError } from '../../utils/errors.js' +import {loadConfig} from '../../services/config.js' +import {isUvInstalled, isSpecifyInstalled, installSpecifyCli, runSpecifyInit} from '../../services/speckit.js' +import {DvmiError} from '../../utils/errors.js' /** * Map from dvmi's `aiTool` config values to spec-kit's `--ai` flag values. @@ -26,8 +26,7 @@ export default class PromptsInstallSpeckit extends Command { static flags = { ai: Flags.string({ - description: - 'AI agent to pass to `specify init --ai` (defaults to the aiTool set in `dvmi init`)', + description: 'AI agent to pass to `specify init --ai` (defaults to the aiTool set in `dvmi init`)', options: ['opencode', 'copilot', 'claude', 'gemini', 'cursor-agent', 'codex', 'windsurf', 'kiro-cli', 'amp'], }), force: Flags.boolean({ @@ -42,7 +41,7 @@ export default class PromptsInstallSpeckit extends Command { } async run() { - const { flags } = await this.parse(PromptsInstallSpeckit) + const {flags} = await this.parse(PromptsInstallSpeckit) // ── 1. Require uv ──────────────────────────────────────────────────────── if (!(await isUvInstalled())) { @@ -57,15 +56,15 @@ export default class PromptsInstallSpeckit extends Command { if (!alreadyInstalled || flags.reinstall) { const label = alreadyInstalled ? 'Reinstalling specify-cli...' : 'Installing specify-cli...' 
- const spinner = ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')(label) }).start() + const spinner = ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')(label)}).start() try { - await installSpecifyCli({ force: flags.reinstall }) + await installSpecifyCli({force: flags.reinstall}) spinner.succeed(chalk.green('specify-cli installed')) } catch (err) { spinner.fail() if (err instanceof DvmiError) { - this.error(err.message, { exit: 1, suggestions: [err.hint] }) + this.error(err.message, {exit: 1, suggestions: [err.hint]}) } throw err } @@ -86,10 +85,10 @@ export default class PromptsInstallSpeckit extends Command { this.log('') try { - await runSpecifyInit(process.cwd(), { ai: aiFlag, force: flags.force }) + await runSpecifyInit(process.cwd(), {ai: aiFlag, force: flags.force}) } catch (err) { if (err instanceof DvmiError) { - this.error(err.message, { exit: 1, suggestions: [err.hint] }) + this.error(err.message, {exit: 1, suggestions: [err.hint]}) } throw err } diff --git a/src/commands/prompts/list.js b/src/commands/prompts/list.js index e706bd7..522fe1b 100644 --- a/src/commands/prompts/list.js +++ b/src/commands/prompts/list.js @@ -1,10 +1,10 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { select } from '@inquirer/prompts' -import { listPrompts } from '../../services/prompts.js' -import { formatPromptTable, formatPromptBody } from '../../formatters/prompts.js' -import { DvmiError } from '../../utils/errors.js' +import {select} from '@inquirer/prompts' +import {listPrompts} from '../../services/prompts.js' +import {formatPromptTable, formatPromptBody} from '../../formatters/prompts.js' +import {DvmiError} from '../../utils/errors.js' /** @import { Prompt } from '../../types.js' */ @@ -27,7 +27,7 @@ export default class PromptsList extends Command { } async run() { - const { flags } = await this.parse(PromptsList) + const {flags} = await 
this.parse(PromptsList) const isJson = flags.json const spinner = isJson @@ -45,7 +45,7 @@ export default class PromptsList extends Command { } catch (err) { spinner?.fail() if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } @@ -65,7 +65,7 @@ export default class PromptsList extends Command { : prompts if (isJson) { - return { prompts: filtered, total: filtered.length } + return {prompts: filtered, total: filtered.length} } if (filtered.length === 0) { @@ -73,7 +73,7 @@ export default class PromptsList extends Command { ? chalk.dim(`No prompts matching "${flags.filter}".`) : chalk.yellow('No prompts found in the repository.') this.log(msg) - return { prompts: [], total: 0 } + return {prompts: [], total: 0} } const filterInfo = query ? chalk.dim(` — filter: ${chalk.white(`"${flags.filter}"`)}`) : '' @@ -87,8 +87,8 @@ export default class PromptsList extends Command { // Interactive selection to view full prompt content try { - const choices = filtered.map((p) => ({ name: p.title, value: p })) - choices.push({ name: chalk.dim('← Exit'), value: /** @type {Prompt} */ (null) }) + const choices = filtered.map((p) => ({name: p.title, value: p})) + choices.push({name: chalk.dim('← Exit'), value: /** @type {Prompt} */ (null)}) const selected = await select({ message: 'Select a prompt to view its content (or Exit):', @@ -102,6 +102,6 @@ export default class PromptsList extends Command { // User pressed Ctrl+C — exit gracefully } - return { prompts: filtered, total: filtered.length } + return {prompts: filtered, total: filtered.length} } } diff --git a/src/commands/prompts/run.js b/src/commands/prompts/run.js index 78cfd9c..32a6c7f 100644 --- a/src/commands/prompts/run.js +++ b/src/commands/prompts/run.js @@ -1,12 +1,12 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import ora 
from 'ora' import chalk from 'chalk' -import { select, confirm } from '@inquirer/prompts' -import { join } from 'node:path' -import { readdir } from 'node:fs/promises' -import { resolveLocalPrompt, invokeTool, SUPPORTED_TOOLS } from '../../services/prompts.js' -import { loadConfig } from '../../services/config.js' -import { DvmiError } from '../../utils/errors.js' +import {select, confirm} from '@inquirer/prompts' +import {join} from 'node:path' +import {readdir} from 'node:fs/promises' +import {resolveLocalPrompt, invokeTool, SUPPORTED_TOOLS} from '../../services/prompts.js' +import {loadConfig} from '../../services/config.js' +import {DvmiError} from '../../utils/errors.js' /** @import { AITool } from '../../types.js' */ @@ -23,7 +23,7 @@ async function walkPrompts(dir, base) { const results = [] let entries try { - entries = await readdir(dir, { withFileTypes: true }) + entries = await readdir(dir, {withFileTypes: true}) } catch { return results } @@ -67,7 +67,7 @@ export default class PromptsRun extends Command { } async run() { - const { args, flags } = await this.parse(PromptsRun) + const {args, flags} = await this.parse(PromptsRun) const isJson = flags.json // Load config @@ -78,8 +78,7 @@ export default class PromptsRun extends Command { /* use defaults */ } - const localDir = - process.env.DVMI_PROMPTS_DIR ?? config.promptsDir ?? join(process.cwd(), DEFAULT_PROMPTS_DIR) + const localDir = process.env.DVMI_PROMPTS_DIR ?? config.promptsDir ?? join(process.cwd(), DEFAULT_PROMPTS_DIR) // Resolve tool: --tool flag > config.aiTool const toolName = /** @type {AITool | undefined} */ (flags.tool ?? 
config.aiTool) @@ -111,7 +110,7 @@ export default class PromptsRun extends Command { prompt = await resolveLocalPrompt(args.path, localDir) } catch (err) { if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } @@ -142,7 +141,7 @@ export default class PromptsRun extends Command { relativePath = await select({ message: 'Select a local prompt to run:', - choices: localPaths.map((p) => ({ name: p, value: p })), + choices: localPaths.map((p) => ({name: p, value: p})), }) } @@ -168,7 +167,7 @@ export default class PromptsRun extends Command { } catch (err) { spinner.fail() if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } @@ -180,14 +179,12 @@ export default class PromptsRun extends Command { // This protects against prompt injection from tampered local files (originally // downloaded from remote repositories). Skipped in CI/non-interactive environments. if (!process.env.CI && process.stdin.isTTY) { - const preview = prompt.body.length > 500 - ? prompt.body.slice(0, 500) + chalk.dim('\n…[truncated]') - : prompt.body + const preview = prompt.body.length > 500 ? 
prompt.body.slice(0, 500) + chalk.dim('\n…[truncated]') : prompt.body this.log(chalk.yellow('Prompt preview:')) this.log(chalk.dim('─'.repeat(50))) this.log(chalk.dim(preview)) this.log(chalk.dim('─'.repeat(50)) + '\n') - const ok = await confirm({ message: `Run this prompt with ${toolName}?`, default: true }) + const ok = await confirm({message: `Run this prompt with ${toolName}?`, default: true}) if (!ok) { this.log(chalk.dim('Aborted.')) return @@ -199,7 +196,7 @@ export default class PromptsRun extends Command { await invokeTool(toolName, prompt.body) } catch (err) { if (err instanceof DvmiError) { - this.error(err.message, { exit: err.exitCode, suggestions: [err.hint] }) + this.error(err.message, {exit: err.exitCode, suggestions: [err.hint]}) } throw err } diff --git a/src/commands/repo/list.js b/src/commands/repo/list.js index 4645a2f..b95af5a 100644 --- a/src/commands/repo/list.js +++ b/src/commands/repo/list.js @@ -1,9 +1,9 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { listRepos } from '../../services/github.js' -import { loadConfig } from '../../services/config.js' -import { renderTable } from '../../formatters/table.js' +import {listRepos} from '../../services/github.js' +import {loadConfig} from '../../services/config.js' +import {renderTable} from '../../formatters/table.js' /** * @param {string} lang @@ -13,22 +13,22 @@ function langColor(lang) { const map = { javascript: chalk.yellow, typescript: chalk.blue, - python: chalk.green, - java: chalk.red, - go: chalk.cyan, - ruby: chalk.magenta, - rust: chalk.hex('#CE422B'), - kotlin: chalk.hex('#7F52FF'), - swift: chalk.hex('#F05138'), - php: chalk.hex('#777BB4'), - shell: chalk.greenBright, + python: chalk.green, + java: chalk.red, + go: chalk.cyan, + ruby: chalk.magenta, + rust: chalk.hex('#CE422B'), + kotlin: chalk.hex('#7F52FF'), + swift: chalk.hex('#F05138'), + php: chalk.hex('#777BB4'), + shell: 
chalk.greenBright, } const fn = map[lang.toLowerCase()] return fn ? fn(lang) : chalk.dim(lang) } export default class RepoList extends Command { - static description = 'Lista repository dell\'organizzazione' + static description = "Lista repository dell'organizzazione" static examples = [ '<%= config.bin %> repo list', @@ -41,13 +41,13 @@ export default class RepoList extends Command { static enableJsonFlag = true static flags = { - language: Flags.string({ description: 'Filtra per linguaggio' }), - topic: Flags.string({ description: 'Filtra per topic' }), - search: Flags.string({ char: 's', description: 'Cerca in nome e descrizione (case-insensitive)' }), + language: Flags.string({description: 'Filtra per linguaggio'}), + topic: Flags.string({description: 'Filtra per topic'}), + search: Flags.string({char: 's', description: 'Cerca in nome e descrizione (case-insensitive)'}), } async run() { - const { flags } = await this.parse(RepoList) + const {flags} = await this.parse(RepoList) const isJson = flags.json const config = await loadConfig() @@ -55,7 +55,9 @@ export default class RepoList extends Command { this.error('GitHub org not configured. Run `dvmi init` to set up your environment.') } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching repositories...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching repositories...')}).start() const repos = await listRepos(config.org, { language: flags.language, topic: flags.topic, @@ -65,49 +67,63 @@ export default class RepoList extends Command { // Search filter (name + description) const searchQuery = flags.search?.toLowerCase() const filtered = searchQuery - ? repos.filter((r) => - r.name.toLowerCase().includes(searchQuery) || - r.description.toLowerCase().includes(searchQuery), + ? 
repos.filter( + (r) => r.name.toLowerCase().includes(searchQuery) || r.description.toLowerCase().includes(searchQuery), ) : repos - if (isJson) return { repositories: filtered, total: filtered.length } + if (isJson) return {repositories: filtered, total: filtered.length} if (repos.length === 0) { this.log(chalk.yellow('No repositories found matching your filters.')) - return { repositories: [], total: 0 } + return {repositories: [], total: 0} } if (filtered.length === 0) { this.log(chalk.dim(`No repositories matching "${flags.search}".`)) - return { repositories: [], total: 0 } + return {repositories: [], total: 0} } // Build filter info line const filterInfo = [ flags.language && chalk.dim(`language: ${chalk.white(flags.language)}`), - flags.topic && chalk.dim(`topic: ${chalk.white(flags.topic)}`), - flags.search && chalk.dim(`search: ${chalk.white(`"${flags.search}"`)}`), - ].filter(Boolean).join(chalk.dim(' · ')) + flags.topic && chalk.dim(`topic: ${chalk.white(flags.topic)}`), + flags.search && chalk.dim(`search: ${chalk.white(`"${flags.search}"`)}`), + ] + .filter(Boolean) + .join(chalk.dim(' · ')) this.log( chalk.bold(`\nRepositories in ${config.org}`) + - (filterInfo ? chalk.dim(' — ') + filterInfo : '') + - chalk.dim(` (${filtered.length}${filtered.length < repos.length ? `/${repos.length}` : ''})`) + - '\n', + (filterInfo ? chalk.dim(' — ') + filterInfo : '') + + chalk.dim(` (${filtered.length}${filtered.length < repos.length ? `/${repos.length}` : ''})`) + + '\n', ) - this.log(renderTable(filtered, [ - { header: 'Name', key: 'name', width: 40 }, - { header: 'Language', key: 'language', width: 14, format: (v) => v || '—', colorize: (v) => v === '—' ? chalk.dim(v) : langColor(v) }, - { header: 'Last push', key: 'pushedAt', width: 12, format: (v) => { - const d = new Date(String(v)) - return isNaN(d.getTime()) ? 
'—' : d.toLocaleDateString() - }}, - { header: 'Description', key: 'description', width: 60, format: (v) => String(v || '—') }, - ])) + this.log( + renderTable(filtered, [ + {header: 'Name', key: 'name', width: 40}, + { + header: 'Language', + key: 'language', + width: 14, + format: (v) => v || '—', + colorize: (v) => (v === '—' ? chalk.dim(v) : langColor(v)), + }, + { + header: 'Last push', + key: 'pushedAt', + width: 12, + format: (v) => { + const d = new Date(String(v)) + return isNaN(d.getTime()) ? '—' : d.toLocaleDateString() + }, + }, + {header: 'Description', key: 'description', width: 60, format: (v) => String(v || '—')}, + ]), + ) this.log('') - return { repositories: filtered, total: filtered.length } + return {repositories: filtered, total: filtered.length} } } diff --git a/src/commands/search.js b/src/commands/search.js index d9b62f0..27bebb8 100644 --- a/src/commands/search.js +++ b/src/commands/search.js @@ -1,12 +1,12 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { searchCode } from '../services/github.js' -import { loadConfig } from '../services/config.js' -import { renderTable } from '../formatters/table.js' +import {searchCode} from '../services/github.js' +import {loadConfig} from '../services/config.js' +import {renderTable} from '../formatters/table.js' export default class Search extends Command { - static description = 'Cerca codice nei repository dell\'organizzazione' + static description = "Cerca codice nei repository dell'organizzazione" static examples = [ '<%= config.bin %> search "getUserById"', @@ -17,17 +17,17 @@ export default class Search extends Command { static enableJsonFlag = true static args = { - term: Args.string({ description: 'Termine di ricerca', required: true }), + term: Args.string({description: 'Termine di ricerca', required: true}), } static flags = { - language: Flags.string({ description: 'Filtra per 
linguaggio' }), - repo: Flags.string({ description: 'Cerca in un repo specifico' }), - limit: Flags.integer({ description: 'Max risultati', default: 20 }), + language: Flags.string({description: 'Filtra per linguaggio'}), + repo: Flags.string({description: 'Cerca in un repo specifico'}), + limit: Flags.integer({description: 'Max risultati', default: 20}), } async run() { - const { args, flags } = await this.parse(Search) + const {args, flags} = await this.parse(Search) const isJson = flags.json const config = await loadConfig() @@ -43,20 +43,22 @@ export default class Search extends Command { }) spinner?.stop() - if (isJson) return { results, total: results.length } + if (isJson) return {results, total: results.length} if (results.length === 0) { this.log(chalk.yellow(`No results found for "${args.term}" in the organization.`)) - return { results: [], total: 0 } + return {results: [], total: 0} } this.log(chalk.bold(`\n${results.length} result(s) for "${args.term}":\n`)) - this.log(renderTable(results, [ - { header: 'Repo', key: 'repo', width: 25 }, - { header: 'File', key: 'file', width: 45 }, - { header: 'Match', key: 'match' }, - ])) + this.log( + renderTable(results, [ + {header: 'Repo', key: 'repo', width: 25}, + {header: 'File', key: 'file', width: 45}, + {header: 'Match', key: 'match'}, + ]), + ) - return { results, total: results.length } + return {results, total: results.length} } } diff --git a/src/commands/security/setup.js b/src/commands/security/setup.js index 36bba19..e1fc643 100644 --- a/src/commands/security/setup.js +++ b/src/commands/security/setup.js @@ -1,30 +1,28 @@ -import { Command, Flags } from '@oclif/core' -import { confirm, select } from '@inquirer/prompts' +import {Command, Flags} from '@oclif/core' +import {confirm, select} from '@inquirer/prompts' import ora from 'ora' import chalk from 'chalk' -import { execa } from 'execa' -import { detectPlatform } from '../../services/platform.js' -import { exec } from '../../services/shell.js' 
-import { buildSteps, checkToolStatus, listGpgKeys, deriveOverallStatus } from '../../services/security.js' -import { formatEducationalIntro, formatStepHeader, formatSecuritySummary } from '../../formatters/security.js' +import {execa} from 'execa' +import {detectPlatform} from '../../services/platform.js' +import {exec} from '../../services/shell.js' +import {buildSteps, checkToolStatus, listGpgKeys, deriveOverallStatus} from '../../services/security.js' +import {formatEducationalIntro, formatStepHeader, formatSecuritySummary} from '../../formatters/security.js' /** @import { SetupSession, SetupStep, StepResult, PlatformInfo } from '../../types.js' */ export default class SecuritySetup extends Command { - static description = 'Interactive wizard to install and configure credential protection tools (aws-vault, pass, GPG, Git Credential Manager, macOS Keychain)' + static description = + 'Interactive wizard to install and configure credential protection tools (aws-vault, pass, GPG, Git Credential Manager, macOS Keychain)' - static examples = [ - '<%= config.bin %> security setup', - '<%= config.bin %> security setup --json', - ] + static examples = ['<%= config.bin %> security setup', '<%= config.bin %> security setup --json'] static enableJsonFlag = true static flags = { - help: Flags.help({ char: 'h' }), + help: Flags.help({char: 'h'}), } async run() { - const { flags } = await this.parse(SecuritySetup) + const {flags} = await this.parse(SecuritySetup) const isJson = flags.json // FR-018: Detect non-interactive environments @@ -34,22 +32,19 @@ export default class SecuritySetup extends Command { if ((isCI || isNonInteractive) && !isJson) { this.error( 'This command requires an interactive terminal (TTY). 
Run with --json for a non-interactive health check.', - { exit: 1 }, + {exit: 1}, ) } // Detect platform const platformInfo = await detectPlatform() - const { platform } = platformInfo + const {platform} = platformInfo // FR-019: Sudo pre-flight on Linux/WSL2 if (platform !== 'macos' && !isJson) { const sudoCheck = await exec('sudo', ['-n', 'true']) if (sudoCheck.exitCode !== 0) { - this.error( - 'sudo access is required to install packages. Run `sudo -v` to authenticate and retry.', - { exit: 1 }, - ) + this.error('sudo access is required to install packages. Run `sudo -v` to authenticate and retry.', {exit: 1}) } } @@ -68,7 +63,11 @@ export default class SecuritySetup extends Command { // --------------------------------------------------------------------------- // Pre-check: show current tool status // --------------------------------------------------------------------------- - const spinner = ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking current tool status...') }).start() + const spinner = ora({ + spinner: 'arc', + color: false, + text: chalk.hex('#FF6B2B')('Checking current tool status...'), + }).start() const currentStatus = await checkToolStatus(platform) spinner.stop() @@ -100,7 +99,7 @@ export default class SecuritySetup extends Command { }) if (!understood) { this.log('Setup cancelled.') - return { platform, selection: null, tools: currentStatus, overallStatus: deriveOverallStatus(currentStatus) } + return {platform, selection: null, tools: currentStatus, overallStatus: deriveOverallStatus(currentStatus)} } // --------------------------------------------------------------------------- @@ -109,9 +108,9 @@ export default class SecuritySetup extends Command { const selectionValue = await select({ message: 'What would you like to set up?', choices: [ - { name: 'Both AWS and Git credentials (recommended)', value: 'both' }, - { name: 'AWS credentials only (aws-vault)', value: 'aws' }, - { name: 'Git credentials only (macOS Keychain 
/ GCM)', value: 'git' }, + {name: 'Both AWS and Git credentials (recommended)', value: 'both'}, + {name: 'AWS credentials only (aws-vault)', value: 'aws'}, + {name: 'Git credentials only (macOS Keychain / GCM)', value: 'git'}, ], }) @@ -131,7 +130,7 @@ export default class SecuritySetup extends Command { name: `${k.name} <${k.email}> (${k.id})`, value: k.id, })), - { name: 'Create a new GPG key', value: '__new__' }, + {name: 'Create a new GPG key', value: '__new__'}, ] const chosen = await select({ message: 'Select a GPG key for pass and Git Credential Manager:', @@ -145,7 +144,7 @@ export default class SecuritySetup extends Command { // --------------------------------------------------------------------------- // Build steps // --------------------------------------------------------------------------- - const steps = buildSteps(platformInfo, selection, { gpgId }) + const steps = buildSteps(platformInfo, selection, {gpgId}) /** @type {SetupSession} */ const session = { @@ -166,9 +165,9 @@ export default class SecuritySetup extends Command { // FR-014: confirmation prompt before system-level changes if (step.requiresConfirmation) { - const proceed = await confirm({ message: `Proceed with: ${step.label}?`, default: true }) + const proceed = await confirm({message: `Proceed with: ${step.label}?`, default: true}) if (!proceed) { - session.results.set(step.id, { status: 'skipped', message: 'Skipped by user' }) + session.results.set(step.id, {status: 'skipped', message: 'Skipped by user'}) this.log(chalk.dim(' Skipped.')) continue } @@ -179,17 +178,17 @@ export default class SecuritySetup extends Command { this.log(chalk.cyan('\n GPG will now prompt you for a passphrase in your terminal.')) this.log(chalk.dim(' Follow the interactive prompts to complete key generation.\n')) try { - await execa('gpg', ['--full-generate-key'], { stdio: 'inherit', reject: true }) + await execa('gpg', ['--full-generate-key'], {stdio: 'inherit', reject: true}) // Refresh the gpgId from 
newly created key const newKeys = await listGpgKeys() if (newKeys.length > 0) { gpgId = newKeys[0].id // gpgId is now set — subsequent step closures capture it via the shared context object } - session.results.set(step.id, { status: 'success', message: `GPG key created (${gpgId || 'new key'})` }) + session.results.set(step.id, {status: 'success', message: `GPG key created (${gpgId || 'new key'})`}) this.log(chalk.green(' ✔ GPG key created')) } catch { - const result = { status: /** @type {'failed'} */ ('failed'), hint: 'Run manually: gpg --full-generate-key' } + const result = {status: /** @type {'failed'} */ ('failed'), hint: 'Run manually: gpg --full-generate-key'} session.results.set(step.id, result) this.log(chalk.red(' ✗ GPG key creation failed')) this.log(chalk.dim(` → ${result.hint}`)) @@ -200,7 +199,7 @@ export default class SecuritySetup extends Command { } // Regular step with spinner - const stepSpinner = ora({ spinner: 'arc', color: false, text: chalk.dim(step.label) }).start() + const stepSpinner = ora({spinner: 'arc', color: false, text: chalk.dim(step.label)}).start() let result try { @@ -243,7 +242,12 @@ export default class SecuritySetup extends Command { platform, selection, tools: currentStatus, - overallStatus: session.overallStatus === 'completed' ? 'success' : session.overallStatus === 'failed' ? 'partial' : 'not-configured', + overallStatus: + session.overallStatus === 'completed' + ? 'success' + : session.overallStatus === 'failed' + ? 
'partial' + : 'not-configured', } } } diff --git a/src/commands/sync-config-ai/index.js b/src/commands/sync-config-ai/index.js new file mode 100644 index 0000000..e69a05d --- /dev/null +++ b/src/commands/sync-config-ai/index.js @@ -0,0 +1,143 @@ +import {Command, Flags} from '@oclif/core' +import ora from 'ora' + +import {scanEnvironments, computeCategoryCounts} from '../../services/ai-env-scanner.js' +import { + loadAIConfig, + addEntry, + updateEntry, + deactivateEntry, + activateEntry, + deleteEntry, +} from '../../services/ai-config-store.js' +import {deployEntry, undeployEntry, reconcileOnScan} from '../../services/ai-env-deployer.js' +import {loadConfig} from '../../services/config.js' +import {formatEnvironmentsTable, formatCategoriesTable} from '../../formatters/ai-config.js' +import {startTabTUI} from '../../utils/tui/tab-tui.js' +import {DvmiError} from '../../utils/errors.js' + +/** @import { DetectedEnvironment, CategoryEntry } from '../../types.js' */ + +export default class SyncConfigAi extends Command { + static description = 'Manage AI coding tool configurations across environments via TUI' + + static examples = ['<%= config.bin %> sync-config-ai', '<%= config.bin %> sync-config-ai --json'] + + static enableJsonFlag = true + + static flags = { + help: Flags.help({char: 'h'}), + } + + async run() { + const {flags} = await this.parse(SyncConfigAi) + const isJson = flags.json + + // ── Scan environments ──────────────────────────────────────────────────── + const spinner = isJson ? null : ora('Scanning AI coding environments…').start() + let detectedEnvs + + try { + detectedEnvs = scanEnvironments(process.cwd()) + } catch (err) { + spinner?.fail('Scan failed') + throw new DvmiError( + 'Failed to scan AI coding environments', + err instanceof Error ? 
err.message : 'Check filesystem permissions', + ) + } + + // ── Load AI config store ───────────────────────────────────────────────── + let store + try { + store = await loadAIConfig() + } catch { + spinner?.fail('Failed to load AI config') + throw new DvmiError( + 'AI config file is corrupted', + 'Delete `~/.config/dvmi/ai-config.json` to reset, or fix the JSON manually', + ) + } + + // ── Reconcile: re-deploy/undeploy based on current environment detection ─ + if (detectedEnvs.length > 0 && store.entries.length > 0) { + try { + await reconcileOnScan(store.entries, detectedEnvs, process.cwd()) + // Reload store after reconciliation in case it mutated entries + store = await loadAIConfig() + } catch { + // Reconciliation errors are non-fatal — continue with current state + } + } + + // ── Compute per-environment category counts ────────────────────────────── + for (const env of detectedEnvs) { + env.counts = computeCategoryCounts(env.id, store.entries) + } + + spinner?.stop() + + // ── JSON mode ──────────────────────────────────────────────────────────── + if (isJson) { + const categories = { + mcp: store.entries.filter((e) => e.type === 'mcp'), + command: store.entries.filter((e) => e.type === 'command'), + skill: store.entries.filter((e) => e.type === 'skill'), + agent: store.entries.filter((e) => e.type === 'agent'), + } + return {environments: detectedEnvs, categories} + } + + // ── Check chezmoi config ───────────────────────────────────────────────── + let chezmoiEnabled = false + try { + const cliConfig = await loadConfig() + chezmoiEnabled = cliConfig.dotfiles?.enabled === true + } catch { + // Non-fatal — chezmoi tip will show + } + + // ── Launch TUI ─────────────────────────────────────────────────────────── + await startTabTUI({ + envs: detectedEnvs, + entries: store.entries, + chezmoiEnabled, + formatEnvs: formatEnvironmentsTable, + formatCats: formatCategoriesTable, + refreshEntries: async () => { + const s = await loadAIConfig() + return s.entries 
+ }, + onAction: async (action) => { + // Reload current entries for each action to avoid stale data + const currentStore = await loadAIConfig() + + if (action.type === 'create') { + const created = await addEntry({ + name: action.values.name, + type: action.tabKey || 'mcp', + environments: action.values.environments || [], + params: action.values, + }) + await deployEntry(created, detectedEnvs, process.cwd()) + } else if (action.type === 'edit') { + const updated = await updateEntry(action.id, {params: action.values}) + await deployEntry(updated, detectedEnvs, process.cwd()) + } else if (action.type === 'delete') { + await deleteEntry(action.id) + await undeployEntry( + currentStore.entries.find((e) => e.id === action.id), + detectedEnvs, + process.cwd(), + ) + } else if (action.type === 'deactivate') { + const entry = await deactivateEntry(action.id) + await undeployEntry(entry, detectedEnvs, process.cwd()) + } else if (action.type === 'activate') { + const entry = await activateEntry(action.id) + await deployEntry(entry, detectedEnvs, process.cwd()) + } + }, + }) + } +} diff --git a/src/commands/tasks/assigned.js b/src/commands/tasks/assigned.js index f3ee373..6163148 100644 --- a/src/commands/tasks/assigned.js +++ b/src/commands/tasks/assigned.js @@ -1,9 +1,9 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { getTasks, getTasksByList, isAuthenticated } from '../../services/clickup.js' -import { loadConfig } from '../../services/config.js' -import { renderTable } from '../../formatters/table.js' +import {getTasks, getTasksByList, isAuthenticated} from '../../services/clickup.js' +import {loadConfig} from '../../services/config.js' +import {renderTable} from '../../formatters/table.js' export default class TasksAssigned extends Command { static description = 'Task ClickUp assegnati a te (alias di tasks list)' @@ -19,7 +19,7 @@ export default class TasksAssigned 
extends Command { static enableJsonFlag = true static flags = { - status: Flags.string({ description: 'Filtra per status (open, in_progress, done)' }), + status: Flags.string({description: 'Filtra per status (open, in_progress, done)'}), search: Flags.string({ char: 's', description: 'Cerca nel titolo del task (case-insensitive)', @@ -30,7 +30,7 @@ export default class TasksAssigned extends Command { } async run() { - const { flags } = await this.parse(TasksAssigned) + const {flags} = await this.parse(TasksAssigned) const isJson = flags.json const config = await loadConfig() @@ -45,7 +45,9 @@ export default class TasksAssigned extends Command { this.error('ClickUp team ID not configured. Run `dvmi init` to configure ClickUp.') } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching tasks...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching tasks...')}).start() /** @param {number} count */ const onProgress = (count) => { @@ -54,40 +56,38 @@ export default class TasksAssigned extends Command { let tasks if (flags['list-id']) { - tasks = await getTasksByList(flags['list-id'], { status: flags.status }, onProgress).catch((err) => { + tasks = await getTasksByList(flags['list-id'], {status: flags.status}, onProgress).catch((err) => { spinner?.stop() this.error(err.message) }) } else { - tasks = await getTasks(/** @type {string} */ (teamId), { status: flags.status }, onProgress) + tasks = await getTasks(/** @type {string} */ (teamId), {status: flags.status}, onProgress) } spinner?.stop() // Apply search filter const searchQuery = flags.search?.toLowerCase() - const filtered = searchQuery - ? tasks.filter((t) => t.name.toLowerCase().includes(searchQuery)) - : tasks + const filtered = searchQuery ? 
tasks.filter((t) => t.name.toLowerCase().includes(searchQuery)) : tasks - if (isJson) return { tasks: filtered } + if (isJson) return {tasks: filtered} if (tasks.length === 0) { this.log(chalk.dim('No tasks assigned to you.')) - return { tasks: [] } + return {tasks: []} } if (filtered.length === 0) { this.log(chalk.dim('No tasks matching filters.')) - return { tasks: [] } + return {tasks: []} } // Priority label + color const priorityLabel = (p) => ['', 'URGENT', 'HIGH', 'NORMAL', 'LOW'][p] ?? String(p) const priorityColor = (label) => { if (label === 'URGENT') return chalk.red.bold(label) - if (label === 'HIGH') return chalk.yellow(label) + if (label === 'HIGH') return chalk.yellow(label) if (label === 'NORMAL') return chalk.white(label) - if (label === 'LOW') return chalk.dim(label) + if (label === 'LOW') return chalk.dim(label) return label } @@ -96,7 +96,7 @@ export default class TasksAssigned extends Command { const s = status.toLowerCase() if (s.includes('done') || s.includes('complet') || s.includes('closed')) return chalk.green(status) if (s.includes('progress') || s.includes('active') || s.includes('open')) return chalk.cyan(status) - if (s.includes('block') || s.includes('review') || s.includes('wait')) return chalk.yellow(status) + if (s.includes('block') || s.includes('review') || s.includes('wait')) return chalk.yellow(status) return chalk.dim(status) } @@ -105,27 +105,37 @@ export default class TasksAssigned extends Command { flags.status && chalk.dim(`status: ${chalk.white(flags.status)}`), flags.search && chalk.dim(`search: ${chalk.white(`"${flags.search}"`)}`), flags['list-id'] && chalk.dim(`list-id: ${chalk.white(flags['list-id'])}`), - ].filter(Boolean).join(chalk.dim(' · ')) + ] + .filter(Boolean) + .join(chalk.dim(' · ')) this.log( chalk.bold('\nYour assigned tasks') + - (filterInfo ? chalk.dim(' — ') + filterInfo : '') + - chalk.dim(` (${filtered.length}${filtered.length < tasks.length ? `/${tasks.length}` : ''})`) + - '\n', + (filterInfo ? 
chalk.dim(' — ') + filterInfo : '') + + chalk.dim(` (${filtered.length}${filtered.length < tasks.length ? `/${tasks.length}` : ''})`) + + '\n', ) - this.log(renderTable(filtered, [ - { header: 'ID', key: 'id', width: 10 }, - { header: 'Link', key: 'url', width: 42, format: (v) => v ?? '—' }, - { header: 'Priority', key: 'priority', width: 8, format: (v) => priorityLabel(Number(v)), colorize: priorityColor }, - { header: 'Status', key: 'status', width: 15, colorize: statusColor }, - { header: 'Due', key: 'dueDate', width: 12, format: (v) => v ?? '—' }, - { header: 'Lista', key: 'listName', width: 20, format: (v) => v ?? '—' }, - { header: 'Cartella', key: 'folderName', width: 20, format: (v) => v ?? '—' }, - { header: 'Description', key: 'name', width: 55 }, - ])) + this.log( + renderTable(filtered, [ + {header: 'ID', key: 'id', width: 10}, + {header: 'Link', key: 'url', width: 42, format: (v) => v ?? '—'}, + { + header: 'Priority', + key: 'priority', + width: 8, + format: (v) => priorityLabel(Number(v)), + colorize: priorityColor, + }, + {header: 'Status', key: 'status', width: 15, colorize: statusColor}, + {header: 'Due', key: 'dueDate', width: 12, format: (v) => v ?? '—'}, + {header: 'Lista', key: 'listName', width: 20, format: (v) => v ?? '—'}, + {header: 'Cartella', key: 'folderName', width: 20, format: (v) => v ?? 
'—'}, + {header: 'Description', key: 'name', width: 55}, + ]), + ) this.log('') - return { tasks: filtered } + return {tasks: filtered} } } diff --git a/src/commands/tasks/list.js b/src/commands/tasks/list.js index 8c300fa..ab0aa3a 100644 --- a/src/commands/tasks/list.js +++ b/src/commands/tasks/list.js @@ -1,9 +1,9 @@ -import { Command, Flags } from '@oclif/core' +import {Command, Flags} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { getTasks, getTasksByList, isAuthenticated } from '../../services/clickup.js' -import { loadConfig } from '../../services/config.js' -import { renderTable } from '../../formatters/table.js' +import {getTasks, getTasksByList, isAuthenticated} from '../../services/clickup.js' +import {loadConfig} from '../../services/config.js' +import {renderTable} from '../../formatters/table.js' export default class TasksList extends Command { static description = 'Task ClickUp assegnati a te' @@ -21,7 +21,7 @@ export default class TasksList extends Command { static enableJsonFlag = true static flags = { - status: Flags.string({ description: 'Filtra per status (open, in_progress, done)' }), + status: Flags.string({description: 'Filtra per status (open, in_progress, done)'}), search: Flags.string({ char: 's', description: 'Cerca nel titolo del task (case-insensitive)', @@ -32,7 +32,7 @@ export default class TasksList extends Command { } async run() { - const { flags } = await this.parse(TasksList) + const {flags} = await this.parse(TasksList) const isJson = flags.json const config = await loadConfig() @@ -47,7 +47,9 @@ export default class TasksList extends Command { this.error('ClickUp team ID not configured. Run `dvmi init` to configure ClickUp.') } - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching tasks...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching tasks...')}).start() /** @param {number} count */ const onProgress = (count) => { @@ -56,40 +58,38 @@ export default class TasksList extends Command { let tasks if (flags['list-id']) { - tasks = await getTasksByList(flags['list-id'], { status: flags.status }, onProgress).catch((err) => { + tasks = await getTasksByList(flags['list-id'], {status: flags.status}, onProgress).catch((err) => { spinner?.stop() this.error(err.message) }) } else { - tasks = await getTasks(/** @type {string} */ (teamId), { status: flags.status }, onProgress) + tasks = await getTasks(/** @type {string} */ (teamId), {status: flags.status}, onProgress) } spinner?.stop() // Apply search filter const searchQuery = flags.search?.toLowerCase() - const filtered = searchQuery - ? tasks.filter((t) => t.name.toLowerCase().includes(searchQuery)) - : tasks + const filtered = searchQuery ? tasks.filter((t) => t.name.toLowerCase().includes(searchQuery)) : tasks - if (isJson) return { tasks: filtered } + if (isJson) return {tasks: filtered} if (tasks.length === 0) { this.log(chalk.dim('No tasks assigned to you.')) - return { tasks: [] } + return {tasks: []} } if (filtered.length === 0) { this.log(chalk.dim(`No tasks matching filters.`)) - return { tasks: [] } + return {tasks: []} } // Priority label + color const priorityLabel = (p) => ['', 'URGENT', 'HIGH', 'NORMAL', 'LOW'][p] ?? 
String(p) const priorityColor = (label) => { if (label === 'URGENT') return chalk.red.bold(label) - if (label === 'HIGH') return chalk.yellow(label) + if (label === 'HIGH') return chalk.yellow(label) if (label === 'NORMAL') return chalk.white(label) - if (label === 'LOW') return chalk.dim(label) + if (label === 'LOW') return chalk.dim(label) return label } @@ -98,7 +98,7 @@ export default class TasksList extends Command { const s = status.toLowerCase() if (s.includes('done') || s.includes('complet') || s.includes('closed')) return chalk.green(status) if (s.includes('progress') || s.includes('active') || s.includes('open')) return chalk.cyan(status) - if (s.includes('block') || s.includes('review') || s.includes('wait')) return chalk.yellow(status) + if (s.includes('block') || s.includes('review') || s.includes('wait')) return chalk.yellow(status) return chalk.dim(status) } @@ -107,27 +107,37 @@ export default class TasksList extends Command { flags.status && chalk.dim(`status: ${chalk.white(flags.status)}`), flags.search && chalk.dim(`search: ${chalk.white(`"${flags.search}"`)}`), flags['list-id'] && chalk.dim(`list-id: ${chalk.white(flags['list-id'])}`), - ].filter(Boolean).join(chalk.dim(' · ')) + ] + .filter(Boolean) + .join(chalk.dim(' · ')) this.log( chalk.bold('\nYour tasks') + - (filterInfo ? chalk.dim(' — ') + filterInfo : '') + - chalk.dim(` (${filtered.length}${filtered.length < tasks.length ? `/${tasks.length}` : ''})`) + - '\n', + (filterInfo ? chalk.dim(' — ') + filterInfo : '') + + chalk.dim(` (${filtered.length}${filtered.length < tasks.length ? `/${tasks.length}` : ''})`) + + '\n', ) - this.log(renderTable(filtered, [ - { header: 'ID', key: 'id', width: 10 }, - { header: 'Link', key: 'url', width: 42, format: (v) => v ?? 
'—' }, - { header: 'Priority', key: 'priority', width: 8, format: (v) => priorityLabel(Number(v)), colorize: priorityColor }, - { header: 'Status', key: 'status', width: 15, colorize: statusColor }, - { header: 'Due', key: 'dueDate', width: 12, format: (v) => v ?? '—' }, - { header: 'Lista', key: 'listName', width: 20, format: (v) => v ?? '—' }, - { header: 'Cartella', key: 'folderName', width: 20, format: (v) => v ?? '—' }, - { header: 'Description', key: 'name', width: 55 }, - ])) + this.log( + renderTable(filtered, [ + {header: 'ID', key: 'id', width: 10}, + {header: 'Link', key: 'url', width: 42, format: (v) => v ?? '—'}, + { + header: 'Priority', + key: 'priority', + width: 8, + format: (v) => priorityLabel(Number(v)), + colorize: priorityColor, + }, + {header: 'Status', key: 'status', width: 15, colorize: statusColor}, + {header: 'Due', key: 'dueDate', width: 12, format: (v) => v ?? '—'}, + {header: 'Lista', key: 'listName', width: 20, format: (v) => v ?? '—'}, + {header: 'Cartella', key: 'folderName', width: 20, format: (v) => v ?? '—'}, + {header: 'Description', key: 'name', width: 55}, + ]), + ) this.log('') - return { tasks: filtered } + return {tasks: filtered} } } diff --git a/src/commands/tasks/today.js b/src/commands/tasks/today.js index 11ec775..c5e36e7 100644 --- a/src/commands/tasks/today.js +++ b/src/commands/tasks/today.js @@ -1,9 +1,9 @@ -import { Command } from '@oclif/core' +import {Command} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { getTasksToday, isAuthenticated } from '../../services/clickup.js' -import { loadConfig } from '../../services/config.js' -import { renderTable } from '../../formatters/table.js' +import {getTasksToday, isAuthenticated} from '../../services/clickup.js' +import {loadConfig} from '../../services/config.js' +import {renderTable} from '../../formatters/table.js' /** * Return today's date as a local YYYY-MM-DD string. 
@@ -15,17 +15,15 @@ function localTodayString() { } export default class TasksToday extends Command { - static description = 'Task in lavorazione oggi: data odierna nel range [startDate, dueDate]. Include task scaduti non conclusi.' + static description = + 'Task in lavorazione oggi: data odierna nel range [startDate, dueDate]. Include task scaduti non conclusi.' - static examples = [ - '<%= config.bin %> tasks today', - '<%= config.bin %> tasks today --json', - ] + static examples = ['<%= config.bin %> tasks today', '<%= config.bin %> tasks today --json'] static enableJsonFlag = true async run() { - const { flags } = await this.parse(TasksToday) + const {flags} = await this.parse(TasksToday) const isJson = flags.json const config = await loadConfig() @@ -36,38 +34,42 @@ export default class TasksToday extends Command { const teamId = config.clickup?.teamId if (!teamId) this.error('ClickUp team ID not configured. Run `dvmi init` to configure ClickUp.') - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching today\'s tasks...') }).start() + const spinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')("Fetching today's tasks...")}).start() const tasks = await getTasksToday(teamId) spinner?.stop() - if (isJson) return { tasks } + if (isJson) return {tasks} if (tasks.length === 0) { this.log(chalk.dim('No tasks for today.')) this.log(chalk.dim('Check `dvmi tasks list` for all assigned tasks.')) - return { tasks: [] } + return {tasks: []} } const today = localTodayString() - this.log(chalk.bold('\nToday\'s tasks:\n')) - this.log(renderTable(tasks, [ - { header: 'Title', key: 'name', width: 45 }, - { header: 'Status', key: 'status', width: 15 }, - { - header: 'Due', - key: 'dueDate', - width: 12, - format: (v) => v ?? 
'—', - colorize: (v) => { - if (!v) return chalk.dim('—') - if (v < today) return chalk.red.bold(v) - return v + this.log(chalk.bold("\nToday's tasks:\n")) + this.log( + renderTable(tasks, [ + {header: 'Title', key: 'name', width: 45}, + {header: 'Status', key: 'status', width: 15}, + { + header: 'Due', + key: 'dueDate', + width: 12, + format: (v) => v ?? '—', + colorize: (v) => { + if (!v) return chalk.dim('—') + if (v < today) return chalk.red.bold(v) + return v + }, }, - }, - { header: 'Link', key: 'url', format: (v) => v ?? '—' }, - ])) + {header: 'Link', key: 'url', format: (v) => v ?? '—'}, + ]), + ) - return { tasks } + return {tasks} } } diff --git a/src/commands/upgrade.js b/src/commands/upgrade.js index e2b083b..2bc389a 100644 --- a/src/commands/upgrade.js +++ b/src/commands/upgrade.js @@ -1,25 +1,24 @@ -import { Command } from '@oclif/core' +import {Command} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { checkForUpdate } from '../services/version-check.js' -import { exec } from '../services/shell.js' +import {checkForUpdate} from '../services/version-check.js' +import {exec} from '../services/shell.js' export default class Upgrade extends Command { - static description = 'Aggiorna la CLI all\'ultima versione disponibile' + static description = "Aggiorna la CLI all'ultima versione disponibile" - static examples = [ - '<%= config.bin %> upgrade', - '<%= config.bin %> upgrade --json', - ] + static examples = ['<%= config.bin %> upgrade', '<%= config.bin %> upgrade --json'] static enableJsonFlag = true async run() { - const { flags } = await this.parse(Upgrade) + const {flags} = await this.parse(Upgrade) const isJson = flags.json - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking for updates...') }).start() - const { hasUpdate, current, latest } = await checkForUpdate({ force: true }) + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Checking for updates...')}).start() + const {hasUpdate, current, latest} = await checkForUpdate({force: true}) spinner?.stop() // Guard against malformed version strings from the GitHub Releases API @@ -29,7 +28,7 @@ export default class Upgrade extends Command { if (!hasUpdate) { const msg = `You're already on the latest version (${current})` - if (isJson) return { currentVersion: current, latestVersion: latest, updated: false } + if (isJson) return {currentVersion: current, latestVersion: latest, updated: false} this.log(chalk.green('✓') + ' ' + msg) return } @@ -38,17 +37,19 @@ export default class Upgrade extends Command { this.log(`Updating from ${chalk.yellow(current)} to ${chalk.green(latest)}`) } - const updateSpinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Installing update...') }).start() - // Non passare --registry globale: verrebbe usato anche per le dipendenze. - // ~/.npmrc ha già devvami:registry per il solo scope corretto. - const result = await exec('npm', ['install', '-g', `devvami@${latest}`]) + const updateSpinner = isJson + ? null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Installing update...')}).start() + // Non passare --registry globale: verrebbe usato anche per le dipendenze. + // ~/.npmrc ha già devvami:registry per il solo scope corretto. 
+ const result = await exec('npm', ['install', '-g', `devvami@${latest}`]) if (result.exitCode !== 0) { updateSpinner?.fail('Update failed') this.error(`Update failed: ${result.stderr}`) } updateSpinner?.succeed(`Updated to ${latest}`) - const response = { currentVersion: current, latestVersion: latest, updated: true } + const response = {currentVersion: current, latestVersion: latest, updated: true} if (isJson) return response this.log(chalk.green('✓') + ` Successfully updated to ${latest}`) diff --git a/src/commands/vuln/detail.js b/src/commands/vuln/detail.js index d254447..b55c2ae 100644 --- a/src/commands/vuln/detail.js +++ b/src/commands/vuln/detail.js @@ -1,9 +1,9 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import ora from 'ora' -import { getCveDetail } from '../../services/nvd.js' -import { formatCveDetail } from '../../formatters/vuln.js' -import { openBrowser } from '../../utils/open-browser.js' -import { ValidationError } from '../../utils/errors.js' +import {getCveDetail} from '../../services/nvd.js' +import {formatCveDetail} from '../../formatters/vuln.js' +import {openBrowser} from '../../utils/open-browser.js' +import {ValidationError} from '../../utils/errors.js' export default class VulnDetail extends Command { static description = 'View full details for a specific CVE' @@ -17,7 +17,7 @@ export default class VulnDetail extends Command { static enableJsonFlag = true static args = { - cveId: Args.string({ description: 'CVE identifier (e.g. CVE-2021-44228)', required: true }), + cveId: Args.string({description: 'CVE identifier (e.g. 
CVE-2021-44228)', required: true}), } static flags = { @@ -29,9 +29,9 @@ export default class VulnDetail extends Command { } async run() { - const { args, flags } = await this.parse(VulnDetail) + const {args, flags} = await this.parse(VulnDetail) const isJson = flags.json - const { cveId } = args + const {cveId} = args if (!cveId || !/^CVE-\d{4}-\d{4,}$/i.test(cveId)) { throw new ValidationError( diff --git a/src/commands/vuln/scan.js b/src/commands/vuln/scan.js index a9e627f..1d1058a 100644 --- a/src/commands/vuln/scan.js +++ b/src/commands/vuln/scan.js @@ -1,12 +1,18 @@ -import { Command, Flags } from '@oclif/core' -import { writeFile } from 'node:fs/promises' +import {Command, Flags} from '@oclif/core' +import {writeFile} from 'node:fs/promises' import ora from 'ora' import chalk from 'chalk' -import { detectEcosystems, supportedEcosystemsMessage } from '../../services/audit-detector.js' -import { runAudit, summarizeFindings, filterBySeverity } from '../../services/audit-runner.js' -import { formatFindingsTable, formatScanSummary, formatMarkdownReport, truncate, colorSeverity } from '../../formatters/vuln.js' -import { getCveDetail } from '../../services/nvd.js' -import { startInteractiveTable } from '../../utils/tui/navigable-table.js' +import {detectEcosystems, supportedEcosystemsMessage} from '../../services/audit-detector.js' +import {runAudit, summarizeFindings, filterBySeverity} from '../../services/audit-runner.js' +import { + formatFindingsTable, + formatScanSummary, + formatMarkdownReport, + truncate, + colorSeverity, +} from '../../formatters/vuln.js' +import {getCveDetail} from '../../services/nvd.js' +import {startInteractiveTable} from '../../utils/tui/navigable-table.js' // Minimum terminal rows required to show the interactive TUI (same threshold as vuln search) const MIN_TTY_ROWS = 6 @@ -49,9 +55,9 @@ export default class VulnScan extends Command { } async run() { - const { flags } = await this.parse(VulnScan) + const {flags} = await 
this.parse(VulnScan) const isJson = flags.json - const { severity, 'no-fail': noFail, report } = flags + const {severity, 'no-fail': noFail, report} = flags const projectPath = process.env.DVMI_SCAN_DIR ?? process.cwd() const scanDate = new Date().toISOString() @@ -66,8 +72,8 @@ export default class VulnScan extends Command { scanDate, ecosystems: [], findings: [], - summary: { critical: 0, high: 0, medium: 0, low: 0, unknown: 0, total: 0 }, - errors: [{ ecosystem: 'none', message: 'No supported package manager detected.' }], + summary: {critical: 0, high: 0, medium: 0, low: 0, unknown: 0, total: 0}, + errors: [{ecosystem: 'none', message: 'No supported package manager detected.'}], } } @@ -99,11 +105,11 @@ export default class VulnScan extends Command { for (const eco of ecosystems) { const spinner = isJson ? null : ora(` Scanning ${eco.name} dependencies...`).start() - const { findings, error } = await runAudit(eco) + const {findings, error} = await runAudit(eco) if (error) { spinner?.fail(` Scanning ${eco.name} dependencies... failed`) - errors.push({ ecosystem: eco.name, message: error }) + errors.push({ecosystem: eco.name, message: error}) } else { spinner?.succeed(` Scanning ${eco.name} dependencies... done`) allFindings.push(...findings) @@ -170,25 +176,38 @@ export default class VulnScan extends Command { /** @type {import('../../utils/tui/navigable-table.js').TableColumnDef[]} */ const columns = [ - { header: 'Package', key: 'pkg', width: COL_WIDTHS.pkg }, - { header: 'Version', key: 'version', width: COL_WIDTHS.version }, - { header: 'Severity', key: 'severity', width: COL_WIDTHS.severity, colorize: (v) => colorSeverity(v) }, - { header: 'CVE', key: 'cve', width: COL_WIDTHS.cve, colorize: (v) => (v !== '—' ? 
chalk.cyan(v) : chalk.gray(v)) }, - { header: 'Title', key: 'title', width: titleWidth }, + {header: 'Package', key: 'pkg', width: COL_WIDTHS.pkg}, + {header: 'Version', key: 'version', width: COL_WIDTHS.version}, + {header: 'Severity', key: 'severity', width: COL_WIDTHS.severity, colorize: (v) => colorSeverity(v)}, + { + header: 'CVE', + key: 'cve', + width: COL_WIDTHS.cve, + colorize: (v) => (v !== '—' ? chalk.cyan(v) : chalk.gray(v)), + }, + {header: 'Title', key: 'title', width: titleWidth}, ] await startInteractiveTable(rows, columns, heading, filteredFindings.length, getCveDetail) } else { // Non-TTY fallback: static table + summary (unchanged from pre-TUI behaviour) if (filteredFindings.length > 0) { - this.log(chalk.bold(` Findings (${filteredFindings.length} ${filteredFindings.length === 1 ? 'vulnerability' : 'vulnerabilities'})`)) + this.log( + chalk.bold( + ` Findings (${filteredFindings.length} ${filteredFindings.length === 1 ? 'vulnerability' : 'vulnerabilities'})`, + ), + ) this.log('') this.log(formatFindingsTable(filteredFindings)) this.log('') this.log(chalk.bold(' Summary')) this.log(formatScanSummary(summary)) this.log('') - this.log(chalk.yellow(` ⚠ ${filteredFindings.length} ${filteredFindings.length === 1 ? 'vulnerability' : 'vulnerabilities'} found. Run \`dvmi vuln detail \` for details.`)) + this.log( + chalk.yellow( + ` ⚠ ${filteredFindings.length} ${filteredFindings.length === 1 ? 'vulnerability' : 'vulnerabilities'} found. 
Run \`dvmi vuln detail \` for details.`, + ), + ) } } diff --git a/src/commands/vuln/search.js b/src/commands/vuln/search.js index 3d330ac..5155cb7 100644 --- a/src/commands/vuln/search.js +++ b/src/commands/vuln/search.js @@ -1,10 +1,10 @@ -import { Command, Args, Flags } from '@oclif/core' +import {Command, Args, Flags} from '@oclif/core' import ora from 'ora' import chalk from 'chalk' -import { searchCves, getCveDetail } from '../../services/nvd.js' -import { formatCveSearchTable, colorSeverity, formatScore, formatDate, truncate } from '../../formatters/vuln.js' -import { startInteractiveTable } from '../../utils/tui/navigable-table.js' -import { ValidationError } from '../../utils/errors.js' +import {searchCves, getCveDetail} from '../../services/nvd.js' +import {formatCveSearchTable, colorSeverity, formatScore, formatDate, truncate} from '../../formatters/vuln.js' +import {startInteractiveTable} from '../../utils/tui/navigable-table.js' +import {ValidationError} from '../../utils/errors.js' // Minimum terminal rows required to show the interactive TUI const MIN_TTY_ROWS = 6 @@ -33,7 +33,10 @@ export default class VulnSearch extends Command { static enableJsonFlag = true static args = { - keyword: Args.string({ description: 'Product, library, or keyword to search for (optional — omit to see all recent CVEs)', required: false }), + keyword: Args.string({ + description: 'Product, library, or keyword to search for (optional — omit to see all recent CVEs)', + required: false, + }), } static flags = { @@ -55,11 +58,11 @@ export default class VulnSearch extends Command { } async run() { - const { args, flags } = await this.parse(VulnSearch) + const {args, flags} = await this.parse(VulnSearch) const isJson = flags.json - const { keyword } = args - const { days, severity, limit } = flags + const {keyword} = args + const {days, severity, limit} = flags if (days < 1 || days > 120) { throw new ValidationError( @@ -75,13 +78,15 @@ export default class VulnSearch extends 
Command { ) } - const spinner = isJson ? null : ora(keyword ? `Searching NVD for "${keyword}"...` : `Fetching recent CVEs (last ${days} days)...`).start() + const spinner = isJson + ? null + : ora(keyword ? `Searching NVD for "${keyword}"...` : `Fetching recent CVEs (last ${days} days)...`).start() try { - const { results, totalResults } = await searchCves({ keyword, days, severity, limit }) + const {results, totalResults} = await searchCves({keyword, days, severity, limit}) spinner?.stop() - const result = { keyword: keyword ?? null, days, severity: severity ?? null, totalResults, results } + const result = {keyword: keyword ?? null, days, severity: severity ?? null, totalResults, results} if (isJson) return result @@ -108,12 +113,12 @@ export default class VulnSearch extends Command { /** @type {import('../../utils/tui/navigable-table.js').TableColumnDef[]} */ const columns = [ - { header: 'CVE ID', key: 'id', width: COL_WIDTHS.id, colorize: (v) => chalk.cyan(v) }, - { header: 'Severity', key: 'severity', width: COL_WIDTHS.severity, colorize: (v) => colorSeverity(v) }, - { header: 'Score', key: 'score', width: COL_WIDTHS.score }, - { header: 'Published', key: 'published', width: COL_WIDTHS.published }, - { header: 'Description', key: 'description', width: descWidth }, - { header: 'Reference', key: 'reference', width: COL_WIDTHS.reference }, + {header: 'CVE ID', key: 'id', width: COL_WIDTHS.id, colorize: (v) => chalk.cyan(v)}, + {header: 'Severity', key: 'severity', width: COL_WIDTHS.severity, colorize: (v) => colorSeverity(v)}, + {header: 'Score', key: 'score', width: COL_WIDTHS.score}, + {header: 'Published', key: 'published', width: COL_WIDTHS.published}, + {header: 'Description', key: 'description', width: descWidth}, + {header: 'Reference', key: 'reference', width: COL_WIDTHS.reference}, ] await startInteractiveTable(rows, columns, heading, totalResults, getCveDetail) diff --git a/src/commands/welcome.js b/src/commands/welcome.js index 90ecb33..4d64ef2 100644 
--- a/src/commands/welcome.js +++ b/src/commands/welcome.js @@ -1,5 +1,5 @@ -import { Command } from '@oclif/core' -import { printWelcomeScreen } from '../utils/welcome.js' +import {Command} from '@oclif/core' +import {printWelcomeScreen} from '../utils/welcome.js' /** * Display the dvmi cyberpunk mission dashboard. diff --git a/src/commands/whoami.js b/src/commands/whoami.js index 0ef6637..6fada92 100644 --- a/src/commands/whoami.js +++ b/src/commands/whoami.js @@ -1,66 +1,62 @@ -import { Command } from '@oclif/core' +import {Command} from '@oclif/core' import chalk from 'chalk' import ora from 'ora' -import { createOctokit } from '../services/github.js' -import { checkAWSAuth } from '../services/auth.js' -import { getCurrentVersion } from '../services/version-check.js' -import { CONFIG_PATH, loadConfig } from '../services/config.js' -import { getUser, isAuthenticated } from '../services/clickup.js' +import {createOctokit} from '../services/github.js' +import {checkAWSAuth} from '../services/auth.js' +import {getCurrentVersion} from '../services/version-check.js' +import {CONFIG_PATH, loadConfig} from '../services/config.js' +import {getUser, isAuthenticated} from '../services/clickup.js' export default class Whoami extends Command { static description = 'Mostra la tua identita su GitHub, AWS e ClickUp' - static examples = [ - '<%= config.bin %> whoami', - '<%= config.bin %> whoami --json', - ] + static examples = ['<%= config.bin %> whoami', '<%= config.bin %> whoami --json'] static enableJsonFlag = true async run() { - const { flags } = await this.parse(Whoami) + const {flags} = await this.parse(Whoami) const isJson = flags.json - const spinner = isJson ? null : ora({ spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching identity...') }).start() + const spinner = isJson + ? 
null + : ora({spinner: 'arc', color: false, text: chalk.hex('#FF6B2B')('Fetching identity...')}).start() const [ghResult, awsResult, version, cuResult] = await Promise.allSettled([ (async () => { const octokit = await createOctokit() - const { data: user } = await octokit.rest.users.getAuthenticated() - return { username: user.login, name: user.name ?? '', org: '', teams: [] } + const {data: user} = await octokit.rest.users.getAuthenticated() + return {username: user.login, name: user.name ?? '', org: '', teams: []} })(), checkAWSAuth(), getCurrentVersion(), (async () => { if (!(await isAuthenticated())) return null const [user, config] = await Promise.all([getUser(), loadConfig()]) - return { username: user.username, teamName: config.clickup?.teamName ?? null } + return {username: user.username, teamName: config.clickup?.teamName ?? null} })(), ]) spinner?.stop() - const github = - ghResult.status === 'fulfilled' - ? ghResult.value - : { username: null, error: '[NOT AUTHENTICATED]' } + const github = ghResult.status === 'fulfilled' ? ghResult.value : {username: null, error: '[NOT AUTHENTICATED]'} const aws = awsResult.status === 'fulfilled' && awsResult.value.authenticated - ? { accountId: awsResult.value.account, role: awsResult.value.role } - : { accountId: null, error: '[NOT AUTHENTICATED]' } + ? {accountId: awsResult.value.account, role: awsResult.value.role} + : {accountId: null, error: '[NOT AUTHENTICATED]'} const clickup = cuResult.status === 'fulfilled' && cuResult.value ? cuResult.value - : { username: null, teamName: null, error: '[NOT AUTHENTICATED]' } + : {username: null, teamName: null, error: '[NOT AUTHENTICATED]'} const cli = { version: version.status === 'fulfilled' ? 
version.value : '?', configPath: CONFIG_PATH, } - const result = { github, aws, clickup, cli } + const result = {github, aws, clickup, cli} if (isJson) return result diff --git a/src/formatters/ai-config.js b/src/formatters/ai-config.js new file mode 100644 index 0000000..de68414 --- /dev/null +++ b/src/formatters/ai-config.js @@ -0,0 +1,127 @@ +import chalk from 'chalk' + +/** @import { DetectedEnvironment, CategoryEntry } from '../types.js' */ + +// ────────────────────────────────────────────────────────────────────────────── +// Internal helpers +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Pad a string to a fixed width, truncating with '…' if needed. + * @param {string} str + * @param {number} width + * @returns {string} + */ +function padCell(str, width) { + if (!str) str = '' + if (str.length > width) return str.slice(0, width - 1) + '…' + return str.padEnd(width) +} + +// ────────────────────────────────────────────────────────────────────────────── +// Environments table formatter +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Format a list of detected environments as a table string for display in the TUI. 
+ * Columns: Environment (name), Status, Scope, MCPs, Commands, Skills, Agents + * @param {DetectedEnvironment[]} detectedEnvs + * @param {number} [termCols] + * @returns {string[]} Array of formatted lines (no ANSI clear/home) + */ +export function formatEnvironmentsTable(detectedEnvs, termCols = 120) { + const COL_ENV = 22 + const COL_STATUS = 24 + const COL_SCOPE = 8 + const COL_COUNT = 9 + + const headerParts = [ + chalk.bold.white(padCell('Environment', COL_ENV)), + chalk.bold.white(padCell('Status', COL_STATUS)), + chalk.bold.white(padCell('Scope', COL_SCOPE)), + chalk.bold.white(padCell('MCPs', COL_COUNT)), + chalk.bold.white(padCell('Commands', COL_COUNT)), + chalk.bold.white(padCell('Skills', COL_COUNT)), + chalk.bold.white(padCell('Agents', COL_COUNT)), + ] + + const dividerWidth = COL_ENV + COL_STATUS + COL_SCOPE + COL_COUNT * 4 + 6 * 2 + const lines = [] + lines.push(headerParts.join(' ')) + lines.push(chalk.dim('─'.repeat(Math.min(termCols, dividerWidth)))) + + for (const env of detectedEnvs) { + const hasUnreadable = env.unreadable.length > 0 + const statusText = hasUnreadable ? 'Detected (unreadable)' : 'Detected' + const statusStr = hasUnreadable + ? chalk.yellow(padCell(statusText, COL_STATUS)) + : chalk.green(padCell(statusText, COL_STATUS)) + const scopeStr = padCell(env.scope ?? 'project', COL_SCOPE) + + const mcpStr = padCell(String(env.counts.mcp), COL_COUNT) + const cmdStr = padCell(String(env.counts.command), COL_COUNT) + const skillStr = env.supportedCategories.includes('skill') + ? padCell(String(env.counts.skill), COL_COUNT) + : padCell('—', COL_COUNT) + const agentStr = env.supportedCategories.includes('agent') + ? 
padCell(String(env.counts.agent), COL_COUNT) + : padCell('—', COL_COUNT) + + lines.push([padCell(env.name, COL_ENV), statusStr, scopeStr, mcpStr, cmdStr, skillStr, agentStr].join(' ')) + } + + return lines +} + +// ────────────────────────────────────────────────────────────────────────────── +// Categories table formatter +// ────────────────────────────────────────────────────────────────────────────── + +/** @type {Record} */ +const ENV_SHORT_NAMES = { + 'vscode-copilot': 'VSCode', + 'claude-code': 'Claude', + opencode: 'OpenCode', + 'gemini-cli': 'Gemini', + 'copilot-cli': 'Copilot', +} + +/** + * Format a list of category entries as a table string for display in the TUI. + * Columns: Name, Type, Status, Environments + * @param {CategoryEntry[]} entries + * @param {number} [termCols] + * @returns {string[]} Array of formatted lines (no ANSI clear/home) + */ +export function formatCategoriesTable(entries, termCols = 120) { + const COL_NAME = 24 + const COL_TYPE = 9 + const COL_STATUS = 10 + const COL_ENVS = 36 + + const headerParts = [ + chalk.bold.white(padCell('Name', COL_NAME)), + chalk.bold.white(padCell('Type', COL_TYPE)), + chalk.bold.white(padCell('Status', COL_STATUS)), + chalk.bold.white(padCell('Environments', COL_ENVS)), + ] + + const dividerWidth = COL_NAME + COL_TYPE + COL_STATUS + COL_ENVS + 3 * 2 + const lines = [] + lines.push(headerParts.join(' ')) + lines.push(chalk.dim('─'.repeat(Math.min(termCols, dividerWidth)))) + + for (const entry of entries) { + const statusStr = entry.active + ? chalk.green(padCell('Active', COL_STATUS)) + : chalk.dim(padCell('Inactive', COL_STATUS)) + + const envNames = entry.environments.map((id) => ENV_SHORT_NAMES[id] ?? 
id).join(', ') + + lines.push( + [padCell(entry.name, COL_NAME), padCell(entry.type, COL_TYPE), statusStr, padCell(envNames, COL_ENVS)].join(' '), + ) + } + + return lines +} diff --git a/src/formatters/charts.js b/src/formatters/charts.js index 32573db..894c231 100644 --- a/src/formatters/charts.js +++ b/src/formatters/charts.js @@ -3,16 +3,7 @@ import chalk from 'chalk' /** @import { ChartSeries } from '../types.js' */ // Colour palette for multi-series charts (cycles if more than 8 series) -const PALETTE = [ - chalk.cyan, - chalk.yellow, - chalk.green, - chalk.magenta, - chalk.blue, - chalk.red, - chalk.white, - chalk.gray, -] +const PALETTE = [chalk.cyan, chalk.yellow, chalk.green, chalk.magenta, chalk.blue, chalk.red, chalk.white, chalk.gray] /** * Get the terminal width, falling back to 80 columns. @@ -54,9 +45,7 @@ export function barChart(series, options = {}) { // Combine all series into per-label totals for scaling const allLabels = series[0]?.labels ?? [] - const totals = allLabels.map((_, i) => - series.reduce((sum, s) => sum + (s.values[i] ?? 0), 0), - ) + const totals = allLabels.map((_, i) => series.reduce((sum, s) => sum + (s.values[i] ?? 
0), 0)) const maxTotal = Math.max(...totals, 0) const lines = [] @@ -71,7 +60,7 @@ export function barChart(series, options = {}) { // Build chart column by column (one char per day) // We render it as a 2D grid: rows = height levels, cols = days - const grid = Array.from({ length: BAR_HEIGHT }, () => Array(allLabels.length).fill(' ')) + const grid = Array.from({length: BAR_HEIGHT}, () => Array(allLabels.length).fill(' ')) for (let col = 0; col < allLabels.length; col++) { const total = totals[col] @@ -110,9 +99,7 @@ export function barChart(series, options = {}) { // X-axis date labels (sample every ~10 positions) const step = Math.max(1, Math.ceil(allLabels.length / Math.floor(chartWidth / 10))) - const xLabels = allLabels - .filter((_, i) => i % step === 0) - .map((l) => l.slice(5)) // "MM-DD" + const xLabels = allLabels.filter((_, i) => i % step === 0).map((l) => l.slice(5)) // "MM-DD" lines.push(' '.repeat(labelColWidth + 1) + xLabels.join(' ')) // Legend for multi-series @@ -156,9 +143,7 @@ export function lineChart(series, options = {}) { } // Build a 2D canvas: rows = chartHeight, cols = chartWidth - const canvas = Array.from({ length: chartHeight }, () => - Array(chartWidth).fill(' '), - ) + const canvas = Array.from({length: chartHeight}, () => Array(chartWidth).fill(' ')) const step = Math.max(1, Math.ceil(allLabels.length / chartWidth)) @@ -186,9 +171,7 @@ export function lineChart(series, options = {}) { // X-axis date labels const xStep = Math.max(1, Math.ceil(allLabels.length / Math.floor(chartWidth / 10))) - const xLabels = allLabels - .filter((_, i) => i % xStep === 0) - .map((l) => l.slice(5)) + const xLabels = allLabels.filter((_, i) => i % xStep === 0).map((l) => l.slice(5)) lines.push(' '.repeat(labelColWidth + 1) + xLabels.join(' ')) // Legend diff --git a/src/formatters/cost.js b/src/formatters/cost.js index 7eb6706..0bd2169 100644 --- a/src/formatters/cost.js +++ b/src/formatters/cost.js @@ -57,11 +57,5 @@ export function 
formatCostTable(entries, label, groupBy = 'service') { .map((e) => ` ${rowLabel(e, groupBy).padEnd(40)} ${formatCurrency(e.amount)}`) .join('\n') const divider = '─'.repeat(50) - return [ - `Costs for: ${label}`, - divider, - rows, - divider, - ` ${'Total'.padEnd(40)} ${formatCurrency(total)}`, - ].join('\n') + return [`Costs for: ${label}`, divider, rows, divider, ` ${'Total'.padEnd(40)} ${formatCurrency(total)}`].join('\n') } diff --git a/src/formatters/dotfiles.js b/src/formatters/dotfiles.js index 155005b..80a8c34 100644 --- a/src/formatters/dotfiles.js +++ b/src/formatters/dotfiles.js @@ -21,7 +21,9 @@ export function formatDotfilesSetup(result) { BORDER, '', chalk.bold(` Platform: ${chalk.cyan(result.platform)}`), - chalk.bold(` Status: ${result.status === 'success' ? chalk.green('success') : result.status === 'skipped' ? chalk.dim('skipped') : chalk.red('failed')}`), + chalk.bold( + ` Status: ${result.status === 'success' ? chalk.green('success') : result.status === 'skipped' ? chalk.dim('skipped') : chalk.red('failed')}`, + ), ] if (result.sourceDir) { @@ -79,11 +81,41 @@ export function formatDotfilesSummary(summary) { */ function inferCategory(filePath) { const lower = filePath.toLowerCase() - if (lower.includes('.ssh') || lower.includes('.gnupg') || lower.includes('gpg') || lower.includes('secret') || lower.includes('credential') || lower.includes('token') || lower.includes('password')) return 'Security' + if ( + lower.includes('.ssh') || + lower.includes('.gnupg') || + lower.includes('gpg') || + lower.includes('secret') || + lower.includes('credential') || + lower.includes('token') || + lower.includes('password') + ) + return 'Security' if (lower.includes('.gitconfig') || lower.includes('.gitignore') || lower.includes('.git')) return 'Git' - if (lower.includes('zshrc') || lower.includes('bashrc') || lower.includes('bash_profile') || lower.includes('zprofile') || lower.includes('fish')) return 'Shell' - if (lower.includes('vim') || lower.includes('nvim') 
|| lower.includes('emacs') || lower.includes('vscode') || lower.includes('cursor')) return 'Editor' - if (lower.includes('brew') || lower.includes('npm') || lower.includes('yarn') || lower.includes('pip') || lower.includes('gem')) return 'Package' + if ( + lower.includes('zshrc') || + lower.includes('bashrc') || + lower.includes('bash_profile') || + lower.includes('zprofile') || + lower.includes('fish') + ) + return 'Shell' + if ( + lower.includes('vim') || + lower.includes('nvim') || + lower.includes('emacs') || + lower.includes('vscode') || + lower.includes('cursor') + ) + return 'Editor' + if ( + lower.includes('brew') || + lower.includes('npm') || + lower.includes('yarn') || + lower.includes('pip') || + lower.includes('gem') + ) + return 'Package' return 'Other' } @@ -167,13 +199,7 @@ export function formatDotfilesStatus(result) { * @returns {string} */ export function formatDotfilesAdd(result) { - const lines = [ - '', - BORDER, - chalk.bold(' Dotfiles Add — Summary'), - BORDER, - '', - ] + const lines = ['', BORDER, chalk.bold(' Dotfiles Add — Summary'), BORDER, ''] if (result.added.length > 0) { lines.push(chalk.bold(` Added (${result.added.length}):`)) @@ -219,12 +245,13 @@ export function formatDotfilesAdd(result) { * @returns {string} */ export function formatDotfilesSync(result) { - const actionLabel = { - push: 'Push', - pull: 'Pull', - 'init-remote': 'Remote Setup', - skipped: 'Skipped', - }[result.action] ?? result.action + const actionLabel = + { + push: 'Push', + pull: 'Pull', + 'init-remote': 'Remote Setup', + skipped: 'Skipped', + }[result.action] ?? result.action const lines = [ '', @@ -233,7 +260,9 @@ export function formatDotfilesSync(result) { BORDER, '', chalk.white(` Action: ${chalk.cyan(actionLabel)}`), - chalk.white(` Status: ${result.status === 'success' ? chalk.green('success') : result.status === 'skipped' ? chalk.dim('skipped') : chalk.red('failed')}`), + chalk.white( + ` Status: ${result.status === 'success' ? 
chalk.green('success') : result.status === 'skipped' ? chalk.dim('skipped') : chalk.red('failed')}`, + ), ] if (result.repo) { diff --git a/src/formatters/markdown.js b/src/formatters/markdown.js index 0a95127..247ea24 100644 --- a/src/formatters/markdown.js +++ b/src/formatters/markdown.js @@ -1,6 +1,6 @@ -import { marked } from 'marked' +import {marked} from 'marked' import chalk from 'chalk' -import { deflate } from 'pako' +import {deflate} from 'pako' // Custom terminal renderer — outputs ANSI-formatted text using chalk. // marked-terminal@7 is incompatible with all currently released versions of marked @@ -30,7 +30,12 @@ const terminalRenderer = { return '\n' + lines.join('\n') + '\n\n' }, blockquote(quote) { - return quote.split('\n').map((l) => chalk.dim('│ ') + chalk.italic(l)).join('\n') + '\n' + return ( + quote + .split('\n') + .map((l) => chalk.dim('│ ') + chalk.italic(l)) + .join('\n') + '\n' + ) }, link(href, _title, text) { return `${text} ${chalk.dim(`(${href})`)}` @@ -61,7 +66,7 @@ const terminalRenderer = { }, } -marked.use({ renderer: terminalRenderer }) +marked.use({renderer: terminalRenderer}) /** * Render a markdown string as ANSI-formatted terminal output. 
@@ -95,14 +100,14 @@ export function extractMermaidBlocks(content) { export function toMermaidLiveUrl(diagramCode) { const state = JSON.stringify({ code: diagramCode, - mermaid: JSON.stringify({ theme: 'default' }), + mermaid: JSON.stringify({theme: 'default'}), updateDiagram: true, grid: true, panZoom: true, rough: false, }) const data = new TextEncoder().encode(state) - const compressed = deflate(data, { level: 9 }) + const compressed = deflate(data, {level: 9}) const encoded = Buffer.from(compressed).toString('base64url') return `https://mermaid.live/view#pako:${encoded}` } diff --git a/src/formatters/openapi.js b/src/formatters/openapi.js index a755b29..18f74de 100644 --- a/src/formatters/openapi.js +++ b/src/formatters/openapi.js @@ -1,4 +1,4 @@ -import { load } from 'js-yaml' +import {load} from 'js-yaml' /** @import { APIEndpoint, AsyncChannel } from '../types.js' */ @@ -45,7 +45,7 @@ export function isAsyncApi(doc) { export function parseOpenApi(content) { const doc = parseYamlOrJson(content) if (!doc || !isOpenApi(doc)) { - return { endpoints: [], error: 'Not a valid OpenAPI/Swagger document' } + return {endpoints: [], error: 'Not a valid OpenAPI/Swagger document'} } /** @type {APIEndpoint[]} */ @@ -58,9 +58,7 @@ export function parseOpenApi(content) { if (!['get', 'post', 'put', 'patch', 'delete', 'head', 'options'].includes(method)) continue const operation = /** @type {Record} */ (op) const rawParams = /** @type {Array>} */ (operation.parameters ?? []) - const parameters = rawParams - .map((p) => (p.required ? `${p.name}*` : String(p.name))) - .join(', ') + const parameters = rawParams.map((p) => (p.required ? 
`${p.name}*` : String(p.name))).join(', ') endpoints.push({ method: method.toUpperCase(), path, @@ -70,7 +68,7 @@ export function parseOpenApi(content) { } } - return { endpoints, error: null } + return {endpoints, error: null} } /** @@ -81,7 +79,7 @@ export function parseOpenApi(content) { export function parseAsyncApi(content) { const doc = parseYamlOrJson(content) if (!doc || !isAsyncApi(doc)) { - return { channels: [], error: 'Not a valid AsyncAPI document' } + return {channels: [], error: 'Not a valid AsyncAPI document'} } /** @type {AsyncChannel[]} */ @@ -98,7 +96,7 @@ export function parseAsyncApi(content) { return String(ch.$ref ?? '').includes(channelName) || String(ch ?? '') === channelName }) if (matchingOps.length === 0) { - channels.push({ channel: channelName, operation: '—', summary: '', message: '—' }) + channels.push({channel: channelName, operation: '—', summary: '', message: '—'}) } for (const op of matchingOps) { const msgTitle = resolveMessageTitle(op.messages) @@ -129,7 +127,7 @@ export function parseAsyncApi(content) { } } - return { channels, error: null } + return {channels, error: null} } /** diff --git a/src/formatters/prompts.js b/src/formatters/prompts.js index f4dbe1b..311c0b5 100644 --- a/src/formatters/prompts.js +++ b/src/formatters/prompts.js @@ -1,6 +1,6 @@ import chalk from 'chalk' -import { marked } from 'marked' -import { renderTable } from './table.js' +import {marked} from 'marked' +import {renderTable} from './table.js' /** @import { Prompt, Skill, AwesomeEntry } from '../types.js' */ @@ -16,31 +16,28 @@ export function formatPromptTable(prompts) { return chalk.dim('No prompts found.') } - return renderTable( - /** @type {Record[]} */ (prompts), - [ - { - header: 'Title', - key: 'title', - width: 36, - colorize: (v) => chalk.hex('#FF9A5C')(v), - }, - { - header: 'Category', - key: 'category', - width: 16, - format: (v) => v ?? 
'—', - colorize: (v) => chalk.hex('#4A9EFF')(v), - }, - { - header: 'Description', - key: 'description', - width: 60, - format: (v) => v ?? '—', - colorize: (v) => chalk.white(v), - }, - ], - ) + return renderTable(/** @type {Record[]} */ (prompts), [ + { + header: 'Title', + key: 'title', + width: 36, + colorize: (v) => chalk.hex('#FF9A5C')(v), + }, + { + header: 'Category', + key: 'category', + width: 16, + format: (v) => v ?? '—', + colorize: (v) => chalk.hex('#4A9EFF')(v), + }, + { + header: 'Description', + key: 'description', + width: 60, + format: (v) => v ?? '—', + colorize: (v) => chalk.white(v), + }, + ]) } /** @@ -63,7 +60,7 @@ export function formatPromptBody(prompt) { .join('\n') // Render markdown to plain terminal text by stripping HTML tags from marked output - const rendered = marked(prompt.body, { async: false }) + const rendered = marked(prompt.body, {async: false}) const plain = String(rendered) .replace(/<[^>]+>/g, '') .replace(/&/g, '&') @@ -91,31 +88,28 @@ export function formatSkillTable(skills) { return chalk.dim('No skills found.') } - return renderTable( - /** @type {Record[]} */ (skills), - [ - { - header: 'Name', - key: 'name', - width: 36, - colorize: (v) => chalk.hex('#FF9A5C')(v), - }, - { - header: 'Installs', - key: 'installs', - width: 10, - format: (v) => (v != null ? String(v) : '—'), - colorize: (v) => chalk.hex('#4A9EFF')(v), - }, - { - header: 'Description', - key: 'description', - width: 60, - format: (v) => v ?? '—', - colorize: (v) => chalk.white(v), - }, - ], - ) + return renderTable(/** @type {Record[]} */ (skills), [ + { + header: 'Name', + key: 'name', + width: 36, + colorize: (v) => chalk.hex('#FF9A5C')(v), + }, + { + header: 'Installs', + key: 'installs', + width: 10, + format: (v) => (v != null ? String(v) : '—'), + colorize: (v) => chalk.hex('#4A9EFF')(v), + }, + { + header: 'Description', + key: 'description', + width: 60, + format: (v) => v ?? 
'—', + colorize: (v) => chalk.white(v), + }, + ]) } /** @@ -131,29 +125,26 @@ export function formatAwesomeTable(entries, category) { return chalk.dim(category ? `No entries found for category "${category}".` : 'No entries found.') } - return renderTable( - /** @type {Record[]} */ (entries), - [ - { - header: 'Name', - key: 'name', - width: 36, - colorize: (v) => chalk.hex('#FF9A5C')(v), - }, - { - header: 'Category', - key: 'category', - width: 14, - format: (v) => v ?? '—', - colorize: (v) => chalk.hex('#4A9EFF')(v), - }, - { - header: 'Description', - key: 'description', - width: 58, - format: (v) => v ?? '—', - colorize: (v) => chalk.white(v), - }, - ], - ) + return renderTable(/** @type {Record[]} */ (entries), [ + { + header: 'Name', + key: 'name', + width: 36, + colorize: (v) => chalk.hex('#FF9A5C')(v), + }, + { + header: 'Category', + key: 'category', + width: 14, + format: (v) => v ?? '—', + colorize: (v) => chalk.hex('#4A9EFF')(v), + }, + { + header: 'Description', + key: 'description', + width: 58, + format: (v) => v ?? 
'—', + colorize: (v) => chalk.white(v), + }, + ]) } diff --git a/src/formatters/security.js b/src/formatters/security.js index 6b3fbca..c5f785e 100644 --- a/src/formatters/security.js +++ b/src/formatters/security.js @@ -1,5 +1,5 @@ import chalk from 'chalk' -import { deriveOverallStatus } from '../services/security.js' +import {deriveOverallStatus} from '../services/security.js' /** @import { SetupSession, SecurityToolStatus, PlatformInfo } from '../types.js' */ @@ -116,4 +116,4 @@ export function formatSecuritySummary(session, platformInfo) { * @param {SecurityToolStatus[]} tools * @returns {'success'|'partial'|'not-configured'} */ -export { deriveOverallStatus } +export {deriveOverallStatus} diff --git a/src/formatters/status.js b/src/formatters/status.js index da723a3..c6e1a2d 100644 --- a/src/formatters/status.js +++ b/src/formatters/status.js @@ -1,5 +1,5 @@ import chalk from 'chalk' -import { colorStatus } from './table.js' +import {colorStatus} from './table.js' /** @import { DoctorCheck } from '../types.js' */ diff --git a/src/formatters/table.js b/src/formatters/table.js index aac48c5..077e039 100644 --- a/src/formatters/table.js +++ b/src/formatters/table.js @@ -51,9 +51,7 @@ export function renderTable(rows, columns) { }) // Header row - const header = columns - .map((col, i) => chalk.bold.white(col.header.padEnd(widths[i]))) - .join(' ') + const header = columns.map((col, i) => chalk.bold.white(col.header.padEnd(widths[i]))).join(' ') // Divider const divider = chalk.dim(widths.map((w) => '─'.repeat(w)).join(' ')) diff --git a/src/formatters/vuln.js b/src/formatters/vuln.js index 9134465..61e19df 100644 --- a/src/formatters/vuln.js +++ b/src/formatters/vuln.js @@ -1,6 +1,6 @@ import chalk from 'chalk' -import { renderTable } from './table.js' -import { NVD_ATTRIBUTION } from '../services/nvd.js' +import {renderTable} from './table.js' +import {NVD_ATTRIBUTION} from '../services/nvd.js' /** @import { CveSearchResult, CveDetail, VulnerabilityFinding, 
ScanResult } from '../types.js' */ @@ -11,11 +11,16 @@ import { NVD_ATTRIBUTION } from '../services/nvd.js' */ export function colorSeverity(severity) { switch (severity) { - case 'Critical': return chalk.red.bold(severity) - case 'High': return chalk.red(severity) - case 'Medium': return chalk.yellow(severity) - case 'Low': return chalk.blue(severity) - default: return chalk.gray(severity) + case 'Critical': + return chalk.red.bold(severity) + case 'High': + return chalk.red(severity) + case 'Medium': + return chalk.yellow(severity) + case 'Low': + return chalk.blue(severity) + default: + return chalk.gray(severity) } } @@ -83,16 +88,21 @@ export function formatCveSearchTable(results, keyword, days, totalResults) { })) const table = renderTable(rows, [ - { header: 'CVE ID', key: 'id', colorize: (v) => chalk.cyan(v) }, - { header: 'Severity', key: 'severity', colorize: (v) => colorSeverity(v) }, - { header: 'Score', key: 'score', width: 5 }, - { header: 'Published', key: 'published', width: 10 }, - { header: 'Description', key: 'description', width: 90 }, - { header: 'Reference', key: 'reference', width: 45 }, + {header: 'CVE ID', key: 'id', colorize: (v) => chalk.cyan(v)}, + {header: 'Severity', key: 'severity', colorize: (v) => colorSeverity(v)}, + {header: 'Score', key: 'score', width: 5}, + {header: 'Published', key: 'published', width: 10}, + {header: 'Description', key: 'description', width: 90}, + {header: 'Reference', key: 'reference', width: 45}, ]) // Indent table by 2 spaces - lines.push(table.split('\n').map((l) => ` ${l}`).join('\n')) + lines.push( + table + .split('\n') + .map((l) => ` ${l}`) + .join('\n'), + ) lines.push('') lines.push(`Showing ${results.length} of ${totalResults} results.`) lines.push(chalk.dim(NVD_ATTRIBUTION)) @@ -250,14 +260,17 @@ export function formatFindingsTable(findings) { })) const table = renderTable(rows, [ - { header: 'Package', key: 'pkg', width: 20 }, - { header: 'Version', key: 'version', width: 12 }, - { header: 
'Severity', key: 'severity', colorize: (v) => colorSeverity(v) }, - { header: 'CVE', key: 'cve', colorize: (v) => (v !== '—' ? chalk.cyan(v) : chalk.gray(v)) }, - { header: 'Title', key: 'title', width: 40 }, + {header: 'Package', key: 'pkg', width: 20}, + {header: 'Version', key: 'version', width: 12}, + {header: 'Severity', key: 'severity', colorize: (v) => colorSeverity(v)}, + {header: 'CVE', key: 'cve', colorize: (v) => (v !== '—' ? chalk.cyan(v) : chalk.gray(v))}, + {header: 'Title', key: 'title', width: 40}, ]) - return table.split('\n').map((l) => ` ${l}`).join('\n') + return table + .split('\n') + .map((l) => ` ${l}`) + .join('\n') } /** diff --git a/src/help.js b/src/help.js index 1bf99b5..29ed34e 100644 --- a/src/help.js +++ b/src/help.js @@ -1,13 +1,13 @@ -import { Help } from '@oclif/core' +import {Help} from '@oclif/core' import chalk from 'chalk' -import { isColorEnabled } from './utils/gradient.js' -import { printBanner } from './utils/banner.js' +import {isColorEnabled} from './utils/gradient.js' +import {printBanner} from './utils/banner.js' // ─── Brand palette (flat — no gradient on help rows) ──────────────────────── -const ORANGE = '#FF6B2B' +const ORANGE = '#FF6B2B' const LIGHT_ORANGE = '#FF9A5C' -const DIM_BLUE = '#4A9EFF' -const DIM_GRAY = '#888888' +const DIM_BLUE = '#4A9EFF' +const DIM_GRAY = '#888888' // Strip ANSI escape codes const ANSI_RE = /\x1B\[[0-?]*[ -/]*[@-~]/g @@ -27,92 +27,91 @@ const CATEGORIES = [ { title: 'GitHub & Documentazione', cmds: [ - { id: 'repo:list', hint: '[--language] [--search]' }, - { id: 'docs:read', hint: '[FILE] [--repo] [--raw] [--render]' }, - { id: 'docs:list', hint: '[--repo] [--search]' }, - { id: 'docs:search', hint: ' [--repo]' }, - { id: 'docs:projects', hint: '[--search]' }, - { id: 'create:repo', hint: '[TEMPLATE] [--list] [--name]' }, - { id: 'search', hint: '' }, - { id: 'open', hint: '' }, + {id: 'repo:list', hint: '[--language] [--search]'}, + {id: 'docs:read', hint: '[FILE] [--repo] [--raw] 
[--render]'}, + {id: 'docs:list', hint: '[--repo] [--search]'}, + {id: 'docs:search', hint: ' [--repo]'}, + {id: 'docs:projects', hint: '[--search]'}, + {id: 'create:repo', hint: '[TEMPLATE] [--list] [--name]'}, + {id: 'search', hint: ''}, + {id: 'open', hint: ''}, ], }, { title: 'Pull Request', cmds: [ - { id: 'pr:create', hint: '' }, - { id: 'pr:status', hint: '' }, - { id: 'pr:detail', hint: ' --repo ' }, - { id: 'pr:review', hint: '' }, + {id: 'pr:create', hint: ''}, + {id: 'pr:status', hint: ''}, + {id: 'pr:detail', hint: ' --repo '}, + {id: 'pr:review', hint: ''}, ], }, { title: 'Pipeline & DevOps', cmds: [ - { id: 'pipeline:status', hint: '[--repo] [--branch]' }, - { id: 'pipeline:rerun', hint: ' --repo ' }, - { id: 'pipeline:logs', hint: ' --repo ' }, - { id: 'changelog', hint: '' }, + {id: 'pipeline:status', hint: '[--repo] [--branch]'}, + {id: 'pipeline:rerun', hint: ' --repo '}, + {id: 'pipeline:logs', hint: ' --repo '}, + {id: 'changelog', hint: ''}, ], }, { title: 'Tasks (ClickUp)', cmds: [ - { id: 'tasks:list', hint: '[--status] [--search]' }, - { id: 'tasks:today', hint: '' }, - { id: 'tasks:assigned', hint: '[--status] [--search]' }, + {id: 'tasks:list', hint: '[--status] [--search]'}, + {id: 'tasks:today', hint: ''}, + {id: 'tasks:assigned', hint: '[--status] [--search]'}, ], }, { title: 'Cloud & Costi', cmds: [ - { id: 'costs:get', hint: '[SERVICE] [--period] [--group-by] [--tag-key]' }, - { id: 'costs:trend', hint: '[--group-by] [--tag-key] [--line]' }, - { id: 'logs', hint: '[--group] [--filter] [--since] [--limit] [--region]' }, + {id: 'costs:get', hint: '[SERVICE] [--period] [--group-by] [--tag-key]'}, + {id: 'costs:trend', hint: '[--group-by] [--tag-key] [--line]'}, + {id: 'logs', hint: '[--group] [--filter] [--since] [--limit] [--region]'}, ], }, { title: 'AI Prompts', cmds: [ - { id: 'prompts:list', hint: '[--filter]' }, - { id: 'prompts:download', hint: ' [--overwrite]' }, - { id: 'prompts:browse', hint: '[--source] [--query] [--category]' 
}, - { id: 'prompts:install-speckit', hint: '[--force]' }, - { id: 'prompts:run', hint: '[PATH] [--tool]' }, + {id: 'prompts:list', hint: '[--filter]'}, + {id: 'prompts:download', hint: ' [--overwrite]'}, + {id: 'prompts:browse', hint: '[--source] [--query] [--category]'}, + {id: 'prompts:install-speckit', hint: '[--force]'}, + {id: 'prompts:run', hint: '[PATH] [--tool]'}, + {id: 'sync-config-ai', hint: '[--json]'}, ], }, { title: 'Sicurezza & Credenziali', - cmds: [ - { id: 'security:setup', hint: '[--json]' }, - ], + cmds: [{id: 'security:setup', hint: '[--json]'}], }, { title: 'CVE & Vulnerabilità', cmds: [ - { id: 'vuln:search', hint: '[KEYWORD] [--days] [--severity] [--limit]' }, - { id: 'vuln:detail', hint: ' [--open]' }, - { id: 'vuln:scan', hint: '[--severity] [--no-fail] [--report]' }, + {id: 'vuln:search', hint: '[KEYWORD] [--days] [--severity] [--limit]'}, + {id: 'vuln:detail', hint: ' [--open]'}, + {id: 'vuln:scan', hint: '[--severity] [--no-fail] [--report]'}, ], }, { title: 'Dotfiles & Cifratura', cmds: [ - { id: 'dotfiles:setup', hint: '[--json]' }, - { id: 'dotfiles:add', hint: '[FILES...] [--encrypt]' }, - { id: 'dotfiles:status', hint: '[--json]' }, - { id: 'dotfiles:sync', hint: '[--push] [--pull] [--dry-run]' }, + {id: 'dotfiles:setup', hint: '[--json]'}, + {id: 'dotfiles:add', hint: '[FILES...] 
[--encrypt]'}, + {id: 'dotfiles:status', hint: '[--json]'}, + {id: 'dotfiles:sync', hint: '[--push] [--pull] [--dry-run]'}, ], }, { title: 'Setup & Ambiente', cmds: [ - { id: 'init', hint: '[--dry-run]' }, - { id: 'doctor', hint: '' }, - { id: 'auth:login', hint: '' }, - { id: 'whoami', hint: '' }, - { id: 'welcome', hint: '' }, - { id: 'upgrade', hint: '' }, + {id: 'init', hint: '[--dry-run]'}, + {id: 'doctor', hint: ''}, + {id: 'auth:login', hint: ''}, + {id: 'whoami', hint: ''}, + {id: 'welcome', hint: ''}, + {id: 'upgrade', hint: ''}, ], }, ] @@ -126,31 +125,30 @@ const CATEGORIES = [ * - Gradient solo sul logo; tutto il resto usa colori flat chalk */ export default class CustomHelp extends Help { + /** + * Root help override: banner animato → layout categorizzato. + * Override di showRootHelp() (async) per evitare che formatRoot() (sync) + * debba attendere la Promise del banner. + * @returns {Promise} + */ + async showRootHelp() { + // Animated logo — identical to `dvmi init` (no-ops in CI/non-TTY) + await printBanner() + + // Version check: uses cached result (populated by init hook) — 800 ms timeout + let versionInfo = null + try { + const {checkForUpdate} = await import('./services/version-check.js') + versionInfo = await Promise.race([ + checkForUpdate(), + new Promise((resolve) => setTimeout(() => resolve(null), 800)), + ]) + } catch { + // never block help output + } - /** - * Root help override: banner animato → layout categorizzato. - * Override di showRootHelp() (async) per evitare che formatRoot() (sync) - * debba attendere la Promise del banner. 
- * @returns {Promise} - */ - async showRootHelp() { - // Animated logo — identical to `dvmi init` (no-ops in CI/non-TTY) - await printBanner() - - // Version check: uses cached result (populated by init hook) — 800 ms timeout - let versionInfo = null - try { - const { checkForUpdate } = await import('./services/version-check.js') - versionInfo = await Promise.race([ - checkForUpdate(), - new Promise((resolve) => setTimeout(() => resolve(null), 800)), - ]) - } catch { - // never block help output - } - - this.log(this.#buildRootLayout(versionInfo)) - } + this.log(this.#buildRootLayout(versionInfo)) + } /** * @param {import('@oclif/core').Interfaces.Topic[]} topics @@ -179,69 +177,64 @@ export default class CustomHelp extends Help { // ─── Private helpers ────────────────────────────────────────────────────── - /** - * Build the full categorized root help layout. - * @param {{ hasUpdate: boolean, current: string, latest: string|null }|null} [versionInfo] - * @returns {string} - */ + /** + * Build the full categorized root help layout. 
+ * @param {{ hasUpdate: boolean, current: string, latest: string|null }|null} [versionInfo] + * @returns {string} + */ #buildRootLayout(versionInfo = null) { /** @type {Map} */ const cmdMap = new Map(this.config.commands.map((c) => [c.id, c])) /** @type {Array<{cmd: string, note: string}>} */ const EXAMPLES = [ - { cmd: 'dvmi prompts list', note: 'Sfoglia prompt AI dal tuo repository' }, - { cmd: 'dvmi prompts list --filter refactor', note: 'Filtra prompt per parola chiave' }, - { cmd: 'dvmi prompts download coding/refactor-prompt.md', note: 'Scarica un prompt localmente' }, - { cmd: 'dvmi prompts browse skills --query refactor', note: 'Cerca skill su skills.sh' }, - { cmd: 'dvmi prompts browse awesome --category agents', note: 'Sfoglia awesome-copilot agents' }, - { cmd: 'dvmi prompts run coding/refactor-prompt.md --tool opencode', note: 'Esegui un prompt con opencode' }, - { cmd: 'dvmi docs read', note: 'Leggi il README del repo corrente' }, - { cmd: 'dvmi docs search "authentication"', note: 'Cerca nei docs del repo corrente' }, - { cmd: 'dvmi repo list --search "api"', note: 'Filtra repository per nome' }, - { cmd: 'dvmi pr status', note: 'PR aperte e review in attesa' }, - { cmd: 'dvmi pipeline status', note: 'Ultimi workflow CI/CD' }, - { cmd: 'dvmi tasks list --search "bug"', note: 'Cerca task ClickUp' }, - { cmd: 'dvmi tasks today', note: 'Task in lavorazione oggi' }, - { cmd: 'dvmi costs get --period mtd', note: 'Costi AWS mese corrente per servizio' }, - { cmd: 'dvmi costs get --group-by tag --tag-key env', note: 'Costi raggruppati per tag env' }, - { cmd: 'dvmi costs trend --line', note: 'Trend costi 2 mesi (grafico lineare)' }, - { cmd: 'dvmi costs get --json', note: 'Costi AWS in formato JSON' }, - { cmd: 'dvmi logs', note: 'Sfoglia log CloudWatch in modo interattivo' }, - { cmd: 'dvmi logs --group /aws/lambda/my-fn --since 24h', note: 'Log Lambda ultimi 24h' }, - { cmd: 'dvmi logs --group /aws/lambda/my-fn --filter "ERROR"', note: 'Filtra eventi 
ERROR su un log group' }, - { cmd: 'dvmi security setup --json', note: 'Controlla lo stato degli strumenti di sicurezza' }, - { cmd: 'dvmi security setup', note: 'Wizard interattivo: installa aws-vault e GCM' }, - { cmd: 'dvmi dotfiles setup', note: 'Configura chezmoi con cifratura age' }, - { cmd: 'dvmi dotfiles add ~/.zshrc ~/.gitconfig', note: 'Aggiungi dotfile a chezmoi' }, - { cmd: 'dvmi dotfiles status --json', note: 'Stato dotfile gestiti (JSON)' }, - { cmd: 'dvmi dotfiles sync --push', note: 'Push dotfile al repository remoto' }, - { cmd: 'dvmi welcome', note: 'Dashboard missione dvmi con intro animata' }, - { cmd: 'dvmi vuln search openssl', note: 'Cerca CVE recenti per keyword' }, - { cmd: 'dvmi vuln search log4j --days 30 --severity critical', note: 'CVE critiche Log4j negli ultimi 30 giorni' }, - { cmd: 'dvmi vuln detail CVE-2021-44228', note: 'Dettaglio completo di una CVE' }, - { cmd: 'dvmi vuln detail CVE-2021-44228 --open', note: 'Apri la prima referenza nel browser' }, - { cmd: 'dvmi vuln scan', note: 'Scansiona dipendenze del progetto corrente' }, - { cmd: 'dvmi vuln scan --severity high --no-fail', note: 'Scansione senza bloccare CI (solo high+)' }, - { cmd: 'dvmi vuln scan --report ./vuln-report.md', note: 'Esporta report Markdown delle vulnerabilità' }, + {cmd: 'dvmi prompts list', note: 'Sfoglia prompt AI dal tuo repository'}, + {cmd: 'dvmi prompts list --filter refactor', note: 'Filtra prompt per parola chiave'}, + {cmd: 'dvmi prompts download coding/refactor-prompt.md', note: 'Scarica un prompt localmente'}, + {cmd: 'dvmi prompts browse skills --query refactor', note: 'Cerca skill su skills.sh'}, + {cmd: 'dvmi prompts browse awesome --category agents', note: 'Sfoglia awesome-copilot agents'}, + {cmd: 'dvmi prompts run coding/refactor-prompt.md --tool opencode', note: 'Esegui un prompt con opencode'}, + {cmd: 'dvmi docs read', note: 'Leggi il README del repo corrente'}, + {cmd: 'dvmi docs search "authentication"', note: 'Cerca nei docs del 
repo corrente'}, + {cmd: 'dvmi repo list --search "api"', note: 'Filtra repository per nome'}, + {cmd: 'dvmi pr status', note: 'PR aperte e review in attesa'}, + {cmd: 'dvmi pipeline status', note: 'Ultimi workflow CI/CD'}, + {cmd: 'dvmi tasks list --search "bug"', note: 'Cerca task ClickUp'}, + {cmd: 'dvmi tasks today', note: 'Task in lavorazione oggi'}, + {cmd: 'dvmi costs get --period mtd', note: 'Costi AWS mese corrente per servizio'}, + {cmd: 'dvmi costs get --group-by tag --tag-key env', note: 'Costi raggruppati per tag env'}, + {cmd: 'dvmi costs trend --line', note: 'Trend costi 2 mesi (grafico lineare)'}, + {cmd: 'dvmi costs get --json', note: 'Costi AWS in formato JSON'}, + {cmd: 'dvmi logs', note: 'Sfoglia log CloudWatch in modo interattivo'}, + {cmd: 'dvmi logs --group /aws/lambda/my-fn --since 24h', note: 'Log Lambda ultimi 24h'}, + {cmd: 'dvmi logs --group /aws/lambda/my-fn --filter "ERROR"', note: 'Filtra eventi ERROR su un log group'}, + {cmd: 'dvmi security setup --json', note: 'Controlla lo stato degli strumenti di sicurezza'}, + {cmd: 'dvmi security setup', note: 'Wizard interattivo: installa aws-vault e GCM'}, + {cmd: 'dvmi dotfiles setup', note: 'Configura chezmoi con cifratura age'}, + {cmd: 'dvmi dotfiles add ~/.zshrc ~/.gitconfig', note: 'Aggiungi dotfile a chezmoi'}, + {cmd: 'dvmi dotfiles status --json', note: 'Stato dotfile gestiti (JSON)'}, + {cmd: 'dvmi dotfiles sync --push', note: 'Push dotfile al repository remoto'}, + {cmd: 'dvmi welcome', note: 'Dashboard missione dvmi con intro animata'}, + {cmd: 'dvmi vuln search openssl', note: 'Cerca CVE recenti per keyword'}, + {cmd: 'dvmi vuln search log4j --days 30 --severity critical', note: 'CVE critiche Log4j negli ultimi 30 giorni'}, + {cmd: 'dvmi vuln detail CVE-2021-44228', note: 'Dettaglio completo di una CVE'}, + {cmd: 'dvmi vuln detail CVE-2021-44228 --open', note: 'Apri la prima referenza nel browser'}, + {cmd: 'dvmi vuln scan', note: 'Scansiona dipendenze del progetto corrente'}, + 
{cmd: 'dvmi vuln scan --severity high --no-fail', note: 'Scansione senza bloccare CI (solo high+)'}, + {cmd: 'dvmi vuln scan --report ./vuln-report.md', note: 'Esporta report Markdown delle vulnerabilità'}, ] const lines = [] - // ── Usage ────────────────────────────────────────────────────────────── - lines.push(this.#sectionHeader('USAGE')) - lines.push( - ' ' + (isColorEnabled ? chalk.hex(ORANGE).bold('dvmi') : 'dvmi') + - chalk.dim(' [FLAGS]\n'), - ) + // ── Usage ────────────────────────────────────────────────────────────── + lines.push(this.#sectionHeader('USAGE')) + lines.push(' ' + (isColorEnabled ? chalk.hex(ORANGE).bold('dvmi') : 'dvmi') + chalk.dim(' [FLAGS]\n')) // ── Comandi per categoria ────────────────────────────────────────────── lines.push(this.#sectionHeader('COMMANDS')) for (const cat of CATEGORIES) { - lines.push( - ' ' + (isColorEnabled ? chalk.hex(ORANGE).bold(cat.title) : cat.title), - ) + lines.push(' ' + (isColorEnabled ? chalk.hex(ORANGE).bold(cat.title) : cat.title)) for (const entry of cat.cmds) { const cmd = cmdMap.get(entry.id) @@ -249,17 +242,14 @@ export default class CustomHelp extends Help { const displayId = entry.id.replaceAll(':', ' ') const hint = entry.hint || '' - const desc = cmd.summary ?? (typeof cmd.description === 'string' - ? cmd.description.split('\n')[0] - : '') + const desc = cmd.summary ?? (typeof cmd.description === 'string' ? cmd.description.split('\n')[0] : '') // Left column (name + flags hint), right-padded to align descriptions const rawLeft = ' ' + displayId + (hint ? ' ' + hint : '') const pad = ' '.repeat(Math.max(2, 50 - rawLeft.length)) const leftPart = isColorEnabled - ? ' ' + chalk.hex(LIGHT_ORANGE).bold(displayId) + - (hint ? ' ' + chalk.dim(hint) : '') + ? ' ' + chalk.hex(LIGHT_ORANGE).bold(displayId) + (hint ? 
' ' + chalk.dim(hint) : '') : rawLeft lines.push(leftPart + pad + chalk.dim(desc)) @@ -270,8 +260,8 @@ export default class CustomHelp extends Help { // ── Flag globali ─────────────────────────────────────────────────────── lines.push(this.#sectionHeader('GLOBAL FLAGS')) - lines.push(this.#flagLine('-h, --help', 'Mostra aiuto per un comando')) - lines.push(this.#flagLine(' --json', 'Output in formato JSON strutturato')) + lines.push(this.#flagLine('-h, --help', 'Mostra aiuto per un comando')) + lines.push(this.#flagLine(' --json', 'Output in formato JSON strutturato')) lines.push(this.#flagLine('-v, --version', 'Versione installata')) lines.push('') @@ -281,39 +271,49 @@ export default class CustomHelp extends Help { const maxCmdLen = Math.max(...EXAMPLES.map((e) => e.cmd.length)) for (const ex of EXAMPLES) { const pad = ' '.repeat(maxCmdLen - ex.cmd.length + 4) - const sub = ex.cmd.replace(/^dvmi /, '') - const formatted = isColorEnabled - ? chalk.dim('$') + ' ' + chalk.hex(ORANGE).bold('dvmi') + ' ' + - chalk.white(sub) + pad + chalk.hex(DIM_GRAY)(ex.note) - : '$ ' + ex.cmd + pad + ex.note + const sub = ex.cmd.replace(/^dvmi /, '') + const formatted = isColorEnabled + ? chalk.dim('$') + + ' ' + + chalk.hex(ORANGE).bold('dvmi') + + ' ' + + chalk.white(sub) + + pad + + chalk.hex(DIM_GRAY)(ex.note) + : '$ ' + ex.cmd + pad + ex.note lines.push(' ' + formatted) } - lines.push('') - - // ── Versione + update notice ─────────────────────────────────────────── - const current = versionInfo?.current ?? this.config.version - const versionStr = isColorEnabled - ? chalk.dim('version ') + chalk.hex(DIM_BLUE)(current) - : `version ${current}` - - if (versionInfo?.hasUpdate && versionInfo.latest) { - const updateStr = isColorEnabled - ? 
chalk.yellow('update disponibile: ') + - chalk.dim(current) + chalk.yellow(' → ') + chalk.green(versionInfo.latest) + - chalk.dim(' (esegui ') + chalk.hex(LIGHT_ORANGE)('dvmi upgrade') + chalk.dim(')') - : `update disponibile: ${current} → ${versionInfo.latest} (esegui dvmi upgrade)` - lines.push(' ' + versionStr + chalk.dim(' · ') + updateStr) - } else { - lines.push(' ' + versionStr) - } - - lines.push( - ' ' + chalk.dim('Approfondisci:') + ' ' + - chalk.hex(DIM_BLUE)('dvmi --help') + - chalk.dim(' · ') + - chalk.hex(DIM_BLUE)('dvmi --help') + '\n', - ) + lines.push('') + + // ── Versione + update notice ─────────────────────────────────────────── + const current = versionInfo?.current ?? this.config.version + const versionStr = isColorEnabled ? chalk.dim('version ') + chalk.hex(DIM_BLUE)(current) : `version ${current}` + + if (versionInfo?.hasUpdate && versionInfo.latest) { + const updateStr = isColorEnabled + ? chalk.yellow('update disponibile: ') + + chalk.dim(current) + + chalk.yellow(' → ') + + chalk.green(versionInfo.latest) + + chalk.dim(' (esegui ') + + chalk.hex(LIGHT_ORANGE)('dvmi upgrade') + + chalk.dim(')') + : `update disponibile: ${current} → ${versionInfo.latest} (esegui dvmi upgrade)` + lines.push(' ' + versionStr + chalk.dim(' · ') + updateStr) + } else { + lines.push(' ' + versionStr) + } + + lines.push( + ' ' + + chalk.dim('Approfondisci:') + + ' ' + + chalk.hex(DIM_BLUE)('dvmi --help') + + chalk.dim(' · ') + + chalk.hex(DIM_BLUE)('dvmi --help') + + '\n', + ) return lines.join('\n') } @@ -379,12 +379,10 @@ export default class CustomHelp extends Help { const plain = strip(line) if (!plain.trim()) return line - // Example lines: "$ dvmi …" - if (plain.includes('$ dvmi') || plain.trim().startsWith('$ dvmi')) { - return plain.replace(/\$ (dvmi\S*)/g, (_, cmd) => - '$ ' + chalk.hex(ORANGE).bold(cmd), - ) - } + // Example lines: "$ dvmi …" + if (plain.includes('$ dvmi') || plain.trim().startsWith('$ dvmi')) { + return plain.replace(/\$ (dvmi\S*)/g, 
(_, cmd) => '$ ' + chalk.hex(ORANGE).bold(cmd)) + } // Flag rows: "--flag desc" or "-f, --flag desc" const flagMatch = plain.match(/^(\s{2,})((?:-\w,\s*)?--[\w-]+)(\s+)(.*)$/) diff --git a/src/hooks/init.js b/src/hooks/init.js index dc8b531..4b73ecc 100644 --- a/src/hooks/init.js +++ b/src/hooks/init.js @@ -3,7 +3,5 @@ */ export const init = async () => { // Fire-and-forget version check — result used by postrun hook - import('../services/version-check.js') - .then(({ checkForUpdate }) => checkForUpdate()) - .catch(() => null) // never block command execution + import('../services/version-check.js').then(({checkForUpdate}) => checkForUpdate()).catch(() => null) // never block command execution } diff --git a/src/hooks/postrun.js b/src/hooks/postrun.js index b78cb9f..341849f 100644 --- a/src/hooks/postrun.js +++ b/src/hooks/postrun.js @@ -5,13 +5,11 @@ import chalk from 'chalk' */ export const postrun = async () => { try { - const { checkForUpdate } = await import('../services/version-check.js') - const { hasUpdate, current, latest } = await checkForUpdate() - if (hasUpdate && latest) { - process.stderr.write( - chalk.dim(`\nUpdate available: ${current} → ${chalk.green(latest)}. Run \`dvmi upgrade\`\n`), - ) - } + const {checkForUpdate} = await import('../services/version-check.js') + const {hasUpdate, current, latest} = await checkForUpdate() + if (hasUpdate && latest) { + process.stderr.write(chalk.dim(`\nUpdate available: ${current} → ${chalk.green(latest)}. 
Run \`dvmi upgrade\`\n`)) + } } catch { // Never interrupt user flow } diff --git a/src/index.js b/src/index.js index 0237950..e32b0b2 100644 --- a/src/index.js +++ b/src/index.js @@ -1 +1 @@ -export { run } from '@oclif/core' +export {run} from '@oclif/core' diff --git a/src/services/ai-config-store.js b/src/services/ai-config-store.js new file mode 100644 index 0000000..1875146 --- /dev/null +++ b/src/services/ai-config-store.js @@ -0,0 +1,318 @@ +import {readFile, writeFile, mkdir, chmod} from 'node:fs/promises' +import {existsSync} from 'node:fs' +import {join, dirname} from 'node:path' +import {homedir} from 'node:os' +import {randomUUID} from 'node:crypto' + +import {DvmiError} from '../utils/errors.js' +import {exec} from './shell.js' +import {loadConfig} from './config.js' + +/** @import { AIConfigStore, CategoryEntry, CategoryType, EnvironmentId, MCPParams, CommandParams, SkillParams, AgentParams } from '../types.js' */ + +// ────────────────────────────────────────────────────────────────────────────── +// Path resolution +// ────────────────────────────────────────────────────────────────────────────── + +const CONFIG_DIR = process.env.XDG_CONFIG_HOME + ? join(process.env.XDG_CONFIG_HOME, 'dvmi') + : join(homedir(), '.config', 'dvmi') + +export const AI_CONFIG_PATH = join(CONFIG_DIR, 'ai-config.json') + +// ────────────────────────────────────────────────────────────────────────────── +// Compatibility matrix +// ────────────────────────────────────────────────────────────────────────────── + +/** @type {Record} */ +const COMPATIBILITY = { + 'vscode-copilot': ['mcp', 'command', 'skill', 'agent'], + 'claude-code': ['mcp', 'command', 'skill', 'agent'], + opencode: ['mcp', 'command', 'skill', 'agent'], + 'gemini-cli': ['mcp', 'command'], + 'copilot-cli': ['mcp', 'command', 'skill', 'agent'], +} + +/** All known environment IDs. 
*/ +const KNOWN_ENVIRONMENTS = /** @type {EnvironmentId[]} */ (Object.keys(COMPATIBILITY)) + +/** Regex for filename-unsafe characters. */ +const UNSAFE_CHARS = /[/\\:*?"<>|]/ + +// ────────────────────────────────────────────────────────────────────────────── +// Default store +// ────────────────────────────────────────────────────────────────────────────── + +/** @returns {AIConfigStore} */ +function defaultStore() { + return {version: 1, entries: []} +} + +// ────────────────────────────────────────────────────────────────────────────── +// Validation helpers +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Assert that a name is non-empty and contains no filename-unsafe characters. + * @param {string} name + * @returns {void} + */ +function validateName(name) { + if (!name || typeof name !== 'string' || name.trim() === '') { + throw new DvmiError( + 'Entry name must be a non-empty string', + 'Provide a valid name for the entry, e.g. "my-mcp-server"', + ) + } + if (UNSAFE_CHARS.test(name)) { + throw new DvmiError( + `Entry name "${name}" contains invalid characters`, + 'Remove characters like / \\ : * ? " < > | from the name', + ) + } +} + +/** + * Assert that all environment IDs are compatible with the given entry type. 
+ * @param {EnvironmentId[]} environments + * @param {CategoryType} type + * @returns {void} + */ +function validateEnvironments(environments, type) { + for (const envId of environments) { + const supported = COMPATIBILITY[envId] + if (!supported) { + throw new DvmiError(`Unknown environment "${envId}"`, `Valid environments are: ${KNOWN_ENVIRONMENTS.join(', ')}`) + } + if (!supported.includes(type)) { + throw new DvmiError( + `Environment "${envId}" does not support type "${type}"`, + `"${envId}" supports: ${supported.join(', ')}`, + ) + } + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Core I/O +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Load the AI config store from disk. + * Returns `{ version: 1, entries: [] }` if the file is missing or unparseable. + * @param {string} [configPath] - Override config path (used in tests; falls back to DVMI_AI_CONFIG_PATH or AI_CONFIG_PATH) + * @returns {Promise} + */ +export async function loadAIConfig(configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH) { + if (!existsSync(configPath)) return defaultStore() + try { + const raw = await readFile(configPath, 'utf8') + const parsed = JSON.parse(raw) + return { + version: parsed.version ?? 1, + entries: Array.isArray(parsed.entries) ? parsed.entries : [], + } + } catch { + return defaultStore() + } +} + +/** + * Persist the AI config store to disk. + * Creates the parent directory if it does not exist and sets file permissions to 0o600. + * @param {AIConfigStore} store + * @param {string} [configPath] - Override config path (used in tests) + * @returns {Promise} + */ +export async function saveAIConfig(store, configPath = process.env.DVMI_AI_CONFIG_PATH ?? 
AI_CONFIG_PATH) { + const dir = dirname(configPath) + if (!existsSync(dir)) { + await mkdir(dir, {recursive: true}) + } + await writeFile(configPath, JSON.stringify(store, null, 2), 'utf8') + await chmod(configPath, 0o600) +} + +// ────────────────────────────────────────────────────────────────────────────── +// CRUD operations +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Add a new entry to the AI config store. + * @param {{ name: string, type: CategoryType, environments: EnvironmentId[], params: MCPParams|CommandParams|SkillParams|AgentParams }} entryData + * @param {string} [configPath] + * @returns {Promise} + */ +export async function addEntry(entryData, configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH) { + const {name, type, environments, params} = entryData + + validateName(name) + validateEnvironments(environments, type) + + const store = await loadAIConfig(configPath) + + const duplicate = store.entries.find((e) => e.name === name && e.type === type) + if (duplicate) { + throw new DvmiError( + `An entry named "${name}" of type "${type}" already exists`, + 'Use a unique name or update the existing entry with `dvmi sync-config-ai update`', + ) + } + + const now = new Date().toISOString() + /** @type {CategoryEntry} */ + const entry = { + id: randomUUID(), + name, + type, + active: true, + environments, + params, + createdAt: now, + updatedAt: now, + } + + store.entries.push(entry) + await saveAIConfig(store, configPath) + await syncAIConfigToChezmoi() + return entry +} + +/** + * Update an existing entry by id. + * @param {string} id - UUID of the entry to update + * @param {{ name?: string, environments?: EnvironmentId[], params?: MCPParams|CommandParams|SkillParams|AgentParams, active?: boolean }} changes + * @param {string} [configPath] + * @returns {Promise} + */ +export async function updateEntry(id, changes, configPath = process.env.DVMI_AI_CONFIG_PATH ?? 
AI_CONFIG_PATH) { + const store = await loadAIConfig(configPath) + + const index = store.entries.findIndex((e) => e.id === id) + if (index === -1) { + throw new DvmiError( + `Entry with id "${id}" not found`, + 'Run `dvmi sync-config-ai list` to see available entries and their IDs', + ) + } + + const existing = store.entries[index] + + if (changes.name !== undefined) { + validateName(changes.name) + if (changes.name !== existing.name) { + const duplicate = store.entries.find((e) => e.id !== id && e.name === changes.name && e.type === existing.type) + if (duplicate) { + throw new DvmiError( + `An entry named "${changes.name}" of type "${existing.type}" already exists`, + 'Choose a different name or update the conflicting entry', + ) + } + } + } + + const newEnvironments = changes.environments ?? existing.environments + const newType = existing.type + if (changes.environments !== undefined) { + validateEnvironments(newEnvironments, newType) + } + + /** @type {CategoryEntry} */ + const updated = { + ...existing, + ...(changes.name !== undefined ? {name: changes.name} : {}), + ...(changes.environments !== undefined ? {environments: changes.environments} : {}), + ...(changes.params !== undefined ? {params: changes.params} : {}), + ...(changes.active !== undefined ? {active: changes.active} : {}), + updatedAt: new Date().toISOString(), + } + + store.entries[index] = updated + await saveAIConfig(store, configPath) + await syncAIConfigToChezmoi() + return updated +} + +/** + * Set an entry's `active` flag to `false`. + * @param {string} id - UUID of the entry to deactivate + * @param {string} [configPath] + * @returns {Promise} + */ +export async function deactivateEntry(id, configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH) { + return updateEntry(id, {active: false}, configPath) +} + +/** + * Set an entry's `active` flag to `true`. 
+ * @param {string} id - UUID of the entry to activate + * @param {string} [configPath] + * @returns {Promise} + */ +export async function activateEntry(id, configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH) { + return updateEntry(id, {active: true}, configPath) +} + +/** + * Permanently remove an entry from the store. + * @param {string} id - UUID of the entry to delete + * @param {string} [configPath] + * @returns {Promise} + */ +export async function deleteEntry(id, configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH) { + const store = await loadAIConfig(configPath) + + const index = store.entries.findIndex((e) => e.id === id) + if (index === -1) { + throw new DvmiError( + `Entry with id "${id}" not found`, + 'Run `dvmi sync-config-ai list` to see available entries and their IDs', + ) + } + + store.entries.splice(index, 1) + await saveAIConfig(store, configPath) + await syncAIConfigToChezmoi() +} + +/** + * Return all active entries that target a given environment. + * @param {EnvironmentId} envId + * @param {string} [configPath] + * @returns {Promise} + */ +export async function getEntriesByEnvironment(envId, configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH) { + const store = await loadAIConfig(configPath) + return store.entries.filter((e) => e.active && e.environments.includes(envId)) +} + +/** + * Return all entries (active and inactive) of a given type. + * @param {CategoryType} type + * @param {string} [configPath] + * @returns {Promise} + */ +export async function getEntriesByType(type, configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH) { + const store = await loadAIConfig(configPath) + return store.entries.filter((e) => e.type === type) +} + +// ────────────────────────────────────────────────────────────────────────────── +// Chezmoi sync +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Sync the AI config file to chezmoi if dotfiles management is enabled. 
+ * Non-blocking — silently ignores errors. + * @returns {Promise} + */ +export async function syncAIConfigToChezmoi() { + try { + const cliConfig = await loadConfig() + if (!cliConfig.dotfiles?.enabled) return + const configPath = process.env.DVMI_AI_CONFIG_PATH ?? AI_CONFIG_PATH + await exec('chezmoi', ['add', configPath]) + } catch { + // Non-blocking — chezmoi sync failures should not disrupt the user's workflow + } +} diff --git a/src/services/ai-env-deployer.js b/src/services/ai-env-deployer.js new file mode 100644 index 0000000..d01b9f2 --- /dev/null +++ b/src/services/ai-env-deployer.js @@ -0,0 +1,444 @@ +/** + * @module ai-env-deployer + * Translates dvmi's abstract CategoryEntry objects into actual filesystem writes + * (JSON mutations for MCP servers, markdown/TOML files for commands, skills, and agents) + * for each supported AI coding environment. + */ + +import {readFile, writeFile, mkdir, rm} from 'node:fs/promises' +import {existsSync} from 'node:fs' +import {join, dirname} from 'node:path' +import {homedir} from 'node:os' + +/** @import { CategoryEntry, CategoryType, EnvironmentId, DetectedEnvironment } from '../types.js' */ + +// ────────────────────────────────────────────────────────────────────────────── +// Path & key resolution tables +// ────────────────────────────────────────────────────────────────────────────── + +/** + * For each environment, the target JSON file path (relative to cwd or absolute) + * and the root key that holds the MCP server map. 
+ * + * @type {Record string, mcpKey: string }>} + */ +const MCP_TARGETS = { + 'vscode-copilot': { + resolvePath: (cwd) => join(cwd, '.vscode', 'mcp.json'), + mcpKey: 'servers', + }, + 'claude-code': { + resolvePath: (cwd) => join(cwd, '.mcp.json'), + mcpKey: 'mcpServers', + }, + opencode: { + resolvePath: (cwd) => join(cwd, 'opencode.json'), + mcpKey: 'mcpServers', + }, + 'gemini-cli': { + resolvePath: (_cwd) => join(homedir(), '.gemini', 'settings.json'), + mcpKey: 'mcpServers', + }, + 'copilot-cli': { + resolvePath: (_cwd) => join(homedir(), '.copilot', 'mcp-config.json'), + mcpKey: 'mcpServers', + }, +} + +/** + * Resolve the target file path for a file-based entry (command, skill, agent). + * + * @param {string} name - Entry name (used as filename base) + * @param {CategoryType} type - Category type + * @param {EnvironmentId} envId - Target environment + * @param {string} cwd - Project working directory + * @returns {string} Absolute path to write + */ +function resolveFilePath(name, type, envId, cwd) { + switch (type) { + case 'command': + return resolveCommandPath(name, envId, cwd) + case 'skill': + return resolveSkillPath(name, envId, cwd) + case 'agent': + return resolveAgentPath(name, envId, cwd) + default: + throw new Error(`Unsupported file entry type: ${type}`) + } +} + +/** + * @param {string} name + * @param {EnvironmentId} envId + * @param {string} cwd + * @returns {string} + */ +function resolveCommandPath(name, envId, cwd) { + switch (envId) { + case 'vscode-copilot': + return join(cwd, '.github', 'prompts', `${name}.prompt.md`) + case 'claude-code': + return join(cwd, '.claude', 'commands', `${name}.md`) + case 'opencode': + return join(cwd, '.opencode', 'commands', `${name}.md`) + case 'gemini-cli': + return join(homedir(), '.gemini', 'commands', `${name}.toml`) + case 'copilot-cli': + // shared path with vscode-copilot for commands + return join(cwd, '.github', 'prompts', `${name}.prompt.md`) + default: + throw new Error(`Unknown environment 
for command: ${envId}`) + } +} + +/** + * @param {string} name + * @param {EnvironmentId} envId + * @param {string} cwd + * @returns {string} + */ +function resolveSkillPath(name, envId, cwd) { + switch (envId) { + case 'vscode-copilot': + // vscode uses a nested directory with SKILL.md inside + return join(cwd, '.github', 'skills', name, 'SKILL.md') + case 'claude-code': + return join(cwd, '.claude', 'skills', `${name}.md`) + case 'opencode': + return join(cwd, '.opencode', 'skills', `${name}.md`) + case 'copilot-cli': + return join(homedir(), '.copilot', 'skills', `${name}.md`) + default: + throw new Error(`Environment "${envId}" does not support skill entries`) + } +} + +/** + * @param {string} name + * @param {EnvironmentId} envId + * @param {string} cwd + * @returns {string} + */ +function resolveAgentPath(name, envId, cwd) { + switch (envId) { + case 'vscode-copilot': + return join(cwd, '.github', 'agents', `${name}.agent.md`) + case 'claude-code': + return join(cwd, '.claude', 'agents', `${name}.md`) + case 'opencode': + return join(cwd, '.opencode', 'agents', `${name}.md`) + case 'copilot-cli': + return join(homedir(), '.copilot', 'agents', `${name}.md`) + default: + throw new Error(`Environment "${envId}" does not support agent entries`) + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// TOML rendering +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Render a Gemini CLI command entry as a TOML string. + * No external TOML library is used — we generate the string directly. 
+ * + * @param {string} description - Short description of the command + * @param {string} content - Prompt text content + * @returns {string} TOML-formatted string + */ +function renderGeminiToml(description, content) { + // Escape triple-quotes inside the content to prevent TOML parse errors + const safeContent = content.replace(/"""/g, '\\"\\"\\"') + return `description = ${JSON.stringify(description)} + +[prompt] +text = """ +${safeContent} +""" +` +} + +// ────────────────────────────────────────────────────────────────────────────── +// JSON helpers +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Read a JSON file from disk. Returns an empty object when the file is missing. + * Throws if the file exists but cannot be parsed. + * + * @param {string} filePath - Absolute path to the JSON file + * @returns {Promise>} + */ +async function readJsonOrEmpty(filePath) { + if (!existsSync(filePath)) return {} + const raw = await readFile(filePath, 'utf8') + return JSON.parse(raw) +} + +/** + * Write a value to disk as pretty-printed JSON, creating parent directories + * as needed. + * + * @param {string} filePath - Absolute path + * @param {unknown} data - Serialisable value + * @returns {Promise} + */ +async function writeJson(filePath, data) { + const dir = dirname(filePath) + if (!existsSync(dir)) { + await mkdir(dir, {recursive: true}) + } + await writeFile(filePath, JSON.stringify(data, null, 2), 'utf8') +} + +// ────────────────────────────────────────────────────────────────────────────── +// Build MCP server object from entry params +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Convert an MCP entry's params into the server descriptor object written into + * the target JSON file. 
+ * + * @param {import('../types.js').MCPParams} params + * @returns {Record} + */ +function buildMCPServerObject(params) { + /** @type {Record} */ + const server = {} + + if (params.command !== undefined) server.command = params.command + if (params.args !== undefined) server.args = params.args + if (params.env !== undefined) server.env = params.env + if (params.url !== undefined) server.url = params.url + if (params.transport !== undefined) server.type = params.transport + + return server +} + +// ────────────────────────────────────────────────────────────────────────────── +// Public API — MCP +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Deploy an MCP entry to a specific AI environment by merging it into the + * appropriate JSON config file. Creates the file (and parent directories) if + * it does not yet exist. Existing entries under other names are preserved. + * + * Skips silently when: + * - `entry` is falsy + * - `entry.type` is not `'mcp'` + * - `entry.params` is absent + * + * @param {CategoryEntry} entry - The MCP entry to deploy + * @param {EnvironmentId} envId - Target environment identifier + * @param {string} cwd - Project working directory (used for project-relative paths) + * @returns {Promise} + */ +export async function deployMCPEntry(entry, envId, cwd) { + if (!entry || entry.type !== 'mcp' || !entry.params) return + + const target = MCP_TARGETS[envId] + if (!target) return + + const filePath = target.resolvePath(cwd) + const json = await readJsonOrEmpty(filePath) + + if (!json[target.mcpKey] || typeof json[target.mcpKey] !== 'object') { + json[target.mcpKey] = {} + } + + /** @type {Record} */ + const mcpKey = /** @type {any} */ (json[target.mcpKey]) + mcpKey[entry.name] = buildMCPServerObject(/** @type {import('../types.js').MCPParams} */ (entry.params)) + + await writeJson(filePath, json) +} + +/** + * Remove an MCP entry by name from a specific AI environment's JSON config file. 
+ * If the file does not exist the function is a no-op. + * If the MCP key becomes empty after removal, it is kept as an empty object + * (the structure is preserved). + * + * @param {string} entryName - Name of the MCP server to remove + * @param {EnvironmentId} envId - Target environment identifier + * @param {string} cwd - Project working directory + * @returns {Promise} + */ +export async function undeployMCPEntry(entryName, envId, cwd) { + const target = MCP_TARGETS[envId] + if (!target) return + + const filePath = target.resolvePath(cwd) + if (!existsSync(filePath)) return + + const json = await readJsonOrEmpty(filePath) + + if (json[target.mcpKey] && typeof json[target.mcpKey] === 'object') { + delete (/** @type {any} */ (json[target.mcpKey])[entryName]) + } + + await writeJson(filePath, json) +} + +// ────────────────────────────────────────────────────────────────────────────── +// Public API — File-based entries +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Deploy a file-based entry (command, skill, or agent) to a specific AI + * environment. Creates parent directories as needed. + * + * For Gemini CLI commands the output is TOML; for everything else it is the raw + * markdown content stored in `entry.params.content` or `entry.params.instructions`. + * + * For VS Code Copilot skills the directory structure `{name}/SKILL.md` is + * created automatically. 
+ * + * Skips silently when: + * - `entry` is falsy + * - `entry.type` is `'mcp'` (wrong function) + * - `entry.params` is absent + * + * @param {CategoryEntry} entry - The entry to deploy + * @param {EnvironmentId} envId - Target environment identifier + * @param {string} cwd - Project working directory + * @returns {Promise} + */ +export async function deployFileEntry(entry, envId, cwd) { + if (!entry || entry.type === 'mcp' || !entry.params) return + + const filePath = resolveFilePath(entry.name, entry.type, envId, cwd) + const dir = dirname(filePath) + + if (!existsSync(dir)) { + await mkdir(dir, {recursive: true}) + } + + const params = /** @type {any} */ (entry.params) + + // Gemini CLI commands use TOML format + if (envId === 'gemini-cli' && entry.type === 'command') { + const description = params.description ?? '' + const content = params.content ?? '' + await writeFile(filePath, renderGeminiToml(description, content), 'utf8') + return + } + + // All other file entries use markdown + const content = params.content ?? params.instructions ?? '' + await writeFile(filePath, content, 'utf8') +} + +/** + * Remove a deployed file-based entry from disk. This is a no-op if the file + * does not exist. 
+ * + * @param {string} entryName - Name of the entry (used to derive the file path) + * @param {CategoryType} type - Category type of the entry + * @param {EnvironmentId} envId - Target environment identifier + * @param {string} cwd - Project working directory + * @returns {Promise} + */ +export async function undeployFileEntry(entryName, type, envId, cwd) { + if (type === 'mcp') return + + const filePath = resolveFilePath(entryName, type, envId, cwd) + if (!existsSync(filePath)) return + + await rm(filePath, {force: true}) +} + +// ────────────────────────────────────────────────────────────────────────────── +// Public API — Composite helpers +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Deploy an entry to all of its target environments that are currently detected + * and readable. + * + * - Environments listed in `entry.environments` but absent from `detectedEnvs` + * are silently skipped. + * - Environments that are detected but have unreadable JSON config files are + * also skipped (to avoid clobbering corrupt files). 
+ * + * @param {CategoryEntry} entry - The entry to deploy + * @param {DetectedEnvironment[]} detectedEnvs - Environments found on the current machine + * @param {string} cwd - Project working directory + * @returns {Promise} + */ +export async function deployEntry(entry, detectedEnvs, cwd) { + if (!entry) return + + const detectedIds = new Set(detectedEnvs.map((e) => e.id)) + + for (const envId of entry.environments) { + if (!detectedIds.has(envId)) continue + + const detectedEnv = detectedEnvs.find((e) => e.id === envId) + // Skip if the environment has unreadable JSON config files that correspond + // to the MCP target path (we don't want to overwrite corrupt files) + if (detectedEnv && entry.type === 'mcp') { + const target = MCP_TARGETS[envId] + if (target) { + const targetPath = target.resolvePath(cwd) + if (detectedEnv.unreadable.includes(targetPath)) continue + } + } + + if (entry.type === 'mcp') { + await deployMCPEntry(entry, envId, cwd) + } else { + await deployFileEntry(entry, envId, cwd) + } + } +} + +/** + * Undeploy an entry from all of its target environments that are currently + * detected. This is safe to call even when `entry` is `null` or `undefined` + * (it becomes a no-op). + * + * @param {CategoryEntry | null | undefined} entry - The entry to undeploy + * @param {DetectedEnvironment[]} detectedEnvs - Environments found on the current machine + * @param {string} cwd - Project working directory + * @returns {Promise} + */ +export async function undeployEntry(entry, detectedEnvs, cwd) { + if (!entry) return + + const detectedIds = new Set(detectedEnvs.map((e) => e.id)) + + for (const envId of entry.environments) { + if (!detectedIds.has(envId)) continue + + if (entry.type === 'mcp') { + await undeployMCPEntry(entry.name, envId, cwd) + } else { + await undeployFileEntry(entry.name, entry.type, envId, cwd) + } + } +} + +/** + * Reconcile all active entries against the currently detected environments. 
+ * + * For each active entry, every detected environment listed in + * `entry.environments` is deployed (idempotent write). Environments that are + * listed but not currently detected are left untouched — we never undeploy on + * scan because the files may have been managed by the user directly + * (FR-004d: re-activation on re-detection). + * + * Inactive entries are not touched. + * + * @param {CategoryEntry[]} entries - All managed entries from the AI config store + * @param {DetectedEnvironment[]} detectedEnvs - Environments found on the current machine + * @param {string} cwd - Project working directory + * @returns {Promise} + */ +export async function reconcileOnScan(entries, detectedEnvs, cwd) { + for (const entry of entries) { + if (!entry.active) continue + await deployEntry(entry, detectedEnvs, cwd) + } +} diff --git a/src/services/ai-env-scanner.js b/src/services/ai-env-scanner.js new file mode 100644 index 0000000..518283c --- /dev/null +++ b/src/services/ai-env-scanner.js @@ -0,0 +1,242 @@ +/** + * @module ai-env-scanner + * Detects AI coding environments by scanning well-known project and global config paths. 
+ */ + +import {existsSync, readFileSync} from 'node:fs' +import {resolve, join} from 'node:path' +import {homedir} from 'node:os' + +/** @import { CategoryType, EnvironmentId, PathStatus, CategoryCounts, DetectedEnvironment, CategoryEntry } from '../types.js' */ + +/** + * @typedef {Object} PathSpec + * @property {string} path - Relative (project) or absolute (global) path string + * @property {boolean} isJson - Whether to attempt JSON.parse after reading + */ + +/** + * @typedef {Object} EnvironmentDef + * @property {EnvironmentId} id + * @property {string} name - Display name + * @property {PathSpec[]} projectPaths - Paths relative to cwd + * @property {PathSpec[]} globalPaths - Absolute paths (resolved from homedir) + * @property {CategoryType[]} supportedCategories + */ + +/** + * All recognised AI coding environments with their detection paths and capabilities. + * @type {Readonly} + */ +export const ENVIRONMENTS = Object.freeze([ + { + id: /** @type {EnvironmentId} */ ('vscode-copilot'), + name: 'VS Code Copilot', + projectPaths: [ + {path: '.github/copilot-instructions.md', isJson: false}, + {path: '.vscode/mcp.json', isJson: true}, + {path: '.github/instructions/', isJson: false}, + {path: '.github/prompts/', isJson: false}, + {path: '.github/agents/', isJson: false}, + {path: '.github/skills/', isJson: false}, + ], + globalPaths: [], + supportedCategories: /** @type {CategoryType[]} */ (['mcp', 'command', 'skill', 'agent']), + }, + { + id: /** @type {EnvironmentId} */ ('claude-code'), + name: 'Claude Code', + projectPaths: [ + {path: 'CLAUDE.md', isJson: false}, + {path: '.mcp.json', isJson: true}, + {path: '.claude/commands/', isJson: false}, + {path: '.claude/skills/', isJson: false}, + {path: '.claude/agents/', isJson: false}, + {path: '.claude/rules/', isJson: false}, + ], + globalPaths: [], + supportedCategories: /** @type {CategoryType[]} */ (['mcp', 'command', 'skill', 'agent']), + }, + { + id: /** @type {EnvironmentId} */ ('opencode'), + name: 
'OpenCode', + projectPaths: [ + {path: 'AGENTS.md', isJson: false}, + {path: '.opencode/commands/', isJson: false}, + {path: '.opencode/skills/', isJson: false}, + {path: '.opencode/agents/', isJson: false}, + {path: 'opencode.json', isJson: true}, + ], + globalPaths: [ + {path: '~/.config/opencode/opencode.json', isJson: true}, + {path: '~/.config/opencode/commands/', isJson: false}, + {path: '~/.config/opencode/agents/', isJson: false}, + {path: '~/.config/opencode/skills/', isJson: false}, + ], + supportedCategories: /** @type {CategoryType[]} */ (['mcp', 'command', 'skill', 'agent']), + }, + { + id: /** @type {EnvironmentId} */ ('gemini-cli'), + name: 'Gemini CLI', + projectPaths: [{path: 'GEMINI.md', isJson: false}], + globalPaths: [ + {path: '~/.gemini/settings.json', isJson: true}, + {path: '~/.gemini/commands/', isJson: false}, + ], + supportedCategories: /** @type {CategoryType[]} */ (['mcp', 'command']), + }, + { + id: /** @type {EnvironmentId} */ ('copilot-cli'), + name: 'GitHub Copilot CLI', + projectPaths: [], + globalPaths: [ + {path: '~/.copilot/config.json', isJson: true}, + {path: '~/.copilot/mcp-config.json', isJson: true}, + {path: '~/.copilot/agents/', isJson: false}, + {path: '~/.copilot/skills/', isJson: false}, + {path: '~/.copilot/copilot-instructions.md', isJson: false}, + ], + supportedCategories: /** @type {CategoryType[]} */ (['mcp', 'command', 'skill', 'agent']), + }, +]) + +/** + * Resolve a path spec into an absolute path. + * Project paths are resolved relative to `cwd`; global paths have their `~/` prefix + * replaced with the actual home directory. + * + * @param {PathSpec} spec + * @param {string} cwd + * @param {boolean} isGlobal + * @returns {string} + */ +function resolvePathSpec(spec, cwd, isGlobal) { + if (isGlobal) { + // Global paths are stored with a leading `~/` + return resolve(join(homedir(), spec.path.replace(/^~\//, ''))) + } + return resolve(join(cwd, spec.path)) +} + +/** + * Build a PathStatus for one path spec. 
+ * For JSON files that exist, attempt to parse them; failure marks the path as unreadable. + * + * @param {PathSpec} spec + * @param {string} absolutePath + * @param {string[]} unreadable - Mutable array; unreadable paths are pushed here + * @returns {PathStatus} + */ +function evaluatePathSpec(spec, absolutePath, unreadable) { + const exists = existsSync(absolutePath) + + if (!exists) { + return {path: absolutePath, exists: false, readable: false} + } + + if (!spec.isJson) { + return {path: absolutePath, exists: true, readable: true} + } + + // JSON file — try to parse + try { + JSON.parse(readFileSync(absolutePath, 'utf8')) + return {path: absolutePath, exists: true, readable: true} + } catch { + unreadable.push(absolutePath) + return {path: absolutePath, exists: true, readable: false} + } +} + +/** + * Compute the detection scope based on which path groups produced hits. + * + * @param {PathStatus[]} projectStatuses + * @param {PathStatus[]} globalStatuses + * @returns {'project'|'global'|'both'} + */ +function computeScope(projectStatuses, globalStatuses) { + const hasProject = projectStatuses.some((s) => s.exists) + const hasGlobal = globalStatuses.some((s) => s.exists) + + if (hasProject && hasGlobal) return 'both' + if (hasGlobal) return 'global' + return 'project' +} + +/** + * Scan the filesystem for each known AI coding environment and return only those + * that were detected (i.e. at least one config path exists on disk). 
+ * + * @param {string} [cwd] - Working directory for project-relative path resolution (defaults to process.cwd()) + * @returns {DetectedEnvironment[]} Detected environments only + */ +export function scanEnvironments(cwd = process.cwd()) { + /** @type {DetectedEnvironment[]} */ + const detected = [] + + for (const env of ENVIRONMENTS) { + /** @type {string[]} */ + const unreadable = [] + + const projectStatuses = env.projectPaths.map((spec) => { + const absPath = resolvePathSpec(spec, cwd, false) + return evaluatePathSpec(spec, absPath, unreadable) + }) + + const globalStatuses = env.globalPaths.map((spec) => { + const absPath = resolvePathSpec(spec, cwd, true) + return evaluatePathSpec(spec, absPath, unreadable) + }) + + const isDetected = [...projectStatuses, ...globalStatuses].some((s) => s.exists) + + if (!isDetected) continue + + detected.push({ + id: env.id, + name: env.name, + detected: true, + projectPaths: projectStatuses, + globalPaths: globalStatuses, + unreadable, + supportedCategories: env.supportedCategories, + counts: {mcp: 0, command: 0, skill: 0, agent: 0}, + scope: computeScope(projectStatuses, globalStatuses), + }) + } + + return detected +} + +/** + * Filter detected environments to those that support a given category type. + * + * @param {CategoryType} type - Category type to filter by + * @param {DetectedEnvironment[]} detectedEnvs - Array of detected environments from {@link scanEnvironments} + * @returns {EnvironmentId[]} IDs of environments that support the given type + */ +export function getCompatibleEnvironments(type, detectedEnvs) { + return detectedEnvs.filter((env) => env.supportedCategories.includes(type)).map((env) => env.id) +} + +/** + * Count active entries from the AI config store that target a given environment, + * grouped by category type. 
+ * + * @param {EnvironmentId} envId - Environment to count entries for + * @param {CategoryEntry[]} entries - All entries from the AI config store + * @returns {CategoryCounts} Per-category active entry counts + */ +export function computeCategoryCounts(envId, entries) { + /** @type {CategoryCounts} */ + const counts = {mcp: 0, command: 0, skill: 0, agent: 0} + + for (const entry of entries) { + if (entry.active && entry.environments.includes(envId)) { + counts[entry.type] = (counts[entry.type] ?? 0) + 1 + } + } + + return counts +} diff --git a/src/services/audit-detector.js b/src/services/audit-detector.js index 25bf036..90723c7 100644 --- a/src/services/audit-detector.js +++ b/src/services/audit-detector.js @@ -1,5 +1,5 @@ -import { existsSync } from 'node:fs' -import { resolve, join } from 'node:path' +import {existsSync} from 'node:fs' +import {resolve, join} from 'node:path' /** @import { PackageEcosystem } from '../types.js' */ diff --git a/src/services/audit-runner.js b/src/services/audit-runner.js index cdbbbae..362949e 100644 --- a/src/services/audit-runner.js +++ b/src/services/audit-runner.js @@ -1,5 +1,5 @@ -import { execa } from 'execa' -import { dirname } from 'node:path' +import {execa} from 'execa' +import {dirname} from 'node:path' /** @import { PackageEcosystem, VulnerabilityFinding } from '../types.js' */ @@ -141,8 +141,9 @@ function parsePipAudit(data, ecosystem) { if (!Array.isArray(dep.vulns) || dep.vulns.length === 0) continue for (const vuln of dep.vulns) { // Determine best ID: prefer CVE - const cveId = vuln.id?.startsWith('CVE-') ? vuln.id - : (vuln.aliases ?? []).find((a) => a.startsWith('CVE-')) ?? null + const cveId = vuln.id?.startsWith('CVE-') + ? vuln.id + : ((vuln.aliases ?? []).find((a) => a.startsWith('CVE-')) ?? null) findings.push({ package: dep.name, @@ -151,9 +152,8 @@ function parsePipAudit(data, ecosystem) { cveId, advisoryUrl: null, title: vuln.description ?? 
null, - patchedVersions: Array.isArray(vuln.fix_versions) && vuln.fix_versions.length > 0 - ? `>=${vuln.fix_versions[0]}` - : null, + patchedVersions: + Array.isArray(vuln.fix_versions) && vuln.fix_versions.length > 0 ? `>=${vuln.fix_versions[0]}` : null, ecosystem, isDirect: null, }) @@ -177,9 +177,7 @@ function parseCargoAudit(data, ecosystem) { const advisory = item.advisory ?? {} const pkg = item.package ?? {} - const cveId = Array.isArray(advisory.aliases) - ? (advisory.aliases.find((a) => /^CVE-/i.test(a)) ?? null) - : null + const cveId = Array.isArray(advisory.aliases) ? (advisory.aliases.find((a) => /^CVE-/i.test(a)) ?? null) : null // CVSS vector string — extract base score from it? Too complex; mark Unknown for now findings.push({ @@ -275,55 +273,56 @@ export async function runAudit(ecosystem) { }) } catch (err) { // Binary not found — tool not installed - const errMsg = /** @type {any} */ (err).code === 'ENOENT' - ? `"${cmd}" is not installed. Install it to scan ${ecosystem.name} dependencies.` - : String(err) - return { findings: [], error: errMsg } + const errMsg = + /** @type {any} */ (err).code === 'ENOENT' + ? `"${cmd}" is not installed. Install it to scan ${ecosystem.name} dependencies.` + : String(err) + return {findings: [], error: errMsg} } const output = result.stdout ?? result.all ?? '' if (!output.trim()) { if (result.exitCode !== 0 && result.exitCode !== 1) { - return { findings: [], error: `${cmd} exited with code ${result.exitCode}: ${result.stderr ?? ''}` } + return {findings: [], error: `${cmd} exited with code ${result.exitCode}: ${result.stderr ?? 
''}`} } - return { findings: [], error: null } + return {findings: [], error: null} } try { switch (ecosystem.name) { case 'npm': { const data = JSON.parse(output) - return { findings: parseNpmAudit(data, ecosystem.name), error: null } + return {findings: parseNpmAudit(data, ecosystem.name), error: null} } case 'pnpm': { const data = JSON.parse(output) - return { findings: parsePnpmAudit(data, ecosystem.name), error: null } + return {findings: parsePnpmAudit(data, ecosystem.name), error: null} } case 'yarn': { - return { findings: parseYarnAudit(output, ecosystem.name), error: null } + return {findings: parseYarnAudit(output, ecosystem.name), error: null} } case 'pip': { const data = JSON.parse(output) - return { findings: parsePipAudit(data, ecosystem.name), error: null } + return {findings: parsePipAudit(data, ecosystem.name), error: null} } case 'cargo': { const data = JSON.parse(output) - return { findings: parseCargoAudit(data, ecosystem.name), error: null } + return {findings: parseCargoAudit(data, ecosystem.name), error: null} } case 'bundler': { const data = JSON.parse(output) - return { findings: parseBundlerAudit(data, ecosystem.name), error: null } + return {findings: parseBundlerAudit(data, ecosystem.name), error: null} } case 'composer': { const data = JSON.parse(output) - return { findings: parseComposerAudit(data, ecosystem.name), error: null } + return {findings: parseComposerAudit(data, ecosystem.name), error: null} } default: - return { findings: [], error: `Unknown ecosystem: ${ecosystem.name}` } + return {findings: [], error: `Unknown ecosystem: ${ecosystem.name}`} } } catch (parseErr) { - return { findings: [], error: `Failed to parse ${ecosystem.name} audit output: ${parseErr.message}` } + return {findings: [], error: `Failed to parse ${ecosystem.name} audit output: ${parseErr.message}`} } } @@ -333,15 +332,25 @@ export async function runAudit(ecosystem) { * @returns {import('../types.js').ScanSummary} */ export function 
summarizeFindings(findings) { - const summary = { critical: 0, high: 0, medium: 0, low: 0, unknown: 0, total: 0 } + const summary = {critical: 0, high: 0, medium: 0, low: 0, unknown: 0, total: 0} for (const f of findings) { summary.total++ switch (f.severity) { - case 'Critical': summary.critical++; break - case 'High': summary.high++; break - case 'Medium': summary.medium++; break - case 'Low': summary.low++; break - default: summary.unknown++; break + case 'Critical': + summary.critical++ + break + case 'High': + summary.high++ + break + case 'Medium': + summary.medium++ + break + case 'Low': + summary.low++ + break + default: + summary.unknown++ + break } } return summary diff --git a/src/services/auth.js b/src/services/auth.js index 324c207..47edc30 100644 --- a/src/services/auth.js +++ b/src/services/auth.js @@ -1,5 +1,5 @@ -import { exec } from './shell.js' -import { loadConfig } from './config.js' +import {exec} from './shell.js' +import {loadConfig} from './config.js' /** * @typedef {Object} AuthStatus @@ -17,11 +17,11 @@ import { loadConfig } from './config.js' export async function checkGitHubAuth() { const result = await exec('gh', ['auth', 'status']) if (result.exitCode !== 0) { - return { authenticated: false, error: result.stderr } + return {authenticated: false, error: result.stderr} } // Extract username from output like "Logged in to github.com as username" const match = result.stderr.match(/Logged in to .+ as (\S+)/) - return { authenticated: true, username: match?.[1] ?? 'unknown' } + return {authenticated: true, username: match?.[1] ?? 
'unknown'} } /** @@ -31,7 +31,7 @@ export async function checkGitHubAuth() { export async function loginGitHub() { const result = await exec('gh', ['auth', 'login', '--web']) if (result.exitCode !== 0) { - return { authenticated: false, error: result.stderr } + return {authenticated: false, error: result.stderr} } return checkGitHubAuth() } @@ -42,7 +42,7 @@ export async function loginGitHub() { */ export async function checkAWSAuth() { const config = await loadConfig() - if (!config.awsProfile) return { authenticated: false, error: 'No AWS profile configured' } + if (!config.awsProfile) return {authenticated: false, error: 'No AWS profile configured'} const result = await exec('aws-vault', [ 'exec', @@ -55,7 +55,7 @@ export async function checkAWSAuth() { 'json', ]) if (result.exitCode !== 0) { - return { authenticated: false, error: result.stderr || 'Session expired' } + return {authenticated: false, error: result.stderr || 'Session expired'} } try { const identity = JSON.parse(result.stdout) @@ -65,7 +65,7 @@ export async function checkAWSAuth() { role: identity.Arn?.split('/').at(-1), } } catch { - return { authenticated: false, error: 'Could not parse AWS identity' } + return {authenticated: false, error: 'Could not parse AWS identity'} } } @@ -77,7 +77,7 @@ export async function checkAWSAuth() { export async function loginAWS(profile) { const result = await exec('aws-vault', ['login', profile]) if (result.exitCode !== 0) { - return { authenticated: false, error: result.stderr } + return {authenticated: false, error: result.stderr} } return checkAWSAuth() } diff --git a/src/services/awesome-copilot.js b/src/services/awesome-copilot.js index d8cf80f..bf2e59f 100644 --- a/src/services/awesome-copilot.js +++ b/src/services/awesome-copilot.js @@ -1,5 +1,5 @@ -import { createOctokit } from './github.js' -import { DvmiError } from '../utils/errors.js' +import {createOctokit} from './github.js' +import {DvmiError} from '../utils/errors.js' /** @import { AwesomeEntry 
} from '../types.js' */ @@ -10,7 +10,7 @@ import { DvmiError } from '../utils/errors.js' */ export const AWESOME_CATEGORIES = ['agents', 'instructions', 'skills', 'plugins', 'hooks', 'workflows'] -const AWESOME_REPO = { owner: 'github', repo: 'awesome-copilot' } +const AWESOME_REPO = {owner: 'github', repo: 'awesome-copilot'} /** * Parse a GitHub-flavoured markdown table into AwesomeEntry objects. @@ -47,7 +47,10 @@ export function parseMarkdownTable(md, category) { if (/^[\*_]?name[\*_]?$/i.test(rawName)) continue // Strip badge images: [![alt](img)](url) → keep nothing; [![alt](img)] → keep nothing - const noBadge = rawName.replace(/\[!\[.*?\]\(.*?\)\]\(.*?\)/g, '').replace(/!\[.*?\]\(.*?\)/g, '').trim() + const noBadge = rawName + .replace(/\[!\[.*?\]\(.*?\)\]\(.*?\)/g, '') + .replace(/!\[.*?\]\(.*?\)/g, '') + .trim() // Extract [text](url) link const linkMatch = noBadge.match(/\[([^\]]+)\]\(([^)]+)\)/) diff --git a/src/services/aws-costs.js b/src/services/aws-costs.js index 75302e4..1c9f528 100644 --- a/src/services/aws-costs.js +++ b/src/services/aws-costs.js @@ -1,4 +1,4 @@ -import { CostExplorerClient, GetCostAndUsageCommand } from '@aws-sdk/client-cost-explorer' +import {CostExplorerClient, GetCostAndUsageCommand} from '@aws-sdk/client-cost-explorer' /** @import { AWSCostEntry, CostGroupMode, CostTrendSeries } from '../types.js' */ @@ -14,18 +14,18 @@ function getPeriodDates(period) { if (period === 'last-month') { const start = new Date(now.getFullYear(), now.getMonth() - 1, 1) const end = new Date(now.getFullYear(), now.getMonth(), 1) - return { start: fmt(start), end: fmt(end) } + return {start: fmt(start), end: fmt(end)} } if (period === 'last-week') { const end = new Date(now) end.setDate(now.getDate() - now.getDay()) const start = new Date(end) start.setDate(end.getDate() - 7) - return { start: fmt(start), end: fmt(end) } + return {start: fmt(start), end: fmt(end)} } // mtd const start = new Date(now.getFullYear(), now.getMonth(), 1) - return { start: 
fmt(start), end: fmt(now) } + return {start: fmt(start), end: fmt(now)} } /** @@ -36,7 +36,7 @@ export function getTwoMonthPeriod() { const now = new Date() const fmt = (d) => d.toISOString().split('T')[0] const start = new Date(now.getFullYear(), now.getMonth() - 2, 1) - return { start: fmt(start), end: fmt(now) } + return {start: fmt(start), end: fmt(now)} } /** @@ -62,15 +62,15 @@ function stripTagPrefix(rawKey) { */ function buildGroupBy(groupBy, tagKey) { if (groupBy === 'service') { - return [{ Type: 'DIMENSION', Key: 'SERVICE' }] + return [{Type: 'DIMENSION', Key: 'SERVICE'}] } if (groupBy === 'tag') { - return [{ Type: 'TAG', Key: tagKey ?? '' }] + return [{Type: 'TAG', Key: tagKey ?? ''}] } // both return [ - { Type: 'DIMENSION', Key: 'SERVICE' }, - { Type: 'TAG', Key: tagKey ?? '' }, + {Type: 'DIMENSION', Key: 'SERVICE'}, + {Type: 'TAG', Key: tagKey ?? ''}, ] } @@ -85,22 +85,22 @@ function buildGroupBy(groupBy, tagKey) { */ export async function getServiceCosts(serviceName, tags, period = 'last-month', groupBy = 'service', tagKey) { // Cost Explorer always uses us-east-1 - const client = new CostExplorerClient({ region: 'us-east-1' }) - const { start, end } = getPeriodDates(period) + const client = new CostExplorerClient({region: 'us-east-1'}) + const {start, end} = getPeriodDates(period) // Build tag filter from project tags const tagEntries = Object.entries(tags) const filter = tagEntries.length === 1 - ? { Tags: { Key: tagEntries[0][0], Values: [tagEntries[0][1]] } } + ? 
{Tags: {Key: tagEntries[0][0], Values: [tagEntries[0][1]]}} : { And: tagEntries.map(([k, v]) => ({ - Tags: { Key: k, Values: [v] }, + Tags: {Key: k, Values: [v]}, })), } const command = new GetCostAndUsageCommand({ - TimePeriod: { Start: start, End: end }, + TimePeriod: {Start: start, End: end}, Granularity: 'MONTHLY', Metrics: ['UnblendedCost'], Filter: filter, @@ -120,7 +120,7 @@ export async function getServiceCosts(serviceName, tags, period = 'last-month', serviceName: groupBy === 'tag' ? stripTagPrefix(keys[0] ?? '') : (keys[0] ?? 'Unknown'), amount, unit: group.Metrics?.UnblendedCost?.Unit ?? 'USD', - period: { start, end }, + period: {start, end}, } if (groupBy === 'both') { entry.tagValue = stripTagPrefix(keys[1] ?? '') @@ -132,7 +132,7 @@ export async function getServiceCosts(serviceName, tags, period = 'last-month', } } - return { entries, period: { start, end } } + return {entries, period: {start, end}} } /** @@ -143,8 +143,8 @@ export async function getServiceCosts(serviceName, tags, period = 'last-month', * @returns {Promise} */ export async function getTrendCosts(groupBy = 'service', tagKey) { - const client = new CostExplorerClient({ region: 'us-east-1' }) - const { start, end } = getTwoMonthPeriod() + const client = new CostExplorerClient({region: 'us-east-1'}) + const {start, end} = getTwoMonthPeriod() /** @type {Map>} seriesName → date → amount */ const seriesMap = new Map() @@ -152,11 +152,11 @@ export async function getTrendCosts(groupBy = 'service', tagKey) { let nextPageToken = undefined do { const command = new GetCostAndUsageCommand({ - TimePeriod: { Start: start, End: end }, + TimePeriod: {Start: start, End: end}, Granularity: 'DAILY', Metrics: ['UnblendedCost'], GroupBy: buildGroupBy(groupBy, tagKey), - ...(nextPageToken ? { NextPageToken: nextPageToken } : {}), + ...(nextPageToken ? 
{NextPageToken: nextPageToken} : {}), }) const result = await client.send(command) @@ -194,9 +194,9 @@ export async function getTrendCosts(groupBy = 'service', tagKey) { for (const [name, dateMap] of seriesMap) { const points = Array.from(dateMap.entries()) .sort(([a], [b]) => a.localeCompare(b)) - .map(([date, amount]) => ({ date, amount })) + .map(([date, amount]) => ({date, amount})) if (points.some((p) => p.amount > 0)) { - series.push({ name, points }) + series.push({name, points}) } } diff --git a/src/services/clickup.js b/src/services/clickup.js index cea61d4..2a0c308 100644 --- a/src/services/clickup.js +++ b/src/services/clickup.js @@ -1,7 +1,7 @@ import http from 'node:http' -import { randomBytes } from 'node:crypto' -import { openBrowser } from '../utils/open-browser.js' -import { loadConfig, saveConfig } from './config.js' +import {randomBytes} from 'node:crypto' +import {openBrowser} from '../utils/open-browser.js' +import {loadConfig, saveConfig} from './config.js' /** @import { ClickUpTask } from '../types.js' */ @@ -25,10 +25,10 @@ function localDateString(date) { async function getToken() { // Allow tests / CI to inject a token via environment variable if (process.env.CLICKUP_TOKEN) return process.env.CLICKUP_TOKEN - try { - const { default: keytar } = await import('keytar') - return keytar.getPassword('devvami', TOKEN_KEY) - } catch { + try { + const {default: keytar} = await import('keytar') + return keytar.getPassword('devvami', TOKEN_KEY) + } catch { // keytar not available (e.g. WSL2 without D-Bus) — fallback to config const config = await loadConfig() return config.clickup?.token ?? 
null @@ -41,17 +41,17 @@ async function getToken() { * @returns {Promise} */ export async function storeToken(token) { - try { - const { default: keytar } = await import('keytar') - await keytar.setPassword('devvami', TOKEN_KEY, token) - } catch { + try { + const {default: keytar} = await import('keytar') + await keytar.setPassword('devvami', TOKEN_KEY, token) + } catch { // Fallback: store in config (less secure) process.stderr.write( 'Warning: keytar unavailable. ClickUp token will be stored in plaintext.\n' + - 'Run `dvmi auth logout` after this session on shared machines.\n', + 'Run `dvmi auth logout` after this session on shared machines.\n', ) const config = await loadConfig() - await saveConfig({ ...config, clickup: { ...config.clickup, token } }) + await saveConfig({...config, clickup: {...config.clickup, token}}) } } @@ -81,8 +81,8 @@ export async function oauthFlow(clientId, clientSecret) { try { const resp = await fetch(`${API_BASE}/oauth/token`, { method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ client_id: clientId, client_secret: clientSecret, code }), + headers: {'Content-Type': 'application/json'}, + body: JSON.stringify({client_id: clientId, client_secret: clientSecret, code}), }) const data = /** @type {any} */ (await resp.json()) await storeToken(data.access_token) @@ -108,12 +108,12 @@ export async function oauthFlow(clientId, clientSecret) { * @param {number} [retries] * @returns {Promise} */ - async function clickupFetch(path, retries = 0) { - const MAX_RETRIES = 5 - const token = await getToken() - if (!token) throw new Error('ClickUp not authenticated. Run `dvmi init` to authorize.') +async function clickupFetch(path, retries = 0) { + const MAX_RETRIES = 5 + const token = await getToken() + if (!token) throw new Error('ClickUp not authenticated. 
Run `dvmi init` to authorize.') const resp = await fetch(`${API_BASE}${path}`, { - headers: { Authorization: token }, + headers: {Authorization: token}, }) if (resp.status === 429) { if (retries >= MAX_RETRIES) { @@ -136,7 +136,7 @@ export async function oauthFlow(clientId, clientSecret) { */ export async function getUser() { const data = /** @type {any} */ (await clickupFetch('/user')) - return { id: String(data.user.id), username: data.user.username } + return {id: String(data.user.id), username: data.user.username} } /** @@ -201,8 +201,8 @@ export async function getTasksToday(teamId) { const endOfTodayMs = new Date().setHours(23, 59, 59, 999) const [overdueTasks, inProgressTasks] = await Promise.all([ - getTasks(teamId, { due_date_lt: endOfTodayMs }), - getTasks(teamId, { status: 'in progress' }), + getTasks(teamId, {due_date_lt: endOfTodayMs}), + getTasks(teamId, {status: 'in progress'}), ]) // De-duplicate by task ID (a task may appear in both result sets) @@ -288,11 +288,11 @@ export async function isAuthenticated() { export async function validateToken() { try { const data = /** @type {any} */ (await clickupFetch('/user')) - return { valid: true, user: { id: data.user.id, username: data.user.username } } + return {valid: true, user: {id: data.user.id, username: data.user.username}} } catch (err) { // 401 or no token → not valid if (err instanceof Error && (err.message.includes('401') || err.message.includes('not authenticated'))) { - return { valid: false } + return {valid: false} } throw err } @@ -304,5 +304,5 @@ export async function validateToken() { */ export async function getTeams() { const data = /** @type {any} */ (await clickupFetch('/team')) - return (data.teams ?? []).map((t) => ({ id: String(t.id), name: t.name })) + return (data.teams ?? 
[]).map((t) => ({id: String(t.id), name: t.name})) } diff --git a/src/services/cloudwatch-logs.js b/src/services/cloudwatch-logs.js index de55d38..87e23c6 100644 --- a/src/services/cloudwatch-logs.js +++ b/src/services/cloudwatch-logs.js @@ -1,8 +1,4 @@ -import { - CloudWatchLogsClient, - paginateDescribeLogGroups, - FilterLogEventsCommand, -} from '@aws-sdk/client-cloudwatch-logs' +import {CloudWatchLogsClient, paginateDescribeLogGroups, FilterLogEventsCommand} from '@aws-sdk/client-cloudwatch-logs' /** @import { LogGroup, LogEvent, LogFilterResult } from '../types.js' */ @@ -20,7 +16,7 @@ export function sinceToEpochMs(since) { } const offset = MS[since] if (!offset) throw new Error(`Invalid since value: ${since}. Must be one of: 1h, 24h, 7d`) - return { startTime: now - offset, endTime: now } + return {startTime: now - offset, endTime: now} } /** @@ -29,11 +25,11 @@ export function sinceToEpochMs(since) { * @returns {Promise} */ export async function listLogGroups(region = 'eu-west-1') { - const client = new CloudWatchLogsClient({ region }) + const client = new CloudWatchLogsClient({region}) /** @type {LogGroup[]} */ const groups = [] - const paginator = paginateDescribeLogGroups({ client }, {}) + const paginator = paginateDescribeLogGroups({client}, {}) for await (const page of paginator) { for (const lg of page.logGroups ?? 
[]) { groups.push({ @@ -59,7 +55,7 @@ export async function listLogGroups(region = 'eu-west-1') { * @returns {Promise} */ export async function filterLogEvents(logGroupName, filterPattern, startTime, endTime, limit, region = 'eu-west-1') { - const client = new CloudWatchLogsClient({ region }) + const client = new CloudWatchLogsClient({region}) const command = new FilterLogEventsCommand({ logGroupName, diff --git a/src/services/config.js b/src/services/config.js index fea53a6..a04bebd 100644 --- a/src/services/config.js +++ b/src/services/config.js @@ -1,13 +1,13 @@ -import { readFile, writeFile, mkdir, chmod } from 'node:fs/promises' -import { existsSync, readFileSync } from 'node:fs' -import { join } from 'node:path' -import { homedir } from 'node:os' +import {readFile, writeFile, mkdir, chmod} from 'node:fs/promises' +import {existsSync, readFileSync} from 'node:fs' +import {join} from 'node:path' +import {homedir} from 'node:os' /** @import { CLIConfig } from '../types.js' */ const CONFIG_DIR = process.env.XDG_CONFIG_HOME - ? join(process.env.XDG_CONFIG_HOME, 'dvmi') - : join(homedir(), '.config', 'dvmi') + ? join(process.env.XDG_CONFIG_HOME, 'dvmi') + : join(homedir(), '.config', 'dvmi') export const CONFIG_PATH = join(CONFIG_DIR, 'config.json') @@ -26,12 +26,12 @@ const DEFAULTS = { * @returns {Promise} */ export async function loadConfig(configPath = process.env.DVMI_CONFIG_PATH ?? CONFIG_PATH) { - if (!existsSync(configPath)) return { ...DEFAULTS } + if (!existsSync(configPath)) return {...DEFAULTS} try { const raw = await readFile(configPath, 'utf8') - return { ...DEFAULTS, ...JSON.parse(raw) } + return {...DEFAULTS, ...JSON.parse(raw)} } catch { - return { ...DEFAULTS } + return {...DEFAULTS} } } @@ -44,7 +44,7 @@ export async function loadConfig(configPath = process.env.DVMI_CONFIG_PATH ?? 
CO export async function saveConfig(config, configPath = CONFIG_PATH) { const dir = configPath.replace(/\/[^/]+$/, '') if (!existsSync(dir)) { - await mkdir(dir, { recursive: true }) + await mkdir(dir, {recursive: true}) } await writeFile(configPath, JSON.stringify(config, null, 2), 'utf8') await chmod(configPath, 0o600) @@ -66,11 +66,11 @@ export function configExists(configPath = CONFIG_PATH) { * @returns {CLIConfig} */ export function loadConfigSync(configPath = process.env.DVMI_CONFIG_PATH ?? CONFIG_PATH) { - if (!existsSync(configPath)) return { ...DEFAULTS } + if (!existsSync(configPath)) return {...DEFAULTS} try { const raw = readFileSync(configPath, 'utf8') - return { ...DEFAULTS, ...JSON.parse(raw) } + return {...DEFAULTS, ...JSON.parse(raw)} } catch { - return { ...DEFAULTS } + return {...DEFAULTS} } } diff --git a/src/services/docs.js b/src/services/docs.js index 2eec5a3..c40323a 100644 --- a/src/services/docs.js +++ b/src/services/docs.js @@ -1,7 +1,7 @@ -import { createOctokit } from './github.js' -import { exec } from './shell.js' -import { isOpenApi, isAsyncApi } from '../formatters/openapi.js' -import { load } from 'js-yaml' +import {createOctokit} from './github.js' +import {exec} from './shell.js' +import {isOpenApi, isAsyncApi} from '../formatters/openapi.js' +import {load} from 'js-yaml' /** @import { DocumentEntry, RepoDocsIndex, SearchMatch, DetectedRepo } from '../types.js' */ @@ -18,7 +18,7 @@ export async function detectCurrentRepo() { if (!match) { throw new Error('Could not detect GitHub repository from git remote. Use --repo to specify a repository.') } - return { owner: match[1], repo: match[2] } + return {owner: match[1], repo: match[2]} } /** @@ -27,22 +27,22 @@ export async function detectCurrentRepo() { * @returns {DocumentEntry|null} */ function classifyEntry(entry) { - const { size } = entry + const {size} = entry const path = entry.path if (size === 0) return null const name = path.split('/').pop() ?? 
path if (/^readme\.(md|rst|txt)$/i.test(path)) { - return { name, path, type: 'readme', size } + return {name, path, type: 'readme', size} } if (/(openapi|swagger)\.(ya?ml|json)$/i.test(path)) { - return { name, path, type: 'swagger', size } + return {name, path, type: 'swagger', size} } if (/asyncapi\.(ya?ml|json)$/i.test(path)) { - return { name, path, type: 'asyncapi', size } + return {name, path, type: 'asyncapi', size} } if (path.startsWith('docs/') && /\.(md|rst|txt)$/.test(path)) { - return { name, path, type: 'doc', size } + return {name, path, type: 'doc', size} } return null } @@ -54,7 +54,7 @@ function classifyEntry(entry) { * @returns {number} */ function sortEntries(a, b) { - const order = { readme: 0, swagger: 1, asyncapi: 2, doc: 3 } + const order = {readme: 0, swagger: 1, asyncapi: 2, doc: 3} const diff = order[a.type] - order[b.type] return diff !== 0 ? diff : a.path.localeCompare(b.path) } @@ -69,18 +69,18 @@ export async function listDocs(owner, repo) { const octokit = await createOctokit() // 1. Get default branch - const { data: repoData } = await octokit.rest.repos.get({ owner, repo }) + const {data: repoData} = await octokit.rest.repos.get({owner, repo}) const defaultBranch = repoData.default_branch // 2. Get HEAD SHA - const { data: ref } = await octokit.rest.git.getRef({ + const {data: ref} = await octokit.rest.git.getRef({ owner, repo, ref: `heads/${defaultBranch}`, }) // 3. Fetch full recursive tree - const { data: tree } = await octokit.rest.git.getTree({ + const {data: tree} = await octokit.rest.git.getTree({ owner, repo, tree_sha: ref.object.sha, @@ -91,7 +91,7 @@ export async function listDocs(owner, repo) { const entries = [] for (const e of tree.tree) { if (e.type !== 'blob') continue - const entry = classifyEntry({ path: e.path ?? '', size: e.size ?? 0 }) + const entry = classifyEntry({path: e.path ?? '', size: e.size ?? 
0}) if (entry) entries.push(entry) } return entries.sort(sortEntries) @@ -106,7 +106,7 @@ export async function listDocs(owner, repo) { */ export async function readFile(owner, repo, path) { const octokit = await createOctokit() - const { data } = await octokit.rest.repos.getContent({ owner, repo, path }) + const {data} = await octokit.rest.repos.getContent({owner, repo, path}) if (Array.isArray(data) || data.type !== 'file') { throw new Error(`"${path}" is not a file.`) } @@ -198,9 +198,7 @@ export function detectApiSpecType(path, content) { if (/asyncapi\.(ya?ml|json)$/i.test(path)) return 'asyncapi' // Try to detect from content try { - const doc = /^\s*\{/.test(content.trim()) - ? JSON.parse(content) - : load(content) + const doc = /^\s*\{/.test(content.trim()) ? JSON.parse(content) : load(content) if (doc && typeof doc === 'object') { if (isOpenApi(/** @type {Record} */ (doc))) return 'swagger' if (isAsyncApi(/** @type {Record} */ (doc))) return 'asyncapi' @@ -208,7 +206,8 @@ export function detectApiSpecType(path, content) { } catch (err) { // File content is not valid YAML/JSON — not an API spec, return null. // Log at debug level for troubleshooting without exposing parse errors to users. 
- if (process.env.DVMI_DEBUG) process.stderr.write(`[detectApiSpecType] parse failed: ${/** @type {Error} */ (err).message}\n`) + if (process.env.DVMI_DEBUG) + process.stderr.write(`[detectApiSpecType] parse failed: ${/** @type {Error} */ (err).message}\n`) } return null } diff --git a/src/services/dotfiles.js b/src/services/dotfiles.js index 5c1ae91..18e503a 100644 --- a/src/services/dotfiles.js +++ b/src/services/dotfiles.js @@ -1,8 +1,8 @@ -import { homedir } from 'node:os' -import { existsSync } from 'node:fs' -import { join } from 'node:path' -import { which, exec, execOrThrow } from './shell.js' -import { loadConfig, saveConfig } from './config.js' +import {homedir} from 'node:os' +import {existsSync} from 'node:fs' +import {join} from 'node:path' +import {which, exec, execOrThrow} from './shell.js' +import {loadConfig, saveConfig} from './config.js' /** @import { Platform, DotfileEntry, DotfileRecommendation, DotfilesSetupResult, DotfilesAddResult, SetupStep, StepResult, CLIConfig } from '../types.js' */ @@ -31,26 +31,116 @@ export const SENSITIVE_PATTERNS = [ */ export const DEFAULT_FILE_LIST = [ // Shell - { path: '~/.zshrc', category: 'shell', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Zsh configuration' }, - { path: '~/.bashrc', category: 'shell', platforms: ['linux', 'wsl2'], autoEncrypt: false, description: 'Bash configuration' }, - { path: '~/.bash_profile', category: 'shell', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Bash profile' }, - { path: '~/.zprofile', category: 'shell', platforms: ['macos'], autoEncrypt: false, description: 'Zsh login profile' }, - { path: '~/.config/fish/config.fish', category: 'shell', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Fish shell configuration' }, + { + path: '~/.zshrc', + category: 'shell', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Zsh configuration', + }, + { + path: '~/.bashrc', + 
category: 'shell', + platforms: ['linux', 'wsl2'], + autoEncrypt: false, + description: 'Bash configuration', + }, + { + path: '~/.bash_profile', + category: 'shell', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Bash profile', + }, + {path: '~/.zprofile', category: 'shell', platforms: ['macos'], autoEncrypt: false, description: 'Zsh login profile'}, + { + path: '~/.config/fish/config.fish', + category: 'shell', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Fish shell configuration', + }, // Git - { path: '~/.gitconfig', category: 'git', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Git global config' }, - { path: '~/.gitignore_global', category: 'git', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Global gitignore patterns' }, + { + path: '~/.gitconfig', + category: 'git', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Git global config', + }, + { + path: '~/.gitignore_global', + category: 'git', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Global gitignore patterns', + }, // Editor - { path: '~/.vimrc', category: 'editor', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Vim configuration' }, - { path: '~/.config/nvim/init.vim', category: 'editor', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Neovim configuration' }, - { path: '~/.config/nvim/init.lua', category: 'editor', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Neovim Lua configuration' }, + { + path: '~/.vimrc', + category: 'editor', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Vim configuration', + }, + { + path: '~/.config/nvim/init.vim', + category: 'editor', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Neovim configuration', + }, + { + path: '~/.config/nvim/init.lua', + 
category: 'editor', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Neovim Lua configuration', + }, // Package / macOS-specific - { path: '~/.Brewfile', category: 'package', platforms: ['macos'], autoEncrypt: false, description: 'Homebrew bundle file' }, - { path: '~/.config/nvim', category: 'editor', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: false, description: 'Neovim config directory' }, + { + path: '~/.Brewfile', + category: 'package', + platforms: ['macos'], + autoEncrypt: false, + description: 'Homebrew bundle file', + }, + { + path: '~/.config/nvim', + category: 'editor', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: false, + description: 'Neovim config directory', + }, // Security - { path: '~/.ssh/config', category: 'security', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: true, description: 'SSH client configuration (auto-encrypted)' }, - { path: '~/.ssh/id_ed25519', category: 'security', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: true, description: 'SSH private key (auto-encrypted)' }, - { path: '~/.ssh/id_rsa', category: 'security', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: true, description: 'SSH RSA private key (auto-encrypted)' }, - { path: '~/.gnupg/pubring.kbx', category: 'security', platforms: ['macos', 'linux', 'wsl2'], autoEncrypt: true, description: 'GPG public keyring (auto-encrypted)' }, + { + path: '~/.ssh/config', + category: 'security', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: true, + description: 'SSH client configuration (auto-encrypted)', + }, + { + path: '~/.ssh/id_ed25519', + category: 'security', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: true, + description: 'SSH private key (auto-encrypted)', + }, + { + path: '~/.ssh/id_rsa', + category: 'security', + platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: true, + description: 'SSH RSA private key (auto-encrypted)', + }, + { + path: '~/.gnupg/pubring.kbx', + category: 'security', + 
platforms: ['macos', 'linux', 'wsl2'], + autoEncrypt: true, + description: 'GPG public keyring (auto-encrypted)', + }, ] // --------------------------------------------------------------------------- @@ -149,11 +239,12 @@ function globToRegex(pattern) { const expanded = expandTilde(pattern) // Split on `**` to handle double-star separately const parts = expanded.split('**') - const escaped = parts.map((part) => - part - .replace(/[.+^${}()|[\]\\]/g, '\\$&') // escape regex special chars - .replace(/\*/g, '[^/]*') // single * → any non-separator - .replace(/\?/g, '[^/]'), // ? → any single non-separator char + const escaped = parts.map( + (part) => + part + .replace(/[.+^${}()|[\]\\]/g, '\\$&') // escape regex special chars + .replace(/\*/g, '[^/]*') // single * → any non-separator + .replace(/\?/g, '[^/]'), // ? → any single non-separator char ) const src = escaped.join('.*') // ** → match anything including / return new RegExp(`^${src}$`, 'i') @@ -246,14 +337,15 @@ export function buildSetupSteps(platform, options = {}) { run: async () => { const installed = await isChezmoiInstalled() if (!installed) { - const hint = platform === 'macos' - ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' - : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' - return { status: 'failed', hint } + const hint = + platform === 'macos' + ? 'Run `brew install chezmoi` or visit https://chezmoi.io/install' + : 'Run `sh -c "$(curl -fsLS get.chezmoi.io)"` or visit https://chezmoi.io/install' + return {status: 'failed', hint} } const result = await exec('chezmoi', ['--version']) const version = (result.stdout || result.stderr).trim() - return { status: 'success', message: `chezmoi ${version}` } + return {status: 'success', message: `chezmoi ${version}`} }, }) @@ -267,13 +359,13 @@ export function buildSetupSteps(platform, options = {}) { run: async () => { const config = options.existingConfig !== undefined ? 
options.existingConfig : await getChezmoiConfig() if (!config) { - return { status: 'success', message: 'No existing configuration — fresh setup' } + return {status: 'success', message: 'No existing configuration — fresh setup'} } const hasEncryption = config.encryption?.tool === 'age' || !!config.age?.identity if (hasEncryption) { - return { status: 'skipped', message: 'Age encryption already configured' } + return {status: 'skipped', message: 'Age encryption already configured'} } - return { status: 'success', message: 'Existing config found without encryption — will add age' } + return {status: 'success', message: 'Existing config found without encryption — will add age'} }, }) @@ -287,18 +379,18 @@ export function buildSetupSteps(platform, options = {}) { run: async () => { // Skip if key already exists if (existsSync(ageKeyPath)) { - return { status: 'skipped', message: `Age key already exists at ${ageKeyPath}` } + return {status: 'skipped', message: `Age key already exists at ${ageKeyPath}`} } try { // chezmoi uses `age-keygen` via its own embedded command await execOrThrow('chezmoi', ['age', 'keygen', '-o', ageKeyPath]) - return { status: 'success', message: `Age key generated at ${ageKeyPath}` } + return {status: 'success', message: `Age key generated at ${ageKeyPath}`} } catch { // Fallback: try standalone age-keygen try { // age-keygen writes public key to stderr, private key to file await execOrThrow('age-keygen', ['-o', ageKeyPath]) - return { status: 'success', message: `Age key generated at ${ageKeyPath}` } + return {status: 'success', message: `Age key generated at ${ageKeyPath}`} } catch { return { status: 'failed', @@ -337,11 +429,14 @@ export function buildSetupSteps(platform, options = {}) { .filter((l) => l !== undefined) .join('\n') - const { writeFile, mkdir } = await import('node:fs/promises') - await mkdir(chezmoiConfigDir, { recursive: true }) + const {writeFile, mkdir} = await import('node:fs/promises') + await mkdir(chezmoiConfigDir, 
{recursive: true}) await writeFile(configPath, tomlContent, 'utf8') - return { status: 'success', message: `chezmoi.toml written with age encryption${publicKey ? ` (public key: ${publicKey.slice(0, 16)}...)` : ''}` } + return { + status: 'success', + message: `chezmoi.toml written with age encryption${publicKey ? ` (public key: ${publicKey.slice(0, 16)}...)` : ''}`, + } } catch (err) { return { status: 'failed', @@ -363,14 +458,14 @@ export function buildSetupSteps(platform, options = {}) { await execOrThrow('chezmoi', ['init']) const configResult = await getChezmoiConfig() const sourceDir = configResult?.sourceDir ?? configResult?.sourcePath ?? null - return { status: 'success', message: sourceDir ? `Source dir: ${sourceDir}` : 'chezmoi initialised' } + return {status: 'success', message: sourceDir ? `Source dir: ${sourceDir}` : 'chezmoi initialised'} } catch { // init may fail if already initialised — that's ok const configResult = await getChezmoiConfig() if (configResult) { - return { status: 'skipped', message: 'chezmoi already initialised' } + return {status: 'skipped', message: 'chezmoi already initialised'} } - return { status: 'failed', hint: 'Run `chezmoi doctor` to diagnose init failure' } + return {status: 'failed', hint: 'Run `chezmoi doctor` to diagnose init failure'} } }, }) @@ -385,9 +480,9 @@ export function buildSetupSteps(platform, options = {}) { run: async () => { try { const config = await loadConfig() - config.dotfiles = { ...config.dotfiles, enabled: true } + config.dotfiles = {...config.dotfiles, enabled: true} await saveConfig(config) - return { status: 'success', message: 'dvmi config updated: dotfiles.enabled = true' } + return {status: 'success', message: 'dvmi config updated: dotfiles.enabled = true'} } catch (err) { return { status: 'failed', @@ -456,8 +551,8 @@ export async function setupChezmoiInline(platform) { .filter((l) => l !== undefined) .join('\n') - const { writeFile, mkdir } = await import('node:fs/promises') - await 
mkdir(chezmoiConfigDir, { recursive: true }) + const {writeFile, mkdir} = await import('node:fs/promises') + await mkdir(chezmoiConfigDir, {recursive: true}) await writeFile(configPath, tomlContent, 'utf8') // Init chezmoi @@ -469,7 +564,7 @@ export async function setupChezmoiInline(platform) { // Save dvmi config const dvmiConfig = await loadConfig() - dvmiConfig.dotfiles = { ...(dvmiConfig.dotfiles ?? {}), enabled: true } + dvmiConfig.dotfiles = {...(dvmiConfig.dotfiles ?? {}), enabled: true} await saveConfig(dvmiConfig) return { @@ -519,18 +614,21 @@ export function buildAddSteps(files, platform) { run: async () => { // V-001: file must exist if (!existsSync(absPath)) { - return { status: 'skipped', message: `${file.path}: file not found` } + return {status: 'skipped', message: `${file.path}: file not found`} } // V-002: WSL2 Windows path rejection if (platform === 'wsl2' && isWSLWindowsPath(absPath)) { - return { status: 'failed', hint: `${file.path}: Windows filesystem paths not supported on WSL2. Use Linux-native paths (~/) instead.` } + return { + status: 'failed', + hint: `${file.path}: Windows filesystem paths not supported on WSL2. Use Linux-native paths (~/) instead.`, + } } try { const args = ['add'] if (file.encrypt) args.push('--encrypt') args.push(absPath) await execOrThrow('chezmoi', args) - return { status: 'success', message: `${file.path} added${file.encrypt ? ' (encrypted)' : ''}` } + return {status: 'success', message: `${file.path} added${file.encrypt ? 
' (encrypted)' : ''}`} } catch { return { status: 'failed', diff --git a/src/services/github.js b/src/services/github.js index 01f9da6..2fe6867 100644 --- a/src/services/github.js +++ b/src/services/github.js @@ -1,6 +1,6 @@ -import { Octokit } from 'octokit' -import { exec } from './shell.js' -import { AuthError } from '../utils/errors.js' +import {Octokit} from 'octokit' +import {exec} from './shell.js' +import {AuthError} from '../utils/errors.js' /** @import { Template, Repository, PullRequest, PRComment, QAStep, PRDetail, PipelineRun } from '../types.js' */ @@ -21,7 +21,7 @@ async function getToken() { export async function createOctokit() { const token = await getToken() const baseUrl = process.env.GITHUB_API_URL ?? 'https://api.github.com' - return new Octokit({ auth: token, baseUrl }) + return new Octokit({auth: token, baseUrl}) } /** @@ -47,9 +47,7 @@ export async function listRepos(org, filters = {}) { isPrivate: r.private, })) if (filters.language) { - results = results.filter( - (r) => r.language?.toLowerCase() === filters.language?.toLowerCase(), - ) + results = results.filter((r) => r.language?.toLowerCase() === filters.language?.toLowerCase()) } if (filters.topic) { results = results.filter((r) => r.topics.includes(filters.topic ?? 
'')) @@ -87,7 +85,7 @@ export async function listTemplates(org) { */ export async function createFromTemplate(opts) { const octokit = await createOctokit() - const { data } = await octokit.rest.repos.createUsingTemplate({ + const {data} = await octokit.rest.repos.createUsingTemplate({ template_owner: opts.templateOwner, template_repo: opts.templateRepo, name: opts.name, @@ -96,7 +94,7 @@ export async function createFromTemplate(opts) { private: opts.isPrivate, include_all_branches: false, }) - return { name: data.name, htmlUrl: data.html_url, cloneUrl: data.clone_url } + return {name: data.name, htmlUrl: data.html_url, cloneUrl: data.clone_url} } /** @@ -113,7 +111,7 @@ export async function setBranchProtection(owner, repo) { branch: 'main', required_status_checks: null, enforce_admins: false, - required_pull_request_reviews: { required_approving_review_count: 0 }, + required_pull_request_reviews: {required_approving_review_count: 0}, restrictions: null, allow_force_pushes: false, allow_deletions: false, @@ -128,8 +126,8 @@ export async function setBranchProtection(owner, repo) { */ export async function enableDependabot(owner, repo) { const octokit = await createOctokit() - await octokit.rest.repos.enableAutomatedSecurityFixes({ owner, repo }) - await octokit.rest.repos.enableVulnerabilityAlerts({ owner, repo }) + await octokit.rest.repos.enableAutomatedSecurityFixes({owner, repo}) + await octokit.rest.repos.enableVulnerabilityAlerts({owner, repo}) } /** @@ -139,7 +137,7 @@ export async function enableDependabot(owner, repo) { */ export async function createPR(opts) { const octokit = await createOctokit() - const { data } = await octokit.rest.pulls.create({ + const {data} = await octokit.rest.pulls.create({ owner: opts.owner, repo: opts.repo, title: opts.title, @@ -164,7 +162,7 @@ export async function createPR(opts) { reviewers: opts.reviewers, }) } - return { number: data.number, htmlUrl: data.html_url } + return {number: data.number, htmlUrl: data.html_url} } 
/** @@ -174,7 +172,7 @@ export async function createPR(opts) { */ export async function listMyPRs(org) { const octokit = await createOctokit() - const { data: user } = await octokit.rest.users.getAuthenticated() + const {data: user} = await octokit.rest.users.getAuthenticated() const login = user.login const [authoredRes, reviewingRes] = await Promise.all([ @@ -227,9 +225,9 @@ export async function listWorkflowRuns(owner, repo, filters = {}) { owner, repo, per_page: filters.limit ?? 10, - ...(filters.branch ? { branch: filters.branch } : {}), + ...(filters.branch ? {branch: filters.branch} : {}), } - const { data } = await octokit.rest.actions.listWorkflowRunsForRepo(params) + const {data} = await octokit.rest.actions.listWorkflowRunsForRepo(params) return data.workflow_runs.map((run) => { const start = new Date(run.created_at) const end = run.updated_at ? new Date(run.updated_at) : new Date() @@ -259,9 +257,9 @@ export async function listWorkflowRuns(owner, repo, filters = {}) { export async function rerunWorkflow(owner, repo, runId, failedOnly = false) { const octokit = await createOctokit() if (failedOnly) { - await octokit.rest.actions.reRunWorkflowFailedJobs({ owner, repo, run_id: runId }) + await octokit.rest.actions.reRunWorkflowFailedJobs({owner, repo, run_id: runId}) } else { - await octokit.rest.actions.reRunWorkflow({ owner, repo, run_id: runId }) + await octokit.rest.actions.reRunWorkflow({owner, repo, run_id: runId}) } } @@ -277,7 +275,7 @@ export async function searchCode(org, query, opts = {}) { let q = `${query} org:${org}` if (opts.language) q += ` language:${opts.language}` if (opts.repo) q += ` repo:${org}/${opts.repo}` - const { data } = await octokit.rest.search.code({ q, per_page: opts.limit ?? 20 }) + const {data} = await octokit.rest.search.code({q, per_page: opts.limit ?? 
20}) return data.items.map((item) => ({ repo: item.repository.name, file: item.path, @@ -297,7 +295,7 @@ export function extractQASteps(body) { for (const line of body.split('\n')) { const match = line.match(/^\s*-\s*\[([xX ])\]\s+(.+)/) if (match) { - steps.push({ text: match[2].trim(), checked: match[1].toLowerCase() === 'x' }) + steps.push({text: match[2].trim(), checked: match[1].toLowerCase() === 'x'}) } } return steps @@ -331,9 +329,9 @@ export async function getPRDetail(owner, repo, prNumber) { const octokit = await createOctokit() const [prRes, commentsRes, reviewsRes] = await Promise.all([ - octokit.rest.pulls.get({ owner, repo, pull_number: prNumber }), - octokit.rest.issues.listComments({ owner, repo, issue_number: prNumber, per_page: 100 }), - octokit.rest.pulls.listReviews({ owner, repo, pull_number: prNumber, per_page: 100 }), + octokit.rest.pulls.get({owner, repo, pull_number: prNumber}), + octokit.rest.issues.listComments({owner, repo, issue_number: prNumber, per_page: 100}), + octokit.rest.pulls.listReviews({owner, repo, pull_number: prNumber, per_page: 100}), ]) const pr = prRes.data diff --git a/src/services/nvd.js b/src/services/nvd.js index a749826..ee69d38 100644 --- a/src/services/nvd.js +++ b/src/services/nvd.js @@ -1,13 +1,12 @@ -import { loadConfig } from './config.js' -import { DvmiError } from '../utils/errors.js' +import {loadConfig} from './config.js' +import {DvmiError} from '../utils/errors.js' /** @import { CveSearchResult, CveDetail } from '../types.js' */ const NVD_BASE_URL = 'https://services.nvd.nist.gov/rest/json/cves/2.0' /** NVD attribution required in all interactive output. */ -export const NVD_ATTRIBUTION = - 'This product uses data from the NVD API but is not endorsed or certified by the NVD.' +export const NVD_ATTRIBUTION = 'This product uses data from the NVD API but is not endorsed or certified by the NVD.' /** * Normalize a raw NVD severity string to the 4-tier canonical form. 
@@ -31,11 +30,7 @@ export function normalizeSeverity(raw) { * @returns {{ score: number|null, severity: string, vector: string|null }} */ function extractCvss(metrics) { - const sources = [ - (metrics?.cvssMetricV31 ?? []), - (metrics?.cvssMetricV40 ?? []), - (metrics?.cvssMetricV2 ?? []), - ] + const sources = [metrics?.cvssMetricV31 ?? [], metrics?.cvssMetricV40 ?? [], metrics?.cvssMetricV2 ?? []] for (const list of sources) { if (Array.isArray(list) && list.length > 0) { @@ -50,7 +45,7 @@ function extractCvss(metrics) { } } - return { score: null, severity: 'Unknown', vector: null } + return {score: null, severity: 'Unknown', vector: null} } /** @@ -88,15 +83,12 @@ function buildParams(params) { async function nvdFetch(params, apiKey) { const url = `${NVD_BASE_URL}?${params.toString()}` /** @type {Record} */ - const headers = { Accept: 'application/json' } + const headers = {Accept: 'application/json'} if (apiKey) headers['apiKey'] = apiKey - const res = await fetch(url, { headers }) + const res = await fetch(url, {headers}) if (!res.ok) { - throw new DvmiError( - `NVD API returned HTTP ${res.status}`, - 'Check your network connection or try again later.', - ) + throw new DvmiError(`NVD API returned HTTP ${res.status}`, 'Check your network connection or try again later.') } return res.json() } @@ -108,7 +100,7 @@ async function nvdFetch(params, apiKey) { */ function parseCveSearchResult(raw) { const cve = raw.cve - const { score, severity } = extractCvss(cve.metrics ?? {}) + const {score, severity} = extractCvss(cve.metrics ?? {}) return { id: cve.id, description: getEnDescription(cve.descriptions), @@ -127,7 +119,7 @@ function parseCveSearchResult(raw) { */ function parseCveDetail(raw) { const cve = raw.cve - const { score, severity, vector } = extractCvss(cve.metrics ?? {}) + const {score, severity, vector} = extractCvss(cve.metrics ?? {}) // Weaknesses: flatten all CWE descriptions const weaknesses = (cve.weaknesses ?? 
[]).flatMap((w) => @@ -149,10 +141,11 @@ function parseCveDetail(raw) { const product = parts[4] ?? 'unknown' const versionStart = m.versionStartIncluding ?? m.versionStartExcluding ?? '' const versionEnd = m.versionEndExcluding ?? m.versionEndIncluding ?? '' - const versions = versionStart && versionEnd - ? `${versionStart} to ${versionEnd}` - : versionStart || versionEnd || (parts[5] ?? '*') - return { vendor, product, versions } + const versions = + versionStart && versionEnd + ? `${versionStart} to ${versionEnd}` + : versionStart || versionEnd || (parts[5] ?? '*') + return {vendor, product, versions} }), ), ) @@ -188,7 +181,7 @@ function parseCveDetail(raw) { * @param {number} [options.limit=20] - Maximum results to return * @returns {Promise<{ results: CveSearchResult[], totalResults: number }>} */ -export async function searchCves({ keyword, days = 14, severity, limit = 20 }) { +export async function searchCves({keyword, days = 14, severity, limit = 20}) { const config = await loadConfig() const apiKey = config.nvd?.apiKey @@ -202,17 +195,17 @@ export async function searchCves({ keyword, days = 14, severity, limit = 20 }) { const trimmedKeyword = keyword?.trim() const params = buildParams({ - ...(trimmedKeyword ? { keywordSearch: trimmedKeyword } : {}), + ...(trimmedKeyword ? {keywordSearch: trimmedKeyword} : {}), pubStartDate, pubEndDate, resultsPerPage: limit, - ...(severity ? { cvssV3Severity: severity.toUpperCase() } : {}), + ...(severity ? {cvssV3Severity: severity.toUpperCase()} : {}), }) const data = /** @type {any} */ (await nvdFetch(params, apiKey)) const results = (data.vulnerabilities ?? []).map(parseCveSearchResult) - return { results, totalResults: data.totalResults ?? results.length } + return {results, totalResults: data.totalResults ?? 
results.length} } /** @@ -231,14 +224,11 @@ export async function getCveDetail(cveId) { const config = await loadConfig() const apiKey = config.nvd?.apiKey - const params = buildParams({ cveId: cveId.toUpperCase() }) + const params = buildParams({cveId: cveId.toUpperCase()}) const data = /** @type {any} */ (await nvdFetch(params, apiKey)) if (!data.vulnerabilities || data.vulnerabilities.length === 0) { - throw new DvmiError( - `CVE not found: ${cveId}`, - 'Verify the CVE ID is correct and exists in the NVD database.', - ) + throw new DvmiError(`CVE not found: ${cveId}`, 'Verify the CVE ID is correct and exists in the NVD database.') } return parseCveDetail(data.vulnerabilities[0]) diff --git a/src/services/platform.js b/src/services/platform.js index fdf78e6..fcb6612 100644 --- a/src/services/platform.js +++ b/src/services/platform.js @@ -1,5 +1,5 @@ -import { readFile } from 'node:fs/promises' -import { existsSync } from 'node:fs' +import {readFile} from 'node:fs/promises' +import {existsSync} from 'node:fs' /** @import { Platform, PlatformInfo } from '../types.js' */ diff --git a/src/services/prompts.js b/src/services/prompts.js index fea365e..f4efecc 100644 --- a/src/services/prompts.js +++ b/src/services/prompts.js @@ -1,10 +1,10 @@ -import { mkdir, writeFile, readFile, access } from 'node:fs/promises' -import { join, dirname, resolve, sep } from 'node:path' -import { execa } from 'execa' -import { createOctokit } from './github.js' -import { which } from './shell.js' -import { parseFrontmatter, serializeFrontmatter } from '../utils/frontmatter.js' -import { DvmiError } from '../utils/errors.js' +import {mkdir, writeFile, readFile, access} from 'node:fs/promises' +import {join, dirname, resolve, sep} from 'node:path' +import {execa} from 'execa' +import {createOctokit} from './github.js' +import {which} from './shell.js' +import {parseFrontmatter, serializeFrontmatter} from '../utils/frontmatter.js' +import {DvmiError} from '../utils/errors.js' /** @import { 
Prompt, AITool } from '../types.js' */ @@ -13,15 +13,15 @@ import { DvmiError } from '../utils/errors.js' * @type {Record} */ export const SUPPORTED_TOOLS = { - opencode: { bin: ['opencode'], promptFlag: '--prompt' }, - copilot: { bin: ['gh', 'copilot'], promptFlag: '-p' }, + opencode: {bin: ['opencode'], promptFlag: '--prompt'}, + copilot: {bin: ['gh', 'copilot'], promptFlag: '-p'}, } /** * GitHub repository containing the personal prompt collection. * @type {{ owner: string, repo: string }} */ -export const PROMPT_REPO = { owner: 'savez', repo: 'prompt-for-ai' } +export const PROMPT_REPO = {owner: 'savez', repo: 'prompt-for-ai'} /** * Default branch used when fetching the repository tree. @@ -75,7 +75,7 @@ function categoryFromPath(filePath) { */ function contentToPrompt(path, base64Content) { const raw = Buffer.from(base64Content, 'base64').toString('utf8') - const { frontmatter, body } = parseFrontmatter(raw) + const {frontmatter, body} = parseFrontmatter(raw) return { path, title: typeof frontmatter.title === 'string' ? frontmatter.title : titleFromPath(path), @@ -101,7 +101,7 @@ export async function listPrompts() { const octokit = await createOctokit() let tree try { - const { data } = await octokit.rest.git.getTree({ + const {data} = await octokit.rest.git.getTree({ owner: PROMPT_REPO.owner, repo: PROMPT_REPO.repo, tree_sha: DEFAULT_BRANCH, @@ -132,7 +132,7 @@ export async function listPrompts() { const prompts = await Promise.all( mdFiles.map(async (item) => { - const { data } = await octokit.rest.repos.getContent({ + const {data} = await octokit.rest.repos.getContent({ owner: PROMPT_REPO.owner, repo: PROMPT_REPO.repo, path: item.path ?? 
'', @@ -169,10 +169,7 @@ export async function fetchPromptByPath(relativePath) { } catch (err) { const status = /** @type {{ status?: number }} */ (err).status if (status === 404) { - throw new DvmiError( - `Prompt not found: ${relativePath}`, - `Run \`dvmi prompts list\` to see available prompts`, - ) + throw new DvmiError(`Prompt not found: ${relativePath}`, `Run \`dvmi prompts list\` to see available prompts`) } throw err } @@ -207,17 +204,14 @@ export async function downloadPrompt(relativePath, localDir, opts = {}) { // Prevent path traversal: destPath must remain within localDir const safeBase = resolve(localDir) + sep if (!resolve(destPath).startsWith(safeBase)) { - throw new DvmiError( - `Invalid prompt path: "${relativePath}"`, - 'Path must stay within the prompts directory', - ) + throw new DvmiError(`Invalid prompt path: "${relativePath}"`, 'Path must stay within the prompts directory') } // Fast-path: skip without a network round-trip if file exists and no overwrite if (!opts.overwrite) { try { await access(destPath) - return { path: destPath, skipped: true } + return {path: destPath, skipped: true} } catch { // File does not exist — fall through to download } @@ -237,10 +231,10 @@ export async function downloadPrompt(relativePath, localDir, opts = {}) { const content = serializeFrontmatter(fm, prompt.body) - await mkdir(dirname(destPath), { recursive: true, mode: 0o700 }) - await writeFile(destPath, content, { encoding: 'utf8', mode: 0o600 }) + await mkdir(dirname(destPath), {recursive: true, mode: 0o700}) + await writeFile(destPath, content, {encoding: 'utf8', mode: 0o600}) - return { path: destPath, skipped: false } + return {path: destPath, skipped: false} } /** @@ -258,10 +252,7 @@ export async function resolveLocalPrompt(relativePath, localDir) { // Prevent path traversal: fullPath must remain within localDir const safeBase = resolve(localDir) + sep if (!resolve(fullPath).startsWith(safeBase)) { - throw new DvmiError( - `Invalid prompt path: 
"${relativePath}"`, - 'Path must stay within the prompts directory', - ) + throw new DvmiError(`Invalid prompt path: "${relativePath}"`, 'Path must stay within the prompts directory') } let raw @@ -274,7 +265,7 @@ export async function resolveLocalPrompt(relativePath, localDir) { ) } - const { frontmatter, body } = parseFrontmatter(raw) + const {frontmatter, body} = parseFrontmatter(raw) return { path: relativePath, title: typeof frontmatter.title === 'string' ? frontmatter.title : titleFromPath(relativePath), @@ -301,10 +292,7 @@ export async function resolveLocalPrompt(relativePath, localDir) { export async function invokeTool(toolName, promptContent) { const tool = SUPPORTED_TOOLS[toolName] if (!tool) { - throw new DvmiError( - `Unknown AI tool: "${toolName}"`, - `Supported tools: ${Object.keys(SUPPORTED_TOOLS).join(', ')}`, - ) + throw new DvmiError(`Unknown AI tool: "${toolName}"`, `Supported tools: ${Object.keys(SUPPORTED_TOOLS).join(', ')}`) } // Verify binary availability @@ -322,5 +310,5 @@ export async function invokeTool(toolName, promptContent) { } // Spawn tool with prompt content — inherits stdio so TUI/interactive tools work - await execa(bin, [...subArgs, tool.promptFlag, promptContent], { stdio: 'inherit' }) + await execa(bin, [...subArgs, tool.promptFlag, promptContent], {stdio: 'inherit'}) } diff --git a/src/services/security.js b/src/services/security.js index 07cd640..38a21c7 100644 --- a/src/services/security.js +++ b/src/services/security.js @@ -1,8 +1,8 @@ -import { homedir } from 'node:os' -import { join } from 'node:path' -import { readFile, appendFile } from 'node:fs/promises' -import { existsSync } from 'node:fs' -import { which, exec, execOrThrow } from './shell.js' +import {homedir} from 'node:os' +import {join} from 'node:path' +import {readFile, appendFile} from 'node:fs/promises' +import {existsSync} from 'node:fs' +import {which, exec, execOrThrow} from './shell.js' /** @import { Platform, PlatformInfo, SecurityTool, 
SecurityToolStatus, SetupStep, StepResult, GpgKey } from '../types.js' */ @@ -74,14 +74,20 @@ export async function checkToolStatus(platform) { for (const tool of TOOL_DEFINITIONS) { if (!tool.platforms.includes(platform)) { - results.push({ id: tool.id, displayName: tool.displayName, status: 'n/a', version: null, hint: null }) + results.push({id: tool.id, displayName: tool.displayName, status: 'n/a', version: null, hint: null}) continue } if (tool.id === 'aws-vault') { const path = await which('aws-vault') if (!path) { - results.push({ id: tool.id, displayName: tool.displayName, status: 'not-installed', version: null, hint: 'Install aws-vault' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'not-installed', + version: null, + hint: 'Install aws-vault', + }) continue } const versionResult = await exec('aws-vault', ['--version']) @@ -90,30 +96,60 @@ export async function checkToolStatus(platform) { if (platform !== 'macos') { const backend = process.env.AWS_VAULT_BACKEND if (backend !== 'pass') { - results.push({ id: tool.id, displayName: tool.displayName, status: 'misconfigured', version: version || null, hint: 'Add export AWS_VAULT_BACKEND=pass to your shell profile' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'misconfigured', + version: version || null, + hint: 'Add export AWS_VAULT_BACKEND=pass to your shell profile', + }) continue } } - results.push({ id: tool.id, displayName: tool.displayName, status: 'installed', version: version || null, hint: null }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'installed', + version: version || null, + hint: null, + }) continue } if (tool.id === 'gpg') { const path = await which('gpg') if (!path) { - results.push({ id: tool.id, displayName: tool.displayName, status: 'not-installed', version: null, hint: 'Install gnupg via your package manager' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 
'not-installed', + version: null, + hint: 'Install gnupg via your package manager', + }) continue } const versionResult = await exec('gpg', ['--version']) const match = versionResult.stdout.match(/gpg \(GnuPG\)\s+([\d.]+)/) - results.push({ id: tool.id, displayName: tool.displayName, status: 'installed', version: match ? match[1] : null, hint: null }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'installed', + version: match ? match[1] : null, + hint: null, + }) continue } if (tool.id === 'pass') { const path = await which('pass') if (!path) { - results.push({ id: tool.id, displayName: tool.displayName, status: 'not-installed', version: null, hint: 'Install pass via your package manager' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'not-installed', + version: null, + hint: 'Install pass via your package manager', + }) continue } const versionResult = await exec('pass', ['--version']) @@ -121,19 +157,37 @@ export async function checkToolStatus(platform) { // Check if pass is initialized const lsResult = await exec('pass', ['ls']) if (lsResult.exitCode !== 0) { - results.push({ id: tool.id, displayName: tool.displayName, status: 'misconfigured', version: match ? match[1] : null, hint: 'Initialize pass with: pass init ' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'misconfigured', + version: match ? match[1] : null, + hint: 'Initialize pass with: pass init ', + }) continue } - results.push({ id: tool.id, displayName: tool.displayName, status: 'installed', version: match ? match[1] : null, hint: null }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'installed', + version: match ? 
match[1] : null, + hint: null, + }) continue } if (tool.id === 'osxkeychain') { const result = await exec('git', ['config', '--global', 'credential.helper']) if (result.stdout === 'osxkeychain') { - results.push({ id: tool.id, displayName: tool.displayName, status: 'installed', version: null, hint: null }) + results.push({id: tool.id, displayName: tool.displayName, status: 'installed', version: null, hint: null}) } else { - results.push({ id: tool.id, displayName: tool.displayName, status: 'not-installed', version: null, hint: 'Run: git config --global credential.helper osxkeychain' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'not-installed', + version: null, + hint: 'Run: git config --global credential.helper osxkeychain', + }) } continue } @@ -141,17 +195,29 @@ export async function checkToolStatus(platform) { if (tool.id === 'gcm') { const path = await which('git-credential-manager') if (!path) { - results.push({ id: tool.id, displayName: tool.displayName, status: 'not-installed', version: null, hint: 'Install Git Credential Manager' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'not-installed', + version: null, + hint: 'Install Git Credential Manager', + }) continue } const versionResult = await exec('git-credential-manager', ['--version']) const version = versionResult.stdout.trim() || null const storeResult = await exec('git', ['config', '--global', 'credential.credentialStore']) if (storeResult.stdout !== 'gpg') { - results.push({ id: tool.id, displayName: tool.displayName, status: 'misconfigured', version, hint: 'Run: git config --global credential.credentialStore gpg' }) + results.push({ + id: tool.id, + displayName: tool.displayName, + status: 'misconfigured', + version, + hint: 'Run: git config --global credential.credentialStore gpg', + }) continue } - results.push({ id: tool.id, displayName: tool.displayName, status: 'installed', version, hint: null }) + results.push({id: tool.id, 
displayName: tool.displayName, status: 'installed', version, hint: null}) continue } } @@ -267,7 +333,7 @@ export function deriveOverallStatus(tools) { * @returns {SetupStep[]} */ export function buildSteps(platformInfo, selection, context = {}) { - const { platform } = platformInfo + const {platform} = platformInfo const includeAws = selection === 'aws' || selection === 'both' const includeGit = selection === 'git' || selection === 'both' @@ -291,7 +357,7 @@ export function buildSteps(platformInfo, selection, context = {}) { hintUrl: 'https://brew.sh', } } - return { status: 'success', message: 'Homebrew is available' } + return {status: 'success', message: 'Homebrew is available'} }, }) @@ -303,10 +369,10 @@ export function buildSteps(platformInfo, selection, context = {}) { requiresConfirmation: true, run: async () => { const existing = await which('aws-vault') - if (existing) return { status: 'skipped', message: 'aws-vault already installed' } + if (existing) return {status: 'skipped', message: 'aws-vault already installed'} try { await execOrThrow('brew', ['install', 'aws-vault']) - return { status: 'success', message: 'aws-vault installed via Homebrew' } + return {status: 'success', message: 'aws-vault installed via Homebrew'} } catch { return { status: 'failed', @@ -326,10 +392,10 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { const result = await exec('aws-vault', ['--version']) if (result.exitCode !== 0) { - return { status: 'failed', hint: 'aws-vault not found in PATH after install' } + return {status: 'failed', hint: 'aws-vault not found in PATH after install'} } const version = (result.stdout || result.stderr).trim() - return { status: 'success', message: `aws-vault ${version}` } + return {status: 'success', message: `aws-vault ${version}`} }, }) } @@ -344,9 +410,9 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { try { await execOrThrow('git', ['config', '--global', 
'credential.helper', 'osxkeychain']) - return { status: 'success', message: 'Git credential helper set to osxkeychain' } + return {status: 'success', message: 'Git credential helper set to osxkeychain'} } catch { - return { status: 'failed', hint: 'Run manually: git config --global credential.helper osxkeychain' } + return {status: 'failed', hint: 'Run manually: git config --global credential.helper osxkeychain'} } }, }) @@ -360,9 +426,9 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { const result = await exec('git', ['config', '--global', 'credential.helper']) if (result.stdout !== 'osxkeychain') { - return { status: 'failed', hint: 'credential.helper is not set to osxkeychain' } + return {status: 'failed', hint: 'credential.helper is not set to osxkeychain'} } - return { status: 'success', message: 'osxkeychain is configured' } + return {status: 'success', message: 'osxkeychain is configured'} }, }) } @@ -378,11 +444,11 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { const path = await which('gpg') if (!path) { - return { status: 'failed', hint: 'GPG not found — will be installed in the next step' } + return {status: 'failed', hint: 'GPG not found — will be installed in the next step'} } const result = await exec('gpg', ['--version']) const match = result.stdout.match(/gpg \(GnuPG\)\s+([\d.]+)/) - return { status: 'success', message: `GPG ${match ? match[1] : 'found'}` } + return {status: 'success', message: `GPG ${match ? 
match[1] : 'found'}`} }, }) @@ -395,12 +461,12 @@ export function buildSteps(platformInfo, selection, context = {}) { skippable: true, run: async () => { const path = await which('gpg') - if (path) return { status: 'skipped', message: 'GPG already installed' } + if (path) return {status: 'skipped', message: 'GPG already installed'} try { await execOrThrow('sudo', ['apt-get', 'install', '-y', 'gnupg']) - return { status: 'success', message: 'GPG installed' } + return {status: 'success', message: 'GPG installed'} } catch { - return { status: 'failed', hint: 'Run manually: sudo apt-get install -y gnupg' } + return {status: 'failed', hint: 'Run manually: sudo apt-get install -y gnupg'} } }, }) @@ -414,7 +480,7 @@ export function buildSteps(platformInfo, selection, context = {}) { gpgInteractive: true, run: async () => { const gpgId = context.gpgId - if (gpgId) return { status: 'skipped', message: `Using existing GPG key ${gpgId}` } + if (gpgId) return {status: 'skipped', message: `Using existing GPG key ${gpgId}`} // When gpgInteractive=true, the command layer stops the spinner and spawns // gpg --full-generate-key with stdio:inherit so the user sets a strong passphrase. 
// We never generate a key with an empty passphrase — that would leave the private @@ -436,12 +502,12 @@ export function buildSteps(platformInfo, selection, context = {}) { requiresConfirmation: true, run: async () => { const path = await which('pass') - if (path) return { status: 'skipped', message: 'pass already installed' } + if (path) return {status: 'skipped', message: 'pass already installed'} try { await execOrThrow('sudo', ['apt-get', 'install', '-y', 'pass']) - return { status: 'success', message: 'pass installed' } + return {status: 'success', message: 'pass installed'} } catch { - return { status: 'failed', hint: 'Run manually: sudo apt-get install -y pass' } + return {status: 'failed', hint: 'Run manually: sudo apt-get install -y pass'} } }, }) @@ -456,17 +522,17 @@ export function buildSteps(platformInfo, selection, context = {}) { // Skip if pass is already initialized const lsResult = await exec('pass', ['ls']) if (lsResult.exitCode === 0) { - return { status: 'skipped', message: 'pass store already initialized' } + return {status: 'skipped', message: 'pass store already initialized'} } const gpgId = context.gpgId if (!gpgId) { - return { status: 'failed', hint: 'No GPG key ID available — complete the create-gpg-key step first' } + return {status: 'failed', hint: 'No GPG key ID available — complete the create-gpg-key step first'} } try { await execOrThrow('pass', ['init', gpgId]) - return { status: 'success', message: `pass initialized with key ${gpgId}` } + return {status: 'success', message: `pass initialized with key ${gpgId}`} } catch { - return { status: 'failed', hint: `Run manually: pass init ${gpgId}` } + return {status: 'failed', hint: `Run manually: pass init ${gpgId}`} } }, }) @@ -479,12 +545,16 @@ export function buildSteps(platformInfo, selection, context = {}) { requiresConfirmation: true, run: async () => { const existing = await which('aws-vault') - if (existing) return { status: 'skipped', message: 'aws-vault already installed' } + 
if (existing) return {status: 'skipped', message: 'aws-vault already installed'} const arch = process.arch === 'arm64' ? 'arm64' : 'amd64' const url = `https://github.com/99designs/aws-vault/releases/latest/download/aws-vault-linux-${arch}` try { - await execOrThrow('sudo', ['sh', '-c', `curl -sSL '${url}' -o /usr/local/bin/aws-vault && chmod +x /usr/local/bin/aws-vault`]) - return { status: 'success', message: 'aws-vault installed to /usr/local/bin/aws-vault' } + await execOrThrow('sudo', [ + 'sh', + '-c', + `curl -sSL '${url}' -o /usr/local/bin/aws-vault && chmod +x /usr/local/bin/aws-vault`, + ]) + return {status: 'success', message: 'aws-vault installed to /usr/local/bin/aws-vault'} } catch { return { status: 'failed', @@ -505,9 +575,9 @@ export function buildSteps(platformInfo, selection, context = {}) { try { await appendToShellProfile('export AWS_VAULT_BACKEND=pass') await appendToShellProfile('export GPG_TTY=$(tty)') - return { status: 'success', message: 'AWS_VAULT_BACKEND=pass and GPG_TTY added to shell profile' } + return {status: 'success', message: 'AWS_VAULT_BACKEND=pass and GPG_TTY added to shell profile'} } catch { - return { status: 'failed', hint: 'Add manually to ~/.bashrc or ~/.zshrc: export AWS_VAULT_BACKEND=pass' } + return {status: 'failed', hint: 'Add manually to ~/.bashrc or ~/.zshrc: export AWS_VAULT_BACKEND=pass'} } }, }) @@ -521,10 +591,10 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { const result = await exec('aws-vault', ['--version']) if (result.exitCode !== 0) { - return { status: 'failed', hint: 'aws-vault not found in PATH after install' } + return {status: 'failed', hint: 'aws-vault not found in PATH after install'} } const version = (result.stdout || result.stderr).trim() - return { status: 'success', message: `aws-vault ${version}` } + return {status: 'success', message: `aws-vault ${version}`} }, }) } @@ -541,9 +611,9 @@ export function buildSteps(platformInfo, selection, context = 
{}) { run: async () => { const bridgePath = '/mnt/c/Program Files/Git/mingw64/bin/git-credential-manager.exe' if (existsSync(bridgePath)) { - return { status: 'success', message: 'Windows GCM bridge found — using Windows Credential Manager' } + return {status: 'success', message: 'Windows GCM bridge found — using Windows Credential Manager'} } - return { status: 'skipped', message: 'Windows GCM not found — will install native Linux GCM' } + return {status: 'skipped', message: 'Windows GCM not found — will install native Linux GCM'} }, }) } @@ -556,21 +626,25 @@ export function buildSteps(platformInfo, selection, context = {}) { requiresConfirmation: true, run: async () => { const existing = await which('git-credential-manager') - if (existing) return { status: 'skipped', message: 'Git Credential Manager already installed' } + if (existing) return {status: 'skipped', message: 'Git Credential Manager already installed'} try { - const latestResult = await exec('sh', ['-c', "curl -sSL https://api.github.com/repos/git-ecosystem/git-credential-manager/releases/latest | grep 'browser_download_url.*gcm.*linux.*amd64.*deb' | head -1 | cut -d '\"' -f 4"]) + const latestResult = await exec('sh', [ + '-c', + "curl -sSL https://api.github.com/repos/git-ecosystem/git-credential-manager/releases/latest | grep 'browser_download_url.*gcm.*linux.*amd64.*deb' | head -1 | cut -d '\"' -f 4", + ]) const debUrl = latestResult.stdout.trim() if (!debUrl) throw new Error('Could not find GCM deb package URL') // Security: validate debUrl is a legitimate GitHub release asset URL before using it // This prevents command injection if the GitHub API response were tampered with (MITM / supply-chain) - const SAFE_DEB_URL = /^https:\/\/github\.com\/git-ecosystem\/git-credential-manager\/releases\/download\/[a-zA-Z0-9._\-/]+\.deb$/ + const SAFE_DEB_URL = + /^https:\/\/github\.com\/git-ecosystem\/git-credential-manager\/releases\/download\/[a-zA-Z0-9._\-/]+\.deb$/ if (!SAFE_DEB_URL.test(debUrl)) { 
throw new Error(`Unexpected GCM package URL format: "${debUrl}"`) } // Use array args — no shell interpolation of the URL await execOrThrow('curl', ['-sSL', debUrl, '-o', '/tmp/gcm.deb']) await execOrThrow('sudo', ['dpkg', '-i', '/tmp/gcm.deb']) - return { status: 'success', message: 'Git Credential Manager installed' } + return {status: 'success', message: 'Git Credential Manager installed'} } catch { return { status: 'failed', @@ -590,9 +664,9 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { try { await execOrThrow('git-credential-manager', ['configure']) - return { status: 'success', message: 'Git Credential Manager configured' } + return {status: 'success', message: 'Git Credential Manager configured'} } catch { - return { status: 'failed', hint: 'Run manually: git-credential-manager configure' } + return {status: 'failed', hint: 'Run manually: git-credential-manager configure'} } }, }) @@ -606,9 +680,9 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { try { await execOrThrow('git', ['config', '--global', 'credential.credentialStore', 'gpg']) - return { status: 'success', message: 'GCM credential store set to gpg' } + return {status: 'success', message: 'GCM credential store set to gpg'} } catch { - return { status: 'failed', hint: 'Run manually: git config --global credential.credentialStore gpg' } + return {status: 'failed', hint: 'Run manually: git config --global credential.credentialStore gpg'} } }, }) @@ -622,9 +696,9 @@ export function buildSteps(platformInfo, selection, context = {}) { run: async () => { const result = await exec('git-credential-manager', ['--version']) if (result.exitCode !== 0) { - return { status: 'failed', hint: 'git-credential-manager not found in PATH' } + return {status: 'failed', hint: 'git-credential-manager not found in PATH'} } - return { status: 'success', message: `GCM ${result.stdout.trim()}` } + return {status: 'success', message: `GCM 
${result.stdout.trim()}`} }, }) } diff --git a/src/services/shell.js b/src/services/shell.js index 5260e14..4ca7ea5 100644 --- a/src/services/shell.js +++ b/src/services/shell.js @@ -1,4 +1,4 @@ -import { execa } from 'execa' +import {execa} from 'execa' /** @import { ExecResult } from '../types.js' */ @@ -10,7 +10,7 @@ import { execa } from 'execa' * @returns {Promise} */ export async function exec(command, args = [], opts = {}) { - const result = await execa(command, args, { reject: false, ...opts }) + const result = await execa(command, args, {reject: false, ...opts}) return { stdout: result.stdout?.trim() ?? '', stderr: result.stderr?.trim() ?? '', @@ -24,7 +24,7 @@ export async function exec(command, args = [], opts = {}) { * @returns {Promise} Resolved path or null if not found */ export async function which(binary) { - const result = await execa('which', [binary], { reject: false }) + const result = await execa('which', [binary], {reject: false}) if (result.exitCode !== 0 || !result.stdout) return null return result.stdout.trim() } @@ -37,6 +37,6 @@ export async function which(binary) { * @returns {Promise} */ export async function execOrThrow(command, args = [], opts = {}) { - const result = await execa(command, args, { reject: true, ...opts }) + const result = await execa(command, args, {reject: true, ...opts}) return result.stdout?.trim() ?? 
'' } diff --git a/src/services/skills-sh.js b/src/services/skills-sh.js index d721180..8eb1d8f 100644 --- a/src/services/skills-sh.js +++ b/src/services/skills-sh.js @@ -1,4 +1,4 @@ -import { DvmiError } from '../utils/errors.js' +import {DvmiError} from '../utils/errors.js' /** @import { Skill } from '../types.js' */ @@ -37,17 +37,11 @@ export async function searchSkills(query, limit = 50) { try { res = await fetch(url.toString()) } catch { - throw new DvmiError( - 'Unable to reach skills.sh API', - 'Check your internet connection and try again', - ) + throw new DvmiError('Unable to reach skills.sh API', 'Check your internet connection and try again') } if (!res.ok) { - throw new DvmiError( - `skills.sh API returned ${res.status}`, - 'Try again later or visit https://skills.sh', - ) + throw new DvmiError(`skills.sh API returned ${res.status}`, 'Try again later or visit https://skills.sh') } /** @type {unknown} */ diff --git a/src/services/speckit.js b/src/services/speckit.js index 689e3cd..d107cc8 100644 --- a/src/services/speckit.js +++ b/src/services/speckit.js @@ -1,6 +1,6 @@ -import { execa } from 'execa' -import { which, exec } from './shell.js' -import { DvmiError } from '../utils/errors.js' +import {execa} from 'execa' +import {which, exec} from './shell.js' +import {DvmiError} from '../utils/errors.js' /** GitHub spec-kit package source for uv. * TODO: pin to a specific tagged release (e.g. #v1.x.x) once one is available upstream. 
@@ -68,9 +68,6 @@ export async function runSpecifyInit(cwd, opts = {}) { }) if (result.exitCode !== 0) { - throw new DvmiError( - '`specify init` exited with a non-zero code', - 'Check the output above for details', - ) + throw new DvmiError('`specify init` exited with a non-zero code', 'Check the output above for details') } } diff --git a/src/services/version-check.js b/src/services/version-check.js index 5522b82..f0550b4 100644 --- a/src/services/version-check.js +++ b/src/services/version-check.js @@ -1,8 +1,8 @@ -import { readFile } from 'node:fs/promises' -import { join } from 'node:path' -import { fileURLToPath } from 'node:url' -import { loadConfig, saveConfig } from './config.js' -import { exec } from './shell.js' +import {readFile} from 'node:fs/promises' +import {join} from 'node:path' +import {fileURLToPath} from 'node:url' +import {loadConfig, saveConfig} from './config.js' +import {exec} from './shell.js' const PKG_PATH = join(fileURLToPath(import.meta.url), '..', '..', '..', 'package.json') const REPO = 'devvami/devvami' @@ -22,7 +22,7 @@ export async function getCurrentVersion() { * @param {{ force?: boolean }} [opts] * @returns {Promise} */ -export async function getLatestVersion({ force = false } = {}) { +export async function getLatestVersion({force = false} = {}) { const config = await loadConfig() const now = Date.now() const lastCheck = config.lastVersionCheck ? 
new Date(config.lastVersionCheck).getTime() : 0 @@ -38,7 +38,7 @@ export async function getLatestVersion({ force = false } = {}) { // Il tag è nel formato "v1.0.0" — rimuove il prefisso "v" const latest = result.stdout.trim().replace(/^v/, '') || null if (latest) { - await saveConfig({ ...config, latestVersion: latest, lastVersionCheck: new Date().toISOString() }) + await saveConfig({...config, latestVersion: latest, lastVersionCheck: new Date().toISOString()}) } return latest } catch { @@ -51,8 +51,8 @@ export async function getLatestVersion({ force = false } = {}) { * @param {{ force?: boolean }} [opts] * @returns {Promise<{ hasUpdate: boolean, current: string, latest: string|null }>} */ -export async function checkForUpdate({ force = false } = {}) { - const [current, latest] = await Promise.all([getCurrentVersion(), getLatestVersion({ force })]) +export async function checkForUpdate({force = false} = {}) { + const [current, latest] = await Promise.all([getCurrentVersion(), getLatestVersion({force})]) const hasUpdate = Boolean(latest && latest !== current) - return { hasUpdate, current, latest } + return {hasUpdate, current, latest} } diff --git a/src/types.js b/src/types.js index e9819f3..011cfeb 100644 --- a/src/types.js +++ b/src/types.js @@ -332,6 +332,91 @@ * @typedef {'macos'|'wsl2'|'linux'} Platform */ +// ────────────────────────────────────────────────────────────────────────────── +// AI Config Sync TUI types +// ────────────────────────────────────────────────────────────────────────────── + +/** + * @typedef {'mcp'|'command'|'skill'|'agent'} CategoryType + */ + +/** + * @typedef {'vscode-copilot'|'claude-code'|'opencode'|'gemini-cli'|'copilot-cli'} EnvironmentId + */ + +/** + * @typedef {Object} MCPParams + * @property {'stdio'|'sse'|'streamable-http'} transport - MCP transport type + * @property {string} [command] - Command to execute (required for stdio transport) + * @property {string[]} [args] - Command arguments + * @property {Record} [env] - 
Environment variables + * @property {string} [url] - Server URL (required for sse/streamable-http transport) + */ + +/** + * @typedef {Object} CommandParams + * @property {string} content - Prompt/command text content (multi-line) + * @property {string} [description] - Short description of the command + */ + +/** + * @typedef {Object} SkillParams + * @property {string} content - Skill definition content (multi-line) + * @property {string} [description] - Short description of the skill + */ + +/** + * @typedef {Object} AgentParams + * @property {string} instructions - Agent instructions (multi-line) + * @property {string} [description] - Short description of the agent + */ + +/** + * @typedef {Object} CategoryEntry + * @property {string} id - UUID v4, auto-generated + * @property {string} name - Unique within its type; used as filename/key when deploying + * @property {CategoryType} type - Category type + * @property {boolean} active - true = deployed to environments, false = removed but kept in store + * @property {EnvironmentId[]} environments - Target environments for deployment + * @property {MCPParams|CommandParams|SkillParams|AgentParams} params - Type-specific parameters + * @property {string} createdAt - ISO 8601 timestamp + * @property {string} updatedAt - ISO 8601 timestamp + */ + +/** + * @typedef {Object} AIConfigStore + * @property {number} version - Schema version + * @property {CategoryEntry[]} entries - All managed configuration entries + */ + +/** + * @typedef {Object} PathStatus + * @property {string} path - Absolute path + * @property {boolean} exists - Whether the path exists on disk + * @property {boolean} readable - Whether the file could be parsed (for JSON/TOML files) + */ + +/** + * @typedef {Object} CategoryCounts + * @property {number} mcp + * @property {number} command + * @property {number} skill + * @property {number} agent + */ + +/** + * @typedef {Object} DetectedEnvironment + * @property {EnvironmentId} id - Environment identifier + 
* @property {string} name - Display name (e.g. "Claude Code") + * @property {boolean} detected - Whether any config files were found + * @property {PathStatus[]} projectPaths - Project-level paths and their existence status + * @property {PathStatus[]} globalPaths - Global-level paths and their existence status + * @property {string[]} unreadable - Paths that exist but failed to parse + * @property {CategoryType[]} supportedCategories - Category types this environment supports + * @property {CategoryCounts} counts - Per-category item counts from dvmi-managed entries + * @property {'project'|'global'|'both'} scope - Where detection occurred + */ + /** * @typedef {Object} PlatformInfo * @property {Platform} platform diff --git a/src/utils/aws-vault.js b/src/utils/aws-vault.js index ac0c7bf..0b7c748 100644 --- a/src/utils/aws-vault.js +++ b/src/utils/aws-vault.js @@ -1,5 +1,5 @@ -import { loadConfigSync } from '../services/config.js' -import { execa } from 'execa' +import {loadConfigSync} from '../services/config.js' +import {execa} from 'execa' /** * Returns the aws-vault exec prefix to prepend to AWS CLI commands. 
@@ -44,10 +44,7 @@ export function awsVaultPrefix(config = null) { * @returns {boolean} */ export function hasAwsCredentialEnv() { - return Boolean( - process.env.AWS_ACCESS_KEY_ID || - process.env.AWS_SESSION_TOKEN, - ) + return Boolean(process.env.AWS_ACCESS_KEY_ID || process.env.AWS_SESSION_TOKEN) } /** @@ -79,24 +76,14 @@ export async function reexecCurrentCommandWithAwsVault(config = null) { if (process.env.DVMI_AWS_VAULT_REEXEC === '1') return null try { - const child = await execa( - 'aws-vault', - [ - 'exec', - profile, - '--', - process.execPath, - ...process.argv.slice(1), - ], - { - reject: false, - stdio: 'inherit', - env: { - ...process.env, - DVMI_AWS_VAULT_REEXEC: '1', - }, + const child = await execa('aws-vault', ['exec', profile, '--', process.execPath, ...process.argv.slice(1)], { + reject: false, + stdio: 'inherit', + env: { + ...process.env, + DVMI_AWS_VAULT_REEXEC: '1', }, - ) + }) return child.exitCode ?? 1 } catch { @@ -117,25 +104,15 @@ export async function reexecCurrentCommandWithAwsVaultProfile(profile, extraEnv if (!profile) return null try { - const child = await execa( - 'aws-vault', - [ - 'exec', - profile, - '--', - process.execPath, - ...process.argv.slice(1), - ], - { - reject: false, - stdio: 'inherit', - env: { - ...process.env, - DVMI_AWS_VAULT_REEXEC: '1', - ...extraEnv, - }, + const child = await execa('aws-vault', ['exec', profile, '--', process.execPath, ...process.argv.slice(1)], { + reject: false, + stdio: 'inherit', + env: { + ...process.env, + DVMI_AWS_VAULT_REEXEC: '1', + ...extraEnv, }, - ) + }) return child.exitCode ?? 
1 } catch { diff --git a/src/utils/banner.js b/src/utils/banner.js index 6d4428b..e6e3a3e 100644 --- a/src/utils/banner.js +++ b/src/utils/banner.js @@ -1,6 +1,6 @@ import figlet from 'figlet' import chalk from 'chalk' -import { BRAND_GRADIENT, animateGradientBanner, isColorEnabled } from './gradient.js' +import {BRAND_GRADIENT, animateGradientBanner, isColorEnabled} from './gradient.js' // Brand colors export const ORANGE = '#FF6B2B' @@ -24,13 +24,11 @@ function figletAsync(text, opts) { * @returns {Promise} */ export async function printBanner() { - const art = await figletAsync('DVMI', { font: 'ANSI Shadow' }) - const artLines = art.split('\n').filter((l) => l.trim() !== '') - const width = Math.max(...artLines.map((l) => l.length)) + 4 + const art = await figletAsync('DVMI', {font: 'ANSI Shadow'}) + const artLines = art.split('\n').filter((l) => l.trim() !== '') + const width = Math.max(...artLines.map((l) => l.length)) + 4 - const tagline = isColorEnabled - ? chalk.hex(BLUE).bold(' Devvami Developer CLI') - : ' Devvami Developer CLI' + const tagline = isColorEnabled ? chalk.hex(BLUE).bold(' Devvami Developer CLI') : ' Devvami Developer CLI' const separator = isColorEnabled ? chalk.hex(BLUE).dim('─'.repeat(Math.min(width, 60))) diff --git a/src/utils/errors.js b/src/utils/errors.js index c96a96a..22c31dc 100644 --- a/src/utils/errors.js +++ b/src/utils/errors.js @@ -2,14 +2,14 @@ * Base CLI error with an actionable hint for the user. 
*/ export class DvmiError extends Error { - /** - * @param {string} message - Human-readable error message - * @param {string} hint - Actionable suggestion to resolve the error - * @param {number} [exitCode] - Process exit code (default: 1) - */ - constructor(message, hint, exitCode = 1) { - super(message) - this.name = 'DvmiError' + /** + * @param {string} message - Human-readable error message + * @param {string} hint - Actionable suggestion to resolve the error + * @param {number} [exitCode] - Process exit code (default: 1) + */ + constructor(message, hint, exitCode = 1) { + super(message) + this.name = 'DvmiError' /** @type {string} */ this.hint = hint /** @type {number} */ @@ -21,43 +21,39 @@ export class DvmiError extends Error { * Validation error for invalid user input (exit code 2). */ export class ValidationError extends DvmiError { - /** - * @param {string} message - * @param {string} hint - */ - constructor(message, hint) { - super(message, hint, 2) - this.name = 'ValidationError' - // oclif reads this.oclif.exit to determine the process exit code - this.oclif = { exit: 2 } - } - } + /** + * @param {string} message + * @param {string} hint + */ + constructor(message, hint) { + super(message, hint, 2) + this.name = 'ValidationError' + // oclif reads this.oclif.exit to determine the process exit code + this.oclif = {exit: 2} + } +} - /** - * Auth error for missing or expired authentication. - */ - export class AuthError extends DvmiError { - /** - * @param {string} service - Service name (e.g. "GitHub", "AWS") - */ - constructor(service) { - super( - `${service} authentication required`, - `Run \`dvmi auth login\` to authenticate`, - 1, - ) - this.name = 'AuthError' - } - } +/** + * Auth error for missing or expired authentication. + */ +export class AuthError extends DvmiError { + /** + * @param {string} service - Service name (e.g. 
"GitHub", "AWS") + */ + constructor(service) { + super(`${service} authentication required`, `Run \`dvmi auth login\` to authenticate`, 1) + this.name = 'AuthError' + } +} - /** - * Format an error for display in the terminal. - * @param {Error} err - * @returns {string} - */ - export function formatError(err) { - if (err instanceof DvmiError) { - return `Error: ${err.message}\nHint: ${err.hint}` - } - return `Error: ${err.message}` - } +/** + * Format an error for display in the terminal. + * @param {Error} err + * @returns {string} + */ +export function formatError(err) { + if (err instanceof DvmiError) { + return `Error: ${err.message}\nHint: ${err.hint}` + } + return `Error: ${err.message}` +} diff --git a/src/utils/frontmatter.js b/src/utils/frontmatter.js index a324b6f..7b2b558 100644 --- a/src/utils/frontmatter.js +++ b/src/utils/frontmatter.js @@ -18,7 +18,7 @@ import yaml from 'js-yaml' export function parseFrontmatter(content) { const match = content.match(/^---\r?\n([\s\S]*?)---\r?\n?([\s\S]*)$/) if (!match) { - return { frontmatter: {}, body: content } + return {frontmatter: {}, body: content} } const rawYaml = match[1] const body = match[2] ?? '' @@ -28,9 +28,9 @@ export function parseFrontmatter(content) { parsed && typeof parsed === 'object' && !Array.isArray(parsed) ? 
/** @type {Record} */ (parsed) : {} - return { frontmatter, body } + return {frontmatter, body} } catch { - return { frontmatter: {}, body: content } + return {frontmatter: {}, body: content} } } @@ -47,6 +47,6 @@ export function serializeFrontmatter(frontmatter, body) { if (!frontmatter || Object.keys(frontmatter).length === 0) { return body } - const yamlStr = yaml.dump(frontmatter, { lineWidth: -1 }).trimEnd() + const yamlStr = yaml.dump(frontmatter, {lineWidth: -1}).trimEnd() return `---\n${yamlStr}\n---\n${body}` } diff --git a/src/utils/gradient.js b/src/utils/gradient.js index 086860c..13d3c69 100644 --- a/src/utils/gradient.js +++ b/src/utils/gradient.js @@ -12,9 +12,9 @@ import readline from 'node:readline' /** @type {GradientStop[]} */ export const BRAND_GRADIENT = [ - [0, 212, 255], // #00D4FF — ciano elettrico - [0, 100, 255], // #0064FF — blu vivido - [100, 0, 220], // #6400DC — indaco profondo + [0, 212, 255], // #00D4FF — ciano elettrico + [0, 100, 255], // #0064FF — blu vivido + [100, 0, 220], // #6400DC — indaco profondo ] // ────────────────────────────────────────────────────────────────────────────── @@ -51,24 +51,26 @@ export function gradientText(text, stops, phase = 0) { const segments = stops.length - 1 - return chars.map((char, i) => { - if (char === ' ') return char + return chars + .map((char, i) => { + if (char === ' ') return char - // Normalise t in [0, 1] with phase shift - const t = ((i / Math.max(len - 1, 1)) + phase) % 1 + // Normalise t in [0, 1] with phase shift + const t = (i / Math.max(len - 1, 1) + phase) % 1 - const seg = Math.min(Math.floor(t * segments), segments - 1) - const localT = t * segments - seg + const seg = Math.min(Math.floor(t * segments), segments - 1) + const localT = t * segments - seg - const [r1, g1, b1] = stops[seg] - const [r2, g2, b2] = stops[seg + 1] + const [r1, g1, b1] = stops[seg] + const [r2, g2, b2] = stops[seg + 1] - const r = Math.round(r1 + (r2 - r1) * localT) - const g = Math.round(g1 + (g2 - 
g1) * localT) - const b = Math.round(b1 + (b2 - b1) * localT) + const r = Math.round(r1 + (r2 - r1) * localT) + const g = Math.round(g1 + (g2 - g1) * localT) + const b = Math.round(b1 + (b2 - b1) * localT) - return chalk.rgb(r, g, b)(char) - }).join('') + return chalk.rgb(r, g, b)(char) + }) + .join('') } // ────────────────────────────────────────────────────────────────────────────── diff --git a/src/utils/open-browser.js b/src/utils/open-browser.js index da984a7..5aa2707 100644 --- a/src/utils/open-browser.js +++ b/src/utils/open-browser.js @@ -1,6 +1,6 @@ import open from 'open' -import { detectPlatform } from '../services/platform.js' -import { exec } from '../services/shell.js' +import {detectPlatform} from '../services/platform.js' +import {exec} from '../services/shell.js' /** * Open a URL in the default browser, using the platform-appropriate command. @@ -8,7 +8,7 @@ import { exec } from '../services/shell.js' * @returns {Promise} */ export async function openBrowser(url) { - const { platform, openCommand } = await detectPlatform() + const {platform, openCommand} = await detectPlatform() if (platform === 'macos') { await open(url) diff --git a/src/utils/tui/form.js b/src/utils/tui/form.js new file mode 100644 index 0000000..93b7013 --- /dev/null +++ b/src/utils/tui/form.js @@ -0,0 +1,1006 @@ +/** + * @module form + * Inline form component for the dvmi sync-config-ai TUI. + * All rendering functions are pure (no terminal side effects). + * The parent tab-tui.js is responsible for writing rendered lines to the screen. 
+ */ + +import chalk from 'chalk' + +// ────────────────────────────────────────────────────────────────────────────── +// Typedefs +// ────────────────────────────────────────────────────────────────────────────── + +/** + * @typedef {Object} TextField + * @property {'text'} type + * @property {string} label + * @property {string} value + * @property {number} cursor - Cursor position (0 = before first char) + * @property {boolean} required + * @property {string} placeholder + * @property {string} [key] - Optional override key for extractValues output + */ + +/** + * @typedef {Object} SelectorField + * @property {'selector'} type + * @property {string} label + * @property {string[]} options + * @property {number} selectedIndex + * @property {boolean} required + * @property {string} [key] + */ + +/** + * @typedef {{ id: string, label: string }} MultiSelectOption + */ + +/** + * @typedef {Object} MultiSelectField + * @property {'multiselect'} type + * @property {string} label + * @property {MultiSelectOption[]} options + * @property {Set} selected + * @property {number} focusedOptionIndex + * @property {boolean} required + * @property {string} [key] + */ + +/** + * @typedef {Object} MiniEditorField + * @property {'editor'} type + * @property {string} label + * @property {string[]} lines + * @property {number} cursorLine + * @property {number} cursorCol + * @property {boolean} required + * @property {string} [key] + */ + +/** + * @typedef {TextField|SelectorField|MultiSelectField|MiniEditorField} Field + */ + +/** + * @typedef {Object} FormState + * @property {Field[]} fields + * @property {number} focusedFieldIndex + * @property {string} title + * @property {'editing'|'submitted'|'cancelled'} status + * @property {string|null} errorMessage + */ + +/** + * @typedef {Object} SubmitResult + * @property {true} submitted + * @property {object} values + */ + +/** + * @typedef {Object} CancelResult + * @property {true} cancelled + */ + +// 
// ──────────────────────────────────────────────────────────────────────────────
// Internal helpers
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Map a form field to the plain-object key used by extractValues().
 * Uses the explicit `key` property when present; otherwise derives one from
 * the label (lowercased, runs of whitespace collapsed to underscores).
 * @param {Field} field
 * @returns {string}
 */
function fieldKey(field) {
  if (field.key) return field.key
  return field.label.toLowerCase().replace(/\s+/g, '_')
}

/**
 * Render the text cursor inside a string value at the given position.
 * Inserts an inverse-video `|` character at the cursor index.
 * @param {string} value
 * @param {number} cursor
 * @returns {string}
 */
function renderCursor(value, cursor) {
  return value.slice(0, cursor) + chalk.inverse('|') + value.slice(cursor)
}

// ──────────────────────────────────────────────────────────────────────────────
// buildFieldLine
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Render a single form field as a terminal line.
 *
 * - TextField:        `  label: value` (cursor shown as `|` when focused)
 * - SelectorField:    `  label: < option >`
 * - MultiSelectField: `  label: N/total selected`
 * - MiniEditorField:  `  label: N lines`
 *
 * When focused, the line is prefixed with a bold `> ` indicator instead of `  `.
 *
 * @param {Field} field
 * @param {boolean} focused
 * @returns {string}
 */
export function buildFieldLine(field, focused) {
  const prefix = focused ? chalk.bold('> ') : '  '

  if (field.type === 'text') {
    // Unfocused empty values fall back to a dimmed placeholder.
    const display = focused
      ? renderCursor(field.value, field.cursor)
      : field.value || chalk.dim(field.placeholder || '')
    return `${prefix}${chalk.bold(field.label)}: ${display}`
  }

  if (field.type === 'selector') {
    const option = field.options[field.selectedIndex] ?? ''
    const arrows = focused
      ? `${chalk.bold('< ')}${chalk.cyan(option)}${chalk.bold(' >')}`
      : `< ${option} >`
    return `${prefix}${chalk.bold(field.label)}: ${arrows}`
  }

  if (field.type === 'multiselect') {
    const count = field.selected.size
    const total = field.options.length
    const summary = focused ? chalk.cyan(`${count}/${total} selected`) : `${count}/${total} selected`
    return `${prefix}${chalk.bold(field.label)}: ${summary}`
  }

  if (field.type === 'editor') {
    const lineCount = field.lines.length
    const summary = focused
      ? chalk.cyan(`${lineCount} line${lineCount === 1 ? '' : 's'}`)
      : `${lineCount} line${lineCount === 1 ? '' : 's'}`
    return `${prefix}${chalk.bold(field.label)}: ${summary}`
  }

  // Unknown field type — render a placeholder value so the form stays aligned.
  return `${prefix}${chalk.bold(/** @type {any} */ (field).label)}: —`
}

// ──────────────────────────────────────────────────────────────────────────────
// buildMultiSelectLines
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Render expanded MultiSelectField options as multiple lines (shown when focused).
 * Each option shows `[x]` when selected and `[ ]` when not.
 * The option under the cursor is highlighted with chalk.bold.
 *
 * @param {MultiSelectField} field
 * @param {boolean} focused
 * @param {number} maxLines - Maximum number of lines to return
 * @returns {string[]}
 */
export function buildMultiSelectLines(field, focused, maxLines) {
  const lines = []
  for (let i = 0; i < field.options.length; i++) {
    // Fix: check the budget BEFORE pushing, so maxLines <= 0 yields no lines
    // (previously one line was always emitted before the limit was applied).
    if (lines.length >= maxLines) break
    const opt = field.options[i]
    const checked = field.selected.has(opt.id) ? chalk.green('[x]') : '[ ]'
    const isCursor = focused && i === field.focusedOptionIndex
    const line = isCursor
      ? chalk.bold(`    ${checked} ${opt.label}`)
      : `    ${checked} ${opt.label}`
    lines.push(line)
  }
  return lines
}

// ──────────────────────────────────────────────────────────────────────────────
// buildMiniEditorLines
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Render MiniEditorField content with line numbers.
 * When focused, inserts `|` at the cursor column on the active line.
 * Returns up to `maxLines` lines.
 *
 * @param {MiniEditorField} field
 * @param {boolean} focused
 * @param {number} maxLines - Maximum number of lines to return
 * @returns {string[]}
 */
export function buildMiniEditorLines(field, focused, maxLines) {
  const lines = []
  // Width of the widest line number, so the gutter stays aligned.
  const numWidth = String(field.lines.length).length

  for (let i = 0; i < field.lines.length; i++) {
    // Fix: check the budget BEFORE pushing (see buildMultiSelectLines).
    if (lines.length >= maxLines) break
    const lineNum = String(i + 1).padStart(numWidth)
    const rawLine = field.lines[i]
    const content = focused && i === field.cursorLine ? renderCursor(rawLine, field.cursorCol) : rawLine
    lines.push(`  ${chalk.dim(lineNum + ' │')} ${content}`)
  }
  return lines
}

// ──────────────────────────────────────────────────────────────────────────────
// buildFormScreen
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Render all form fields into an array of terminal lines.
 *
 * For the currently focused field:
 * - MultiSelectField: renders expanded options below the field header line
 * - MiniEditorField: renders editor content lines below the field header line
 * - Other types: renders just the single header line
 *
 * Returns an array of lines (no ANSI clear/home — the parent handles that).
 * Includes the form title at the top, an error message if set, all fields,
 * and a footer hint line at the bottom.
 */
/**
 * @param {FormState} formState
 * @param {number} viewportHeight - Available content lines
 * @param {number} termCols - Terminal width
 * @returns {string[]}
 */
export function buildFormScreen(formState, viewportHeight, termCols) {
  const lines = []

  // ── Title ──────────────────────────────────────────────────────────────────
  lines.push('')
  lines.push(`  ${chalk.bold.cyan(formState.title)}`)
  lines.push(`  ${chalk.dim('─'.repeat(Math.min(termCols - 4, 60)))}`)

  // ── Error message ──────────────────────────────────────────────────────────
  if (formState.errorMessage) {
    lines.push(`  ${chalk.red('✖ ' + formState.errorMessage)}`)
  }

  lines.push('')

  // ── Fields ─────────────────────────────────────────────────────────────────
  const FOOTER_RESERVE = 2 // blank line + keyboard hints

  for (let i = 0; i < formState.fields.length; i++) {
    const field = formState.fields[i]
    const isFocused = i === formState.focusedFieldIndex

    // Header line for every field.
    lines.push(buildFieldLine(field, isFocused))

    // Expanded inline content for the focused multiselect / editor.
    if (isFocused) {
      // Fix: compute the remaining budget directly from the viewport. The
      // previous code subtracted `lines.length` from a value that had already
      // deducted the header lines, double-counting them and truncating (or
      // entirely hiding) the expanded options/editor content.
      const remaining = viewportHeight - FOOTER_RESERVE - lines.length
      if (field.type === 'multiselect' && remaining > 0) {
        lines.push(...buildMultiSelectLines(field, true, remaining))
      } else if (field.type === 'editor' && remaining > 0) {
        lines.push(...buildMiniEditorLines(field, true, remaining))
      }
    }
  }

  // ── Footer hint ────────────────────────────────────────────────────────────
  lines.push('')
  lines.push(chalk.dim('  Tab next field   Shift+Tab prev   Ctrl+S save   Esc cancel'))

  return lines
}

// ──────────────────────────────────────────────────────────────────────────────
// extractValues
// ──────────────────────────────────────────────────────────────────────────────

/** Extract form field values into a plain object. */
/**
 * - TextField → string value
 * - SelectorField → selected option string
 * - MultiSelectField → array of selected ids
 * - MiniEditorField → lines joined with `\n`
 *
 * The key for each field is `field.key` if set, otherwise the label lowercased
 * with spaces replaced by underscores.
 *
 * @param {FormState} formState
 * @returns {object}
 */
export function extractValues(formState) {
  /** @type {Record<string, unknown>} */
  const values = {}

  for (const field of formState.fields) {
    switch (field.type) {
      case 'text':
        values[fieldKey(field)] = field.value
        break
      case 'selector':
        values[fieldKey(field)] = field.options[field.selectedIndex] ?? ''
        break
      case 'multiselect':
        values[fieldKey(field)] = [...field.selected]
        break
      case 'editor':
        values[fieldKey(field)] = field.lines.join('\n')
        break
      default:
        // Unknown field types contribute no value.
        break
    }
  }

  return values
}

// ──────────────────────────────────────────────────────────────────────────────
// Validation helper
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Check that all required fields have a non-empty value.
 * Returns the label of the first invalid field, or null if all are valid.
 */
/**
 * @param {FormState} formState
 * @returns {string|null}
 */
function validateForm(formState) {
  for (const field of formState.fields) {
    if (!field.required) continue

    // Whitespace-only text counts as empty.
    if (field.type === 'text' && field.value.trim() === '') {
      return field.label
    }
    // A selector with options always has a selection; only an empty option
    // list makes it invalid.
    if (field.type === 'selector' && field.options.length === 0) {
      return field.label
    }
    if (field.type === 'multiselect' && field.selected.size === 0) {
      return field.label
    }
    if (field.type === 'editor') {
      const content = field.lines.join('\n').trim()
      if (content === '') return field.label
    }
  }
  return null
}

// ──────────────────────────────────────────────────────────────────────────────
// Field-specific keypress handlers (pure)
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Handle a keypress on a focused TextField. Returns updated field
 * (or the same object when the key had no effect).
 * @param {TextField} field
 * @param {{ name: string, sequence?: string, ctrl?: boolean, shift?: boolean }} key
 * @returns {TextField}
 */
function handleTextFieldKey(field, key) {
  const {value, cursor} = field

  if (key.name === 'backspace') {
    if (cursor === 0) return field
    return {
      ...field,
      value: value.slice(0, cursor - 1) + value.slice(cursor),
      cursor: cursor - 1,
    }
  }

  if (key.name === 'delete') {
    // Forward delete: cursor position is unchanged.
    if (cursor >= value.length) return field
    return {
      ...field,
      value: value.slice(0, cursor) + value.slice(cursor + 1),
    }
  }

  if (key.name === 'left') {
    return {...field, cursor: Math.max(0, cursor - 1)}
  }
  if (key.name === 'right') {
    return {...field, cursor: Math.min(value.length, cursor + 1)}
  }
  if (key.name === 'home') {
    return {...field, cursor: 0}
  }
  if (key.name === 'end') {
    return {...field, cursor: value.length}
  }

  // Printable character — insert at the cursor. `ch >= ' '` filters out
  // control characters.
  if (key.sequence && key.sequence.length === 1 && !key.ctrl) {
    const ch = key.sequence
    if (ch >= ' ') {
      return {
        ...field,
        value: value.slice(0, cursor) + ch + value.slice(cursor),
        cursor: cursor + 1,
      }
    }
  }

  return field
}

/**
 * Handle a keypress on a focused SelectorField. ← → cycle through the
 * options with wrap-around. Returns updated field.
 * @param {SelectorField} field
 * @param {{ name: string }} key
 * @returns {SelectorField}
 */
function handleSelectorFieldKey(field, key) {
  const len = field.options.length
  if (len === 0) return field

  if (key.name === 'left') {
    return {...field, selectedIndex: (field.selectedIndex - 1 + len) % len}
  }
  if (key.name === 'right') {
    return {...field, selectedIndex: (field.selectedIndex + 1) % len}
  }

  return field
}

/**
 * Handle a keypress on a focused MultiSelectField.
 * Returns updated field or { advanceField: true } signal object.
 * @param {MultiSelectField} field
 * @param {{ name: string }} key
 * @returns {MultiSelectField | { advanceField: true }}
 */
function handleMultiSelectFieldKey(field, key) {
  const len = field.options.length

  if (key.name === 'up' || key.name === 'down') {
    // Fix: with an empty option list, the previous `Math.min(len - 1, …)`
    // could drive focusedOptionIndex to -1. Guard and clamp instead.
    if (len === 0) return field
    const delta = key.name === 'up' ? -1 : 1
    const next = Math.min(len - 1, Math.max(0, field.focusedOptionIndex + delta))
    return {...field, focusedOptionIndex: next}
  }

  if (key.name === 'space') {
    const opt = field.options[field.focusedOptionIndex]
    if (!opt) return field
    // Copy the Set so the update stays immutable.
    const newSelected = new Set(field.selected)
    if (newSelected.has(opt.id)) {
      newSelected.delete(opt.id)
    } else {
      newSelected.add(opt.id)
    }
    return {...field, selected: newSelected}
  }

  if (key.name === 'return') {
    return {advanceField: /** @type {true} */ (true)}
  }

  return field
}

/**
 * Handle a keypress on a focused MiniEditorField.
 * Returns updated field or { advanceField: true } signal object.
 */
/**
 * @param {MiniEditorField} field
 * @param {{ name: string, sequence?: string, ctrl?: boolean }} key
 * @returns {MiniEditorField | { advanceField: true }}
 */
function handleEditorFieldKey(field, key) {
  const {lines, cursorLine, cursorCol} = field
  const row = lines[cursorLine]

  switch (key.name) {
    case 'escape':
      // Esc leaves the editor and advances focus; it does not cancel the form.
      return {advanceField: /** @type {true} */ (true)}

    case 'left': {
      if (cursorCol > 0) return {...field, cursorCol: cursorCol - 1}
      if (cursorLine === 0) return field
      // Wrap to the end of the previous line.
      return {...field, cursorLine: cursorLine - 1, cursorCol: lines[cursorLine - 1].length}
    }

    case 'right': {
      if (cursorCol < row.length) return {...field, cursorCol: cursorCol + 1}
      if (cursorLine >= lines.length - 1) return field
      // Wrap to the start of the next line.
      return {...field, cursorLine: cursorLine + 1, cursorCol: 0}
    }

    case 'up': {
      if (cursorLine === 0) return field
      const target = cursorLine - 1
      // Clamp the column to the target line's length.
      return {...field, cursorLine: target, cursorCol: Math.min(cursorCol, lines[target].length)}
    }

    case 'down': {
      if (cursorLine >= lines.length - 1) return field
      const target = cursorLine + 1
      return {...field, cursorLine: target, cursorCol: Math.min(cursorCol, lines[target].length)}
    }

    case 'home':
      return {...field, cursorCol: 0}

    case 'end':
      return {...field, cursorCol: row.length}

    case 'backspace': {
      if (cursorCol > 0) {
        const copy = [...lines]
        copy[cursorLine] = row.slice(0, cursorCol - 1) + row.slice(cursorCol)
        return {...field, lines: copy, cursorCol: cursorCol - 1}
      }
      if (cursorLine === 0) return field
      // At column 0: join this line onto the end of the previous one.
      const copy = [...lines]
      const joinAt = copy[cursorLine - 1].length
      copy[cursorLine - 1] += copy[cursorLine]
      copy.splice(cursorLine, 1)
      return {...field, lines: copy, cursorLine: cursorLine - 1, cursorCol: joinAt}
    }

    case 'delete': {
      const copy = [...lines]
      if (cursorCol < row.length) {
        copy[cursorLine] = row.slice(0, cursorCol) + row.slice(cursorCol + 1)
        return {...field, lines: copy}
      }
      if (cursorLine >= lines.length - 1) return field
      // At end of line: pull the next line up onto this one.
      copy[cursorLine] += copy[cursorLine + 1]
      copy.splice(cursorLine + 1, 1)
      return {...field, lines: copy}
    }

    case 'return': {
      // Split the current line at the cursor; cursor moves to the new line.
      const copy = [...lines]
      copy.splice(cursorLine, 1, row.slice(0, cursorCol), row.slice(cursorCol))
      return {...field, lines: copy, cursorLine: cursorLine + 1, cursorCol: 0}
    }

    default: {
      // Single printable character — insert at the cursor. `ch >= ' '`
      // filters out control characters.
      const ch = key.sequence
      if (ch && ch.length === 1 && !key.ctrl && ch >= ' ') {
        const copy = [...lines]
        copy[cursorLine] = row.slice(0, cursorCol) + ch + row.slice(cursorCol)
        return {...field, lines: copy, cursorCol: cursorCol + 1}
      }
      return field
    }
  }
}

// ──────────────────────────────────────────────────────────────────────────────
// handleFormKeypress
// ──────────────────────────────────────────────────────────────────────────────

/** Pure reducer for form keypresses. */
/**
 * Global keys handled regardless of focused field:
 * - Tab: move to next field (wraps)
 * - Shift+Tab: move to previous field (wraps)
 * - Ctrl+S: validate and submit
 * - Esc: cancel → return `{ cancelled: true }`
 * - Enter on last field: validate and submit
 *
 * Field-specific handling when a field is focused:
 * - TextField: printable chars insert at the cursor, Backspace deletes, ← → move cursor, Home/End jump
 * - SelectorField: ← → cycle options
 * - MultiSelectField: ↑ ↓ navigate, Space toggle, Enter advances to next field
 * - MiniEditorField: printable chars insert, Enter inserts a new line, Esc exits to next field
 *
 * On submit: validates required fields. If invalid, sets `errorMessage` and returns
 * the state. If valid, returns `{ submitted: true, values: extractValues(formState) }`.
 *
 * @param {FormState} formState
 * @param {{ name: string, sequence?: string, ctrl?: boolean, shift?: boolean }} key
 * @returns {FormState | SubmitResult | CancelResult}
 */
export function handleFormKeypress(formState, key) {
  const {fields, focusedFieldIndex} = formState
  const lastFieldIndex = fields.length - 1
  const focusedField = fields[focusedFieldIndex]

  // ── Esc: cancel (unless inside a MiniEditorField) ─────────────────────────
  // For editor fields, Esc is handled inside the field handler to advance
  // focus, not cancel the form.
  if (key.name === 'escape' && focusedField?.type !== 'editor') {
    return {cancelled: /** @type {true} */ (true)}
  }

  // ── Ctrl+S: submit ────────────────────────────────────────────────────────
  if (key.ctrl && key.name === 's') {
    return attemptSubmit(formState)
  }

  // ── Tab / Shift+Tab: cycle focus (wraps in both directions) ───────────────
  if (key.name === 'tab') {
    const step = key.shift ? -1 : 1
    return {
      ...formState,
      focusedFieldIndex: (focusedFieldIndex + step + fields.length) % fields.length,
      errorMessage: null,
    }
  }

  // ── Enter on last field: submit — unless the field consumes Enter itself
  //    (editor inserts a newline; multiselect advances focus). ───────────────
  if (
    key.name === 'return' &&
    focusedFieldIndex === lastFieldIndex &&
    focusedField?.type !== 'editor' &&
    focusedField?.type !== 'multiselect'
  ) {
    return attemptSubmit(formState)
  }

  // ── Delegate to the focused field ─────────────────────────────────────────
  if (!focusedField) return formState

  if (focusedField.type === 'text') {
    const updated = handleTextFieldKey(focusedField, key)
    if (updated === focusedField) return formState
    return {
      ...formState,
      errorMessage: null,
      fields: replaceAt(fields, focusedFieldIndex, updated),
    }
  }

  if (focusedField.type === 'selector') {
    const updated = handleSelectorFieldKey(focusedField, key)
    if (updated === focusedField) return formState
    // Fix: clear any stale validation error on edit — this branch previously
    // left `errorMessage` set, inconsistent with the text/editor branches.
    return {
      ...formState,
      errorMessage: null,
      fields: replaceAt(fields, focusedFieldIndex, updated),
    }
  }

  if (focusedField.type === 'multiselect') {
    const result = handleMultiSelectFieldKey(focusedField, key)
    if ('advanceField' in result) {
      return {
        ...formState,
        errorMessage: null,
        focusedFieldIndex: Math.min(focusedFieldIndex + 1, lastFieldIndex),
      }
    }
    if (result === focusedField) return formState
    // Fix: clear stale errorMessage here too (see selector branch).
    return {
      ...formState,
      errorMessage: null,
      fields: replaceAt(fields, focusedFieldIndex, result),
    }
  }

  if (focusedField.type === 'editor') {
    const result = handleEditorFieldKey(focusedField, key)
    if ('advanceField' in result) {
      // Per spec, Esc inside the editor moves focus to the next field.
      return {
        ...formState,
        errorMessage: null,
        focusedFieldIndex: Math.min(focusedFieldIndex + 1, lastFieldIndex),
      }
    }
    if (result === focusedField) return formState
    return {
      ...formState,
      errorMessage: null,
      fields: replaceAt(fields, focusedFieldIndex, result),
    }
  }

  return formState
}

/**
 * Attempt to submit the form: validate, then return SubmitResult or FormState with error.
 * @param {FormState} formState
 * @returns {FormState | SubmitResult}
 */
function attemptSubmit(formState) {
  const invalidLabel = validateForm(formState)
  if (invalidLabel !== null) {
    return {
      ...formState,
      errorMessage: `"${invalidLabel}" is required.`,
    }
  }
  return {
    submitted: /** @type {true} */ (true),
    values: extractValues(formState),
  }
}

/**
 * Return a new array with element at `index` replaced by `value`.
 * @template T
 * @param {T[]} arr
 * @param {number} index
 * @param {T} value
 * @returns {T[]}
 */
function replaceAt(arr, index, value) {
  return arr.map((item, i) => (i === index ? value : item))
}

// ──────────────────────────────────────────────────────────────────────────────
// Form field definitions
// ──────────────────────────────────────────────────────────────────────────────

/**
 * Return form fields for creating or editing an MCP entry.
 *
 * Fields: name (text), environments (multiselect), transport (selector), command (text),
 * args (text), url (text), description (text, optional).
 */
+ * + * @param {import('../../types.js').CategoryEntry|null} [entry] - Existing entry to pre-fill from, or null to create + * @param {import('../../types.js').DetectedEnvironment[]} [compatibleEnvs] - Environments compatible with this category type + * @returns {Field[]} + */ +export function getMCPFormFields(entry = null, compatibleEnvs = []) { + /** @type {import('../../types.js').MCPParams|null} */ + const p = entry ? /** @type {import('../../types.js').MCPParams} */ (entry.params) : null + + const transportOptions = ['stdio', 'sse', 'streamable-http'] + const transportIndex = p ? Math.max(0, transportOptions.indexOf(p.transport)) : 0 + + return [ + /** @type {TextField} */ ({ + type: 'text', + label: 'Name', + key: 'name', + value: entry ? entry.name : '', + cursor: entry ? entry.name.length : 0, + required: true, + placeholder: 'my-mcp-server', + }), + /** @type {MultiSelectField} */ ({ + type: 'multiselect', + label: 'Environments', + key: 'environments', + options: compatibleEnvs.map((env) => ({id: env.id, label: env.name})), + selected: new Set(entry ? entry.environments : []), + focusedOptionIndex: 0, + required: true, + }), + /** @type {SelectorField} */ ({ + type: 'selector', + label: 'Transport', + key: 'transport', + options: transportOptions, + selectedIndex: transportIndex, + required: true, + }), + /** @type {TextField} */ ({ + type: 'text', + label: 'Command', + key: 'command', + value: p?.command ?? '', + cursor: (p?.command ?? '').length, + required: false, + placeholder: 'npx my-mcp-server', + }), + /** @type {TextField} */ ({ + type: 'text', + label: 'Args', + key: 'args', + value: p?.args ? p.args.join(' ') : '', + cursor: p?.args ? p.args.join(' ').length : 0, + required: false, + placeholder: '--port 3000 --verbose', + }), + /** @type {TextField} */ ({ + type: 'text', + label: 'URL', + key: 'url', + value: p?.url ?? '', + cursor: (p?.url ?? 
'').length, + required: false, + placeholder: 'https://mcp.example.com', + }), + /** @type {TextField} */ ({ + type: 'text', + label: 'Description', + key: 'description', + value: p?.description ?? (entry?.params ? /** @type {any} */ ((entry.params).description ?? '') : ''), + cursor: 0, + required: false, + placeholder: 'Optional description', + }), + ] +} + +/** + * Return form fields for creating or editing a Command entry. + * + * Fields: name (text), environments (multiselect), description (text, optional), content (editor). + * + * @param {import('../../types.js').CategoryEntry|null} [entry] - Existing entry to pre-fill from, or null to create + * @param {import('../../types.js').DetectedEnvironment[]} [compatibleEnvs] - Environments compatible with this category type + * @returns {Field[]} + */ +export function getCommandFormFields(entry = null, compatibleEnvs = []) { + /** @type {import('../../types.js').CommandParams|null} */ + const p = entry ? /** @type {import('../../types.js').CommandParams} */ (entry.params) : null + const contentStr = p?.content ?? '' + const contentLines = contentStr.length > 0 ? contentStr.split('\n') : [''] + + return [ + /** @type {TextField} */ ({ + type: 'text', + label: 'Name', + key: 'name', + value: entry ? entry.name : '', + cursor: entry ? entry.name.length : 0, + required: true, + placeholder: 'my-command', + }), + /** @type {MultiSelectField} */ ({ + type: 'multiselect', + label: 'Environments', + key: 'environments', + options: compatibleEnvs.map((env) => ({id: env.id, label: env.name})), + selected: new Set(entry ? entry.environments : []), + focusedOptionIndex: 0, + required: true, + }), + /** @type {TextField} */ ({ + type: 'text', + label: 'Description', + key: 'description', + value: p?.description ?? '', + cursor: (p?.description ?? 
'').length, + required: false, + placeholder: 'Optional description', + }), + /** @type {MiniEditorField} */ ({ + type: 'editor', + label: 'Content', + key: 'content', + lines: contentLines, + cursorLine: 0, + cursorCol: 0, + required: true, + }), + ] +} + +/** + * Return form fields for creating or editing a Skill entry. + * + * Fields: name (text), environments (multiselect), description (text, optional), content (editor). + * + * @param {import('../../types.js').CategoryEntry|null} [entry] - Existing entry to pre-fill from, or null to create + * @param {import('../../types.js').DetectedEnvironment[]} [compatibleEnvs] - Environments compatible with this category type + * @returns {Field[]} + */ +export function getSkillFormFields(entry = null, compatibleEnvs = []) { + /** @type {import('../../types.js').SkillParams|null} */ + const p = entry ? /** @type {import('../../types.js').SkillParams} */ (entry.params) : null + const contentStr = p?.content ?? '' + const contentLines = contentStr.length > 0 ? contentStr.split('\n') : [''] + + return [ + /** @type {TextField} */ ({ + type: 'text', + label: 'Name', + key: 'name', + value: entry ? entry.name : '', + cursor: entry ? entry.name.length : 0, + required: true, + placeholder: 'my-skill', + }), + /** @type {MultiSelectField} */ ({ + type: 'multiselect', + label: 'Environments', + key: 'environments', + options: compatibleEnvs.map((env) => ({id: env.id, label: env.name})), + selected: new Set(entry ? entry.environments : []), + focusedOptionIndex: 0, + required: true, + }), + /** @type {TextField} */ ({ + type: 'text', + label: 'Description', + key: 'description', + value: p?.description ?? '', + cursor: (p?.description ?? 
'').length, + required: false, + placeholder: 'Optional description', + }), + /** @type {MiniEditorField} */ ({ + type: 'editor', + label: 'Content', + key: 'content', + lines: contentLines, + cursorLine: 0, + cursorCol: 0, + required: true, + }), + ] +} + +/** + * Return form fields for creating or editing an Agent entry. + * + * Fields: name (text), environments (multiselect), description (text, optional), instructions (editor). + * + * @param {import('../../types.js').CategoryEntry|null} [entry] - Existing entry to pre-fill from, or null to create + * @param {import('../../types.js').DetectedEnvironment[]} [compatibleEnvs] - Environments compatible with this category type + * @returns {Field[]} + */ +export function getAgentFormFields(entry = null, compatibleEnvs = []) { + /** @type {import('../../types.js').AgentParams|null} */ + const p = entry ? /** @type {import('../../types.js').AgentParams} */ (entry.params) : null + const instructionsStr = p?.instructions ?? '' + const instructionLines = instructionsStr.length > 0 ? instructionsStr.split('\n') : [''] + + return [ + /** @type {TextField} */ ({ + type: 'text', + label: 'Name', + key: 'name', + value: entry ? entry.name : '', + cursor: entry ? entry.name.length : 0, + required: true, + placeholder: 'my-agent', + }), + /** @type {MultiSelectField} */ ({ + type: 'multiselect', + label: 'Environments', + key: 'environments', + options: compatibleEnvs.map((env) => ({id: env.id, label: env.name})), + selected: new Set(entry ? entry.environments : []), + focusedOptionIndex: 0, + required: true, + }), + /** @type {TextField} */ ({ + type: 'text', + label: 'Description', + key: 'description', + value: p?.description ?? '', + cursor: (p?.description ?? 
'').length, + required: false, + placeholder: 'Optional description', + }), + /** @type {MiniEditorField} */ ({ + type: 'editor', + label: 'Instructions', + key: 'instructions', + lines: instructionLines, + cursorLine: 0, + cursorCol: 0, + required: true, + }), + ] +} diff --git a/src/utils/tui/modal.js b/src/utils/tui/modal.js index fcafb13..4cd96c7 100644 --- a/src/utils/tui/modal.js +++ b/src/utils/tui/modal.js @@ -1,5 +1,5 @@ import chalk from 'chalk' -import { NVD_ATTRIBUTION } from '../../services/nvd.js' +import {NVD_ATTRIBUTION} from '../../services/nvd.js' // ────────────────────────────────────────────────────────────────────────────── // ANSI escape sequences (re-declared locally — avoids cross-module coupling) @@ -49,7 +49,7 @@ function centerText(text, width) { * @returns {string} */ export function buildModalScreen(state) { - const { modalContent, modalScrollOffset, termRows, termCols, firstRefUrl } = state + const {modalContent, modalScrollOffset, termRows, termCols, firstRefUrl} = state const lines = [] @@ -84,7 +84,9 @@ export function buildModalScreen(state) { // ── Footer ───────────────────────────────────────────────────────────────── lines.push(chalk.bold.cyan('╠' + '═'.repeat(innerWidth + 2) + '╣')) - lines.push(chalk.bold.cyan('║ ') + chalk.dim(NVD_ATTRIBUTION).slice(0, innerWidth).padEnd(innerWidth) + chalk.bold.cyan(' ║')) + lines.push( + chalk.bold.cyan('║ ') + chalk.dim(NVD_ATTRIBUTION).slice(0, innerWidth).padEnd(innerWidth) + chalk.bold.cyan(' ║'), + ) const scrollHint = content.length > contentViewport ? ' ↑↓/PgUp/PgDn scroll' : '' const openHint = firstRefUrl ? ' o open ref' : '' @@ -159,8 +161,7 @@ export function buildErrorScreen(cveId, errorMessage, termRows, termCols) { lines.push(chalk.bold.cyan('║ ') + chalk.red.bold(centerText(titleText, innerWidth)) + chalk.bold.cyan(' ║')) lines.push(chalk.bold.cyan('║ ') + ' '.repeat(innerWidth) + chalk.bold.cyan(' ║')) - const truncErr = - errorMessage.length > innerWidth ? 
errorMessage.slice(0, innerWidth - 1) + '…' : errorMessage + const truncErr = errorMessage.length > innerWidth ? errorMessage.slice(0, innerWidth - 1) + '…' : errorMessage lines.push(chalk.bold.cyan('║ ') + chalk.red(truncErr.padEnd(innerWidth)) + chalk.bold.cyan(' ║')) const remaining = termRows - lines.length - 2 @@ -192,14 +193,14 @@ export function buildErrorScreen(cveId, errorMessage, termRows, termCols) { * } */ export function handleModalKeypress(state, key) { - const { modalContent, modalScrollOffset, termRows, firstRefUrl } = state + const {modalContent, modalScrollOffset, termRows, firstRefUrl} = state - if (key.ctrl && key.name === 'c') return { exit: true } - if (key.name === 'q') return { exit: true } + if (key.ctrl && key.name === 'c') return {exit: true} + if (key.name === 'q') return {exit: true} - if (key.name === 'escape') return { backToTable: true } + if (key.name === 'escape') return {backToTable: true} - if (key.name === 'o' && firstRefUrl) return { openUrl: firstRefUrl } + if (key.name === 'o' && firstRefUrl) return {openUrl: firstRefUrl} const contentLen = modalContent ? 
modalContent.length : 0 const BORDER_LINES = 3 @@ -208,16 +209,16 @@ export function handleModalKeypress(state, key) { const maxOffset = Math.max(0, contentLen - contentViewport) if (key.name === 'up') { - return { ...state, modalScrollOffset: clamp(modalScrollOffset - 1, 0, maxOffset) } + return {...state, modalScrollOffset: clamp(modalScrollOffset - 1, 0, maxOffset)} } if (key.name === 'down') { - return { ...state, modalScrollOffset: clamp(modalScrollOffset + 1, 0, maxOffset) } + return {...state, modalScrollOffset: clamp(modalScrollOffset + 1, 0, maxOffset)} } if (key.name === 'pageup') { - return { ...state, modalScrollOffset: clamp(modalScrollOffset - contentViewport, 0, maxOffset) } + return {...state, modalScrollOffset: clamp(modalScrollOffset - contentViewport, 0, maxOffset)} } if (key.name === 'pagedown') { - return { ...state, modalScrollOffset: clamp(modalScrollOffset + contentViewport, 0, maxOffset) } + return {...state, modalScrollOffset: clamp(modalScrollOffset + contentViewport, 0, maxOffset)} } return state // unrecognized key — no state change diff --git a/src/utils/tui/navigable-table.js b/src/utils/tui/navigable-table.js index 6a3509b..a0e610d 100644 --- a/src/utils/tui/navigable-table.js +++ b/src/utils/tui/navigable-table.js @@ -1,9 +1,9 @@ import readline from 'node:readline' import chalk from 'chalk' -import { NVD_ATTRIBUTION } from '../../services/nvd.js' -import { buildModalScreen, buildLoadingScreen, buildErrorScreen, handleModalKeypress } from './modal.js' -import { formatCveDetailPlain } from '../../formatters/vuln.js' -import { openBrowser } from '../open-browser.js' +import {NVD_ATTRIBUTION} from '../../services/nvd.js' +import {buildModalScreen, buildLoadingScreen, buildErrorScreen, handleModalKeypress} from './modal.js' +import {formatCveDetailPlain} from '../../formatters/vuln.js' +import {openBrowser} from '../open-browser.js' // ────────────────────────────────────────────────────────────────────────────── // ANSI escape 
sequences @@ -104,7 +104,7 @@ export function computeViewport(selectedIndex, totalRows, viewportHeight) { startIndex = Math.max(0, startIndex) startIndex = Math.min(Math.max(0, totalRows - viewportHeight), startIndex) const endIndex = Math.min(startIndex + viewportHeight, totalRows) - return { startIndex, endIndex } + return {startIndex, endIndex} } // ────────────────────────────────────────────────────────────────────────────── @@ -149,7 +149,7 @@ export function formatRow(row, columns, termCols, isSelected) { * @returns {string} */ export function buildTableScreen(state) { - const { rows, columns, heading, totalResults, selectedIndex, viewportHeight, termCols } = state + const {rows, columns, heading, totalResults, selectedIndex, viewportHeight, termCols} = state const lines = [] // ── Header ──────────────────────────────────────────────────────────────── @@ -167,7 +167,7 @@ export function buildTableScreen(state) { lines.push(chalk.dim('─'.repeat(Math.min(termCols, dividerWidth)))) // ── Data rows ───────────────────────────────────────────────────────────── - const { startIndex, endIndex } = computeViewport(selectedIndex, rows.length, viewportHeight) + const {startIndex, endIndex} = computeViewport(selectedIndex, rows.length, viewportHeight) for (let i = startIndex; i < endIndex; i++) { lines.push(formatRow(rows[i], columns, termCols, i === selectedIndex)) } @@ -229,26 +229,26 @@ export function createInteractiveTableState(rows, columns, heading, totalResults * @returns {InteractiveTableState | { exit: true }} */ export function handleTableKeypress(state, key) { - const { selectedIndex, rows, viewportHeight } = state + const {selectedIndex, rows, viewportHeight} = state - if (key.name === 'escape' || key.name === 'q') return { exit: true } - if (key.ctrl && key.name === 'c') return { exit: true } + if (key.name === 'escape' || key.name === 'q') return {exit: true} + if (key.ctrl && key.name === 'c') return {exit: true} if (key.name === 'return') { - return { 
...state, currentView: 'modal' } + return {...state, currentView: 'modal'} } if (key.name === 'up') { - return { ...state, selectedIndex: Math.max(0, selectedIndex - 1) } + return {...state, selectedIndex: Math.max(0, selectedIndex - 1)} } if (key.name === 'down') { - return { ...state, selectedIndex: Math.min(rows.length - 1, selectedIndex + 1) } + return {...state, selectedIndex: Math.min(rows.length - 1, selectedIndex + 1)} } if (key.name === 'pageup') { - return { ...state, selectedIndex: Math.max(0, selectedIndex - viewportHeight) } + return {...state, selectedIndex: Math.max(0, selectedIndex - viewportHeight)} } if (key.name === 'pagedown') { - return { ...state, selectedIndex: Math.min(rows.length - 1, selectedIndex + viewportHeight) } + return {...state, selectedIndex: Math.min(rows.length - 1, selectedIndex + viewportHeight)} } return state // unrecognized key — no state change @@ -370,7 +370,7 @@ export async function startInteractiveTable(rows, columns, heading, totalResults if (advisoryUrl) { await openBrowser(String(advisoryUrl)) } - state = { ...state, currentView: 'table' } + state = {...state, currentView: 'table'} process.stdout.write(buildTableScreen(state)) return } diff --git a/src/utils/tui/tab-tui.js b/src/utils/tui/tab-tui.js new file mode 100644 index 0000000..e57b98d --- /dev/null +++ b/src/utils/tui/tab-tui.js @@ -0,0 +1,800 @@ +/** + * @module tab-tui + * Tab-based full-screen TUI framework for dvmi sync-config-ai. + * Follows the same ANSI + readline + chalk pattern as navigable-table.js. + * Zero new dependencies — uses only Node.js built-ins + chalk. 
+ */ + +import readline from 'node:readline' +import chalk from 'chalk' +import { + buildFormScreen, + handleFormKeypress, + getMCPFormFields, + getCommandFormFields, + getSkillFormFields, + getAgentFormFields, +} from './form.js' + +// ────────────────────────────────────────────────────────────────────────────── +// ANSI escape sequences +// ────────────────────────────────────────────────────────────────────────────── + +const ANSI_CLEAR = '\x1b[2J' +const ANSI_HOME = '\x1b[H' +const ANSI_ALT_SCREEN_ON = '\x1b[?1049h' +const ANSI_ALT_SCREEN_OFF = '\x1b[?1049l' +const ANSI_CURSOR_HIDE = '\x1b[?25l' +const ANSI_CURSOR_SHOW = '\x1b[?25h' +const ANSI_INVERSE_ON = '\x1b[7m' +const ANSI_INVERSE_OFF = '\x1b[27m' + +// ────────────────────────────────────────────────────────────────────────────── +// Layout constants +// ────────────────────────────────────────────────────────────────────────────── + +const MIN_COLS = 80 +const MIN_ROWS = 24 +const TAB_BAR_LINES = 2 // tab bar line + divider +const FOOTER_LINES = 2 // empty line + keyboard hints + +// ────────────────────────────────────────────────────────────────────────────── +// Module-level terminal session state +// ────────────────────────────────────────────────────────────────────────────── + +let _cleanupCalled = false +let _altScreenActive = false +let _rawModeActive = false +/** @type {((...args: unknown[]) => void) | null} */ +let _keypressListener = null + +// ────────────────────────────────────────────────────────────────────────────── +// Typedefs +// ────────────────────────────────────────────────────────────────────────────── + +/** + * @typedef {Object} TabDef + * @property {string} label - Display label shown in the tab bar + * @property {string} key - Unique identifier for this tab + */ + +/** + * @typedef {Object} TabTUIState + * @property {TabDef[]} tabs - All tabs + * @property {number} activeTabIndex - Index of the currently active tab + * @property {number} termRows - Current terminal height 
+ * @property {number} termCols - Current terminal width + * @property {number} contentViewportHeight - Usable content lines (termRows - TAB_BAR_LINES - FOOTER_LINES) + * @property {boolean} tooSmall - Whether the terminal is below minimum size + */ + +/** + * @typedef {Object} EnvTabState + * @property {import('../../types.js').DetectedEnvironment[]} envs - Detected environments + * @property {number} selectedIndex - Highlighted row + */ + +/** + * @typedef {Object} CatTabState + * @property {import('../../types.js').CategoryEntry[]} entries - All category entries + * @property {number} selectedIndex - Highlighted row + * @property {'list'|'form'|'confirm-delete'} mode - Current sub-mode + * @property {import('./form.js').FormState|null} formState - Active form state (null when mode is 'list') + * @property {string|null} confirmDeleteId - Entry id pending deletion confirmation + * @property {string} chezmoidTip - Footer tip (empty if chezmoi configured) + */ + +// ────────────────────────────────────────────────────────────────────────────── +// Internal helpers +// ────────────────────────────────────────────────────────────────────────────── + +// ────────────────────────────────────────────────────────────────────────────── +// T017: buildTabBar — renders horizontal tab bar +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Build the tab bar string (one line of tab labels + a divider line). + * Active tab is highlighted with inverse video. 
+ * @param {TabDef[]} tabs + * @param {number} activeIndex + * @returns {string[]} Two lines: [tabBarLine, divider] + */ +export function buildTabBar(tabs, activeIndex) { + const parts = tabs.map((tab, i) => { + const label = ` ${tab.label} ` + if (i === activeIndex) { + return `${ANSI_INVERSE_ON}${label}${ANSI_INVERSE_OFF}` + } + return chalk.dim(label) + }) + const tabBarLine = parts.join(chalk.dim('│')) + const divider = chalk.dim('─'.repeat(60)) + return [tabBarLine, divider] +} + +// ────────────────────────────────────────────────────────────────────────────── +// T017: buildTabScreen — full screen composition +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Compose the full terminal screen from tab bar, content lines, and footer. + * Prepends ANSI clear + home to replace the previous frame. + * @param {string[]} tabBarLines - Output of buildTabBar + * @param {string[]} contentLines - Tab-specific content lines + * @param {string[]} footerLines - Footer hint lines + * @param {number} termRows - Terminal height + * @returns {string} + */ +export function buildTabScreen(tabBarLines, contentLines, footerLines, termRows) { + const lines = [...tabBarLines, ...contentLines] + + // Pad to fill terminal height minus footer + const targetContentLines = termRows - tabBarLines.length - footerLines.length + while (lines.length < targetContentLines) { + lines.push('') + } + + lines.push(...footerLines) + return ANSI_CLEAR + ANSI_HOME + lines.join('\n') +} + +// ────────────────────────────────────────────────────────────────────────────── +// T018: terminal size check +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Build a "terminal too small" warning screen. 
+ * @param {number} termRows + * @param {number} termCols + * @returns {string} + */ +export function buildTooSmallScreen(termRows, termCols) { + const lines = [] + const midRow = Math.floor(termRows / 2) + + for (let i = 0; i < midRow - 1; i++) lines.push('') + + lines.push(chalk.red.bold(` Terminal too small (${termCols}×${termRows}, minimum: ${MIN_COLS}×${MIN_ROWS})`)) + lines.push(chalk.dim(' Resize your terminal window and try again.')) + + return ANSI_CLEAR + ANSI_HOME + lines.join('\n') +} + +// ────────────────────────────────────────────────────────────────────────────── +// T020: buildEnvironmentsTab — content builder +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Build the content lines for the Environments tab. + * @param {import('../../types.js').DetectedEnvironment[]} envs - Detected environments + * @param {number} selectedIndex - Currently highlighted row + * @param {number} viewportHeight - Available content lines + * @param {import('../../formatters/ai-config.js').formatEnvironmentsTable} formatFn - Formatter function + * @param {number} termCols - Terminal width for formatter + * @returns {string[]} + */ +export function buildEnvironmentsTab(envs, selectedIndex, viewportHeight, formatFn, termCols = 120) { + if (envs.length === 0) { + return [ + '', + chalk.dim(' No AI coding environments detected.'), + chalk.dim(' Ensure at least one AI tool is configured in the current project or globally.'), + ] + } + + const tableLines = formatFn(envs, termCols) + + // Add row highlighting to data rows (skip header lines — first 2 lines are header + divider) + const HEADER_LINES = 2 + const resultLines = [] + + for (let i = 0; i < tableLines.length; i++) { + const line = tableLines[i] + const dataIndex = i - HEADER_LINES + if (dataIndex >= 0 && dataIndex === selectedIndex) { + resultLines.push(`${ANSI_INVERSE_ON}${line}${ANSI_INVERSE_OFF}`) + } else { + resultLines.push(line) + } + } + + // Viewport: only show 
lines that fit + return resultLines.slice(0, viewportHeight) +} + +// ────────────────────────────────────────────────────────────────────────────── +// T021: handleEnvironmentsKeypress — pure reducer +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Pure state reducer for keypresses in the Environments tab. + * @param {EnvTabState} state + * @param {{ name: string, ctrl?: boolean }} key + * @returns {EnvTabState | { exit: true } | { switchTab: number }} + */ +export function handleEnvironmentsKeypress(state, key) { + const {selectedIndex, envs} = state + const maxIndex = Math.max(0, envs.length - 1) + + if (key.name === 'up' || key.name === 'k') { + return {...state, selectedIndex: Math.max(0, selectedIndex - 1)} + } + if (key.name === 'down' || key.name === 'j') { + return {...state, selectedIndex: Math.min(maxIndex, selectedIndex + 1)} + } + if (key.name === 'pageup') { + return {...state, selectedIndex: Math.max(0, selectedIndex - 10)} + } + if (key.name === 'pagedown') { + return {...state, selectedIndex: Math.min(maxIndex, selectedIndex + 10)} + } + + return state +} + +// ────────────────────────────────────────────────────────────────────────────── +// Categories tab content builder (T036) — defined here for single-module TUI +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Build the content lines for the Categories tab. 
+ * @param {import('../../types.js').CategoryEntry[]} entries + * @param {number} selectedIndex + * @param {number} viewportHeight + * @param {import('../../formatters/ai-config.js').formatCategoriesTable} formatFn + * @param {number} termCols + * @param {string|null} [confirmDeleteName] - Name of entry pending delete confirmation + * @returns {string[]} + */ +export function buildCategoriesTab( + entries, + selectedIndex, + viewportHeight, + formatFn, + termCols = 120, + confirmDeleteName = null, +) { + if (entries.length === 0) { + const lines = [ + '', + chalk.dim(' No configuration entries yet.'), + chalk.dim(' Press ' + chalk.bold('n') + ' to create your first entry.'), + ] + if (confirmDeleteName === null) return lines + } + + const tableLines = formatFn(entries, termCols) + const HEADER_LINES = 2 + const resultLines = [] + + for (let i = 0; i < tableLines.length; i++) { + const line = tableLines[i] + const dataIndex = i - HEADER_LINES + if (dataIndex >= 0 && dataIndex === selectedIndex) { + resultLines.push(`${ANSI_INVERSE_ON}${line}${ANSI_INVERSE_OFF}`) + } else { + resultLines.push(line) + } + } + + // Confirmation prompt overlay + if (confirmDeleteName !== null) { + resultLines.push('') + resultLines.push(chalk.red(` Delete "${confirmDeleteName}"? This cannot be undone. `) + chalk.bold('[y/N]')) + } + + return resultLines.slice(0, viewportHeight) +} + +// ────────────────────────────────────────────────────────────────────────────── +// Categories tab keypress reducer (T037) +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Pure state reducer for keypresses in the Categories tab list mode. 
+ * @param {CatTabState} state + * @param {{ name: string, ctrl?: boolean, sequence?: string }} key + * @returns {CatTabState | { exit: true }} + */ +export function handleCategoriesKeypress(state, key) { + const {selectedIndex, entries, mode, confirmDeleteId} = state + const maxIndex = Math.max(0, entries.length - 1) + + // Confirm-delete mode + if (mode === 'confirm-delete') { + if (key.name === 'y') { + return { + ...state, + mode: 'list', + confirmDeleteId: key.name === 'y' ? confirmDeleteId : null, + _deleteConfirmed: true, + } + } + // Any other key cancels + return {...state, mode: 'list', confirmDeleteId: null} + } + + // List mode + if (key.name === 'up' || key.name === 'k') { + return {...state, selectedIndex: Math.max(0, selectedIndex - 1)} + } + if (key.name === 'down' || key.name === 'j') { + return {...state, selectedIndex: Math.min(maxIndex, selectedIndex + 1)} + } + if (key.name === 'pageup') { + return {...state, selectedIndex: Math.max(0, selectedIndex - 10)} + } + if (key.name === 'pagedown') { + return {...state, selectedIndex: Math.min(maxIndex, selectedIndex + 10)} + } + if (key.name === 'n') { + return {...state, mode: 'form', _action: 'create'} + } + if (key.name === 'return' && entries.length > 0) { + return {...state, mode: 'form', _action: 'edit', _editId: entries[selectedIndex]?.id} + } + if (key.name === 'd' && entries.length > 0) { + return {...state, _toggleId: entries[selectedIndex]?.id} + } + if ((key.name === 'delete' || key.name === 'backspace') && entries.length > 0) { + const entry = entries[selectedIndex] + if (entry) { + return {...state, mode: 'confirm-delete', confirmDeleteId: entry.id, _confirmDeleteName: entry.name} + } + } + + return state +} + +// ────────────────────────────────────────────────────────────────────────────── +// Terminal lifecycle management +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Enter the alternate screen buffer, hide the cursor, and enable raw 
stdin keypresses. + * @returns {void} + */ +export function setupTerminal() { + _cleanupCalled = false + _altScreenActive = true + _rawModeActive = true + process.stdout.write(ANSI_ALT_SCREEN_ON) + process.stdout.write(ANSI_CURSOR_HIDE) + readline.emitKeypressEvents(process.stdin) + if (process.stdin.isTTY) { + process.stdin.setRawMode(true) + } +} + +/** + * Restore the terminal to its original state. + * Idempotent — safe to call multiple times. + * @returns {void} + */ +export function cleanupTerminal() { + if (_cleanupCalled) return + _cleanupCalled = true + + if (_keypressListener) { + process.stdin.removeListener('keypress', _keypressListener) + _keypressListener = null + } + if (_rawModeActive && process.stdin.isTTY) { + try { + process.stdin.setRawMode(false) + } catch { + /* ignore */ + } + _rawModeActive = false + } + if (_altScreenActive) { + process.stdout.write(ANSI_CURSOR_SHOW) + process.stdout.write(ANSI_ALT_SCREEN_OFF) + _altScreenActive = false + } + try { + process.stdin.pause() + } catch { + /* ignore */ + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// T016: startTabTUI — main orchestrator +// ────────────────────────────────────────────────────────────────────────────── + +/** + * @typedef {Object} TabTUIOptions + * @property {import('../../types.js').DetectedEnvironment[]} envs - Detected environments (from scanner) + * @property {import('../../types.js').CategoryEntry[]} entries - All category entries (from store) + * @property {boolean} chezmoiEnabled - Whether chezmoi is configured + * @property {(action: object) => Promise} onAction - Callback for CRUD actions from category tabs + * @property {import('../../formatters/ai-config.js').formatEnvironmentsTable} formatEnvs - Environments table formatter + * @property {import('../../formatters/ai-config.js').formatCategoriesTable} formatCats - Categories table formatter + * @property {(() => Promise) | undefined} [refreshEntries] - Reload entries 
from store after mutations + */ + +/** + * Start the interactive tab TUI session. + * Blocks until the user exits (Esc / q / Ctrl+C). + * Manages the full TUI lifecycle: terminal setup, keypress loop, tab switching, cleanup. + * + * @param {TabTUIOptions} opts + * @returns {Promise} + */ +export async function startTabTUI(opts) { + const {envs, onAction, formatEnvs, formatCats} = opts + const {entries: initialEntries, chezmoiEnabled} = opts + + _cleanupCalled = false + + const sigHandler = () => { + cleanupTerminal() + process.exit(0) + } + const exitHandler = () => { + if (!_cleanupCalled) cleanupTerminal() + } + process.once('SIGINT', sigHandler) + process.once('SIGTERM', sigHandler) + process.once('exit', exitHandler) + + const tabs = [ + {label: 'Environments', key: 'environments'}, + {label: 'MCPs', key: 'mcp'}, + {label: 'Commands', key: 'command'}, + {label: 'Skills', key: 'skill'}, + {label: 'Agents', key: 'agent'}, + ] + + const CATEGORY_TYPES = ['mcp', 'command', 'skill', 'agent'] + const chezmoidTip = chezmoiEnabled ? 
'' : 'Tip: Run `dvmi dotfiles setup` to enable automatic backup of your AI configs' + + /** @type {TabTUIState} */ + let tuiState = { + tabs, + activeTabIndex: 0, + termRows: process.stdout.rows || 24, + termCols: process.stdout.columns || 80, + contentViewportHeight: Math.max(1, (process.stdout.rows || 24) - TAB_BAR_LINES - FOOTER_LINES), + tooSmall: (process.stdout.columns || 80) < MIN_COLS || (process.stdout.rows || 24) < MIN_ROWS, + } + + /** @type {EnvTabState} */ + let envState = {envs, selectedIndex: 0} + + /** @type {import('../../types.js').CategoryEntry[]} */ + let allEntries = [...initialEntries] + + /** @type {Record} */ + let catTabStates = Object.fromEntries( + CATEGORY_TYPES.map((type) => [ + type, + /** @type {CatTabState} */ ({ + entries: allEntries.filter((e) => e.type === type), + selectedIndex: 0, + mode: 'list', + formState: null, + confirmDeleteId: null, + chezmoidTip, + }), + ]), + ) + + /** Push filtered entries into each tab state — call after allEntries changes. */ + function syncTabEntries() { + for (const type of CATEGORY_TYPES) { + catTabStates = { + ...catTabStates, + [type]: {...catTabStates[type], entries: allEntries.filter((e) => e.type === type)}, + } + } + } + + setupTerminal() + + /** + * Build and render the current frame. 
+ * @returns {void} + */ + function render() { + const {termRows, termCols, activeTabIndex, tooSmall, contentViewportHeight} = tuiState + + if (tooSmall) { + process.stdout.write(buildTooSmallScreen(termRows, termCols)) + return + } + + const tabBarLines = buildTabBar(tabs, activeTabIndex) + let contentLines + let hintStr + + if (activeTabIndex === 0) { + contentLines = buildEnvironmentsTab( + envState.envs, + envState.selectedIndex, + contentViewportHeight, + formatEnvs, + termCols, + ) + hintStr = chalk.dim(' ↑↓ navigate Tab switch tabs q exit') + } else { + const tabKey = tabs[activeTabIndex].key + const tabState = catTabStates[tabKey] + + if (tabState.mode === 'form' && tabState.formState) { + contentLines = buildFormScreen(tabState.formState, contentViewportHeight, termCols) + hintStr = chalk.dim(' Tab next field Shift+Tab prev Ctrl+S save Esc cancel') + } else { + const confirmName = + tabState.mode === 'confirm-delete' && tabState._confirmDeleteName + ? /** @type {string} */ (tabState._confirmDeleteName) + : null + contentLines = buildCategoriesTab( + tabState.entries, + tabState.selectedIndex, + contentViewportHeight, + formatCats, + termCols, + confirmName, + ) + hintStr = chalk.dim(' ↑↓ navigate n new Enter edit d toggle Del delete Tab switch q exit') + } + } + + const footerTip = chezmoidTip ? 
[chalk.dim(chezmoidTip)] : [] + const footerLines = ['', hintStr, ...footerTip] + process.stdout.write(buildTabScreen(tabBarLines, contentLines, footerLines, termRows)) + } + + // Resize handler + function onResize() { + const newRows = process.stdout.rows || 24 + const newCols = process.stdout.columns || 80 + tuiState = { + ...tuiState, + termRows: newRows, + termCols: newCols, + contentViewportHeight: Math.max(1, newRows - TAB_BAR_LINES - FOOTER_LINES), + tooSmall: newCols < MIN_COLS || newRows < MIN_ROWS, + } + render() + } + process.stdout.on('resize', onResize) + + render() + + return new Promise((resolve) => { + /** + * @param {string} _str + * @param {{ name: string, ctrl?: boolean, shift?: boolean, sequence?: string }} key + */ + const listener = async (_str, key) => { + if (!key) return + + // Global keys + if (key.name === 'escape' || key.name === 'q') { + process.stdout.removeListener('resize', onResize) + process.removeListener('SIGINT', sigHandler) + process.removeListener('SIGTERM', sigHandler) + process.removeListener('exit', exitHandler) + cleanupTerminal() + resolve() + return + } + if (key.ctrl && key.name === 'c') { + process.stdout.removeListener('resize', onResize) + process.removeListener('SIGINT', sigHandler) + process.removeListener('SIGTERM', sigHandler) + process.removeListener('exit', exitHandler) + cleanupTerminal() + resolve() + return + } + + // Tab switching — only when not in form mode (Tab navigates form fields when a form is open) + const activeTabKey = tuiState.activeTabIndex > 0 ? 
tabs[tuiState.activeTabIndex].key : null + const isInFormMode = activeTabKey !== null && catTabStates[activeTabKey]?.mode === 'form' + if (key.name === 'tab' && !key.shift && !isInFormMode) { + tuiState = { + ...tuiState, + activeTabIndex: (tuiState.activeTabIndex + 1) % tabs.length, + } + render() + return + } + + // Delegate to active tab + if (tuiState.activeTabIndex === 0) { + // Environments tab — read-only + const result = handleEnvironmentsKeypress(envState, key) + envState = /** @type {EnvTabState} */ (result) + render() + } else { + // Category tab (MCPs | Commands | Skills | Agents) + const tabKey = tabs[tuiState.activeTabIndex].key + const tabState = catTabStates[tabKey] + + // Form mode: delegate to form keypress handler + if (tabState.mode === 'form' && tabState.formState) { + const formResult = handleFormKeypress(tabState.formState, key) + + if ('cancelled' in formResult && formResult.cancelled) { + catTabStates = { + ...catTabStates, + [tabKey]: {...tabState, mode: 'list', formState: null, _formAction: null, _editId: null}, + } + render() + return + } + + if ('submitted' in formResult && formResult.submitted) { + const formAction = tabState._formAction + const editId = tabState._editId + const savedFormState = tabState.formState + catTabStates = { + ...catTabStates, + [tabKey]: {...tabState, mode: 'list', formState: null, _formAction: null, _editId: null}, + } + render() + try { + await onAction({type: formAction, tabKey, values: formResult.values, id: editId}) + if (opts.refreshEntries) { + allEntries = await opts.refreshEntries() + syncTabEntries() + render() + } + } catch (err) { + // Restore form with error message so the user sees what went wrong + const msg = err instanceof Error ? 
err.message : String(err) + catTabStates = { + ...catTabStates, + [tabKey]: { + ...catTabStates[tabKey], + mode: 'form', + formState: {...savedFormState, errorMessage: msg}, + _formAction: formAction, + _editId: editId, + }, + } + render() + } + return + } + + // Still editing — update form state + catTabStates = { + ...catTabStates, + [tabKey]: {...tabState, formState: /** @type {import('./form.js').FormState} */ (formResult)}, + } + render() + return + } + + // List / confirm-delete mode + const result = handleCategoriesKeypress(tabState, key) + + if (result._deleteConfirmed && result.confirmDeleteId) { + const idToDelete = result.confirmDeleteId + catTabStates = { + ...catTabStates, + [tabKey]: {...result, confirmDeleteId: null, _deleteConfirmed: false}, + } + render() + try { + await onAction({type: 'delete', id: idToDelete}) + if (opts.refreshEntries) { + allEntries = await opts.refreshEntries() + syncTabEntries() + render() + } + } catch { + /* ignore */ + } + return + } + + if (result._toggleId) { + const idToToggle = result._toggleId + const entry = tabState.entries.find((e) => e.id === idToToggle) + catTabStates = {...catTabStates, [tabKey]: {...result, _toggleId: null}} + render() + if (entry) { + try { + await onAction({type: entry.active ? 'deactivate' : 'activate', id: idToToggle}) + if (opts.refreshEntries) { + allEntries = await opts.refreshEntries() + syncTabEntries() + render() + } + } catch { + /* ignore */ + } + } + return + } + + if (result._action === 'create') { + const compatibleEnvs = envs.filter((e) => e.supportedCategories.includes(tabKey)) + const fields = + tabKey === 'mcp' + ? getMCPFormFields(null, compatibleEnvs) + : tabKey === 'command' + ? getCommandFormFields(null, compatibleEnvs) + : tabKey === 'skill' + ? getSkillFormFields(null, compatibleEnvs) + : getAgentFormFields(null, compatibleEnvs) + const tabLabel = tabKey === 'mcp' ? 
'MCP' : tabKey.charAt(0).toUpperCase() + tabKey.slice(1) + catTabStates = { + ...catTabStates, + [tabKey]: { + ...result, + _action: null, + mode: 'form', + _formAction: 'create', + formState: { + fields, + focusedFieldIndex: 0, + title: `Create ${tabLabel}`, + status: 'editing', + errorMessage: null, + }, + }, + } + render() + return + } + + if (result._action === 'edit' && result._editId) { + const entry = tabState.entries.find((e) => e.id === result._editId) + if (entry) { + const compatibleEnvs = envs.filter((e) => e.supportedCategories.includes(entry.type)) + const fields = + entry.type === 'mcp' + ? getMCPFormFields(entry, compatibleEnvs) + : entry.type === 'command' + ? getCommandFormFields(entry, compatibleEnvs) + : entry.type === 'skill' + ? getSkillFormFields(entry, compatibleEnvs) + : getAgentFormFields(entry, compatibleEnvs) + catTabStates = { + ...catTabStates, + [tabKey]: { + ...result, + _action: null, + mode: 'form', + _formAction: 'edit', + formState: { + fields, + focusedFieldIndex: 0, + title: `Edit ${entry.name}`, + status: 'editing', + errorMessage: null, + }, + }, + } + render() + return + } + } + + catTabStates = {...catTabStates, [tabKey]: /** @type {CatTabState} */ (result)} + render() + } + } + + _keypressListener = listener + process.stdin.on('keypress', listener) + process.stdin.resume() + }) +} + +/** + * Update the entries displayed in the Categories tab (called after store mutations). + * @param {import('../../types.js').CategoryEntry[]} _newEntries + * @returns {void} + */ +export function updateTUIEntries(_newEntries) { + // This is a lightweight state update — the TUI re-renders on next keypress. + // Callers should call render() manually after this if needed. 
+} diff --git a/src/utils/typewriter.js b/src/utils/typewriter.js index 24e52f5..3442c3a 100644 --- a/src/utils/typewriter.js +++ b/src/utils/typewriter.js @@ -1,5 +1,5 @@ import readline from 'node:readline' -import { isAnimationEnabled, BRAND_GRADIENT, gradientText } from './gradient.js' +import {isAnimationEnabled, BRAND_GRADIENT, gradientText} from './gradient.js' /** * Stampa testo con effetto typewriter (lettera per lettera). @@ -12,7 +12,7 @@ import { isAnimationEnabled, BRAND_GRADIENT, gradientText } from './gradient.js' * @returns {Promise} */ export async function typewriter(text, opts = {}) { - const { interval = 30, gradient } = opts + const {interval = 30, gradient} = opts if (!isAnimationEnabled) { const out = gradient ? gradientText(text, gradient) : text @@ -44,5 +44,5 @@ export async function typewriter(text, opts = {}) { * @returns {Promise} */ export async function typewriterLine(text, gradient = BRAND_GRADIENT) { - return typewriter(text, { gradient, interval: 25 }) + return typewriter(text, {gradient, interval: 25}) } diff --git a/src/utils/welcome.js b/src/utils/welcome.js index 693d6bd..29e3ba4 100644 --- a/src/utils/welcome.js +++ b/src/utils/welcome.js @@ -1,7 +1,7 @@ import chalk from 'chalk' -import { printBanner } from './banner.js' -import { isColorEnabled } from './gradient.js' -import { typewriterLine } from './typewriter.js' +import {printBanner} from './banner.js' +import {isColorEnabled} from './gradient.js' +import {typewriterLine} from './typewriter.js' // ─── Constants ──────────────────────────────────────────────────────────────── @@ -14,21 +14,18 @@ const nl = () => process.stdout.write('\n') const p = isColorEnabled ? 
{ - sep: (t) => chalk.hex('#4A9EFF').dim(t), - cyan: (t) => chalk.hex('#00D4FF').bold(t), - green: (t) => chalk.hex('#00FF88').bold(t), - pink: (t) => chalk.hex('#FF3399').bold(t), - gold: (t) => chalk.hex('#FFD700').bold(t), + sep: (t) => chalk.hex('#4A9EFF').dim(t), + cyan: (t) => chalk.hex('#00D4FF').bold(t), + green: (t) => chalk.hex('#00FF88').bold(t), + pink: (t) => chalk.hex('#FF3399').bold(t), + gold: (t) => chalk.hex('#FFD700').bold(t), orange: (t) => chalk.hex('#FF6B2B').bold(t), - blue: (t) => chalk.hex('#4A9EFF')(t), - white: (t) => chalk.white(t), - dim: (t) => chalk.dim(t), + blue: (t) => chalk.hex('#4A9EFF')(t), + white: (t) => chalk.white(t), + dim: (t) => chalk.dim(t), } : Object.fromEntries( - ['sep', 'cyan', 'green', 'pink', 'gold', 'orange', 'blue', 'white', 'dim'].map((k) => [ - k, - (t) => t, - ]), + ['sep', 'cyan', 'green', 'pink', 'gold', 'orange', 'blue', 'white', 'dim'].map((k) => [k, (t) => t]), ) // ─── Helpers ───────────────────────────────────────────────────────────────── @@ -151,13 +148,13 @@ export async function printWelcomeScreen(version = '') { /** @type {Array<[string, string]>} */ const commands = [ - ['dvmi init', 'configure your workspace'], - ['dvmi auth login', 'connect GitHub & ClickUp'], - ['dvmi pr status', 'open pull requests'], + ['dvmi init', 'configure your workspace'], + ['dvmi auth login', 'connect GitHub & ClickUp'], + ['dvmi pr status', 'open pull requests'], ['dvmi pipeline status', 'CI/CD health check'], - ['dvmi tasks today', 'focus mode: what to ship today'], - ['dvmi costs get', 'AWS bill reality check'], - ['dvmi doctor', 'diagnose config issues'], + ['dvmi tasks today', 'focus mode: what to ship today'], + ['dvmi costs get', 'AWS bill reality check'], + ['dvmi doctor', 'diagnose config issues'], ] for (const [cmd, comment] of commands) { out(' ' + p.blue('$ ' + cmd.padEnd(24)) + p.dim('# ' + comment)) diff --git a/src/validators/repo-name.js b/src/validators/repo-name.js index fa8f81c..c2ae895 100644 --- 
a/src/validators/repo-name.js +++ b/src/validators/repo-name.js @@ -15,7 +15,7 @@ const MAX_LENGTH = 100 */ export function validateRepoName(name) { if (!name || name.length === 0) { - return { valid: false, error: 'Repository name cannot be empty' } + return {valid: false, error: 'Repository name cannot be empty'} } if (name.length > MAX_LENGTH) { @@ -38,5 +38,5 @@ export function validateRepoName(name) { } } - return { valid: true } + return {valid: true} } diff --git a/tests/fixtures/audit-outputs/composer-audit.json b/tests/fixtures/audit-outputs/composer-audit.json index 0710425..852e3c7 100644 --- a/tests/fixtures/audit-outputs/composer-audit.json +++ b/tests/fixtures/audit-outputs/composer-audit.json @@ -8,9 +8,7 @@ "link": "https://symfony.com/cve-2022-24894", "cve": "CVE-2022-24894", "affectedVersions": ">=5.4.0 <5.4.19|>=6.0.0 <6.0.19|>=6.1.0 <6.1.11|>=6.2.0 <6.2.4", - "sources": [ - { "name": "GitHub", "remoteId": "GHSA-h7vf-5wrv-9fhv" } - ], + "sources": [{"name": "GitHub", "remoteId": "GHSA-h7vf-5wrv-9fhv"}], "reportedAt": "2023-02-01T00:00:00+00:00", "composerRepository": "https://packagist.org" } diff --git a/tests/fixtures/audit-outputs/npm-audit.json b/tests/fixtures/audit-outputs/npm-audit.json index e277200..5dd6278 100644 --- a/tests/fixtures/audit-outputs/npm-audit.json +++ b/tests/fixtures/audit-outputs/npm-audit.json @@ -14,14 +14,14 @@ "url": "https://github.com/advisories/GHSA-p6mc-m468-83gw", "severity": "critical", "cwe": ["CWE-1321"], - "cvss": { "score": 9.8, "vectorString": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H" }, + "cvss": {"score": 9.8, "vectorString": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H"}, "range": "<4.17.21" } ], "effects": [], "range": "<4.17.21", "nodes": ["node_modules/lodash"], - "fixAvailable": { "name": "lodash", "version": "4.17.21", "isSemVerMajor": false } + "fixAvailable": {"name": "lodash", "version": "4.17.21", "isSemVerMajor": false} }, "minimist": { "name": "minimist", @@ -36,7 +36,7 @@ "url": 
"https://github.com/advisories/GHSA-xvch-5gv4-984h", "severity": "high", "cwe": ["CWE-1321"], - "cvss": { "score": 7.3, "vectorString": null }, + "cvss": {"score": 7.3, "vectorString": null}, "range": ">=1.2.2 <1.2.6" } ], @@ -55,6 +55,6 @@ "critical": 1, "total": 2 }, - "dependencies": { "prod": 50, "dev": 20, "optional": 0, "peer": 0, "peerOptional": 0, "total": 70 } + "dependencies": {"prod": 50, "dev": 20, "optional": 0, "peer": 0, "peerOptional": 0, "total": 70} } } diff --git a/tests/fixtures/audit-outputs/pnpm-audit.json b/tests/fixtures/audit-outputs/pnpm-audit.json index 565c518..ff726fe 100644 --- a/tests/fixtures/audit-outputs/pnpm-audit.json +++ b/tests/fixtures/audit-outputs/pnpm-audit.json @@ -10,9 +10,7 @@ "cves": ["CVE-2021-23337"], "vulnerable_versions": "<4.17.21", "patched_versions": ">=4.17.21", - "findings": [ - { "version": "4.17.20", "paths": ["project > lodash"] } - ] + "findings": [{"version": "4.17.20", "paths": ["project > lodash"]}] }, "1002": { "id": 1002, @@ -23,9 +21,7 @@ "cves": ["CVE-2021-35065"], "vulnerable_versions": "<5.1.2", "patched_versions": ">=5.1.2", - "findings": [ - { "version": "5.1.1", "paths": ["project > chokidar > glob-parent"] } - ] + "findings": [{"version": "5.1.1", "paths": ["project > chokidar > glob-parent"]}] } }, "metadata": { diff --git a/tests/fixtures/config/valid.json b/tests/fixtures/config/valid.json index c1382dc..a75bc5a 100644 --- a/tests/fixtures/config/valid.json +++ b/tests/fixtures/config/valid.json @@ -3,5 +3,5 @@ "awsProfile": "dev", "awsRegion": "eu-west-1", "shell": "zsh", - "clickup": { "teamId": "12345" } + "clickup": {"teamId": "12345"} } diff --git a/tests/fixtures/msw-handlers.js b/tests/fixtures/msw-handlers.js index 4bfde27..08f147b 100644 --- a/tests/fixtures/msw-handlers.js +++ b/tests/fixtures/msw-handlers.js @@ -1,17 +1,45 @@ -import { http, HttpResponse } from 'msw' +import {http, HttpResponse} from 'msw' export const handlers = [ // GitHub: authenticated user - 
http.get('https://api.github.com/user', () => - HttpResponse.json({ login: 'testdev', name: 'Test Dev', id: 1 }), - ), + http.get('https://api.github.com/user', () => HttpResponse.json({login: 'testdev', name: 'Test Dev', id: 1})), // GitHub: list org repos (includes templates) http.get('https://api.github.com/orgs/:org/repos', () => HttpResponse.json([ - { name: 'template-lambda', is_template: true, language: 'JavaScript', description: 'Lambda starter', html_url: 'https://github.com/acme/template-lambda', pushed_at: '2026-03-01T00:00:00Z', topics: ['template'], private: true, updated_at: '2026-03-01T00:00:00Z' }, - { name: 'template-microservice', is_template: true, language: 'JavaScript', description: 'Microservice starter', html_url: 'https://github.com/acme/template-microservice', pushed_at: '2026-03-01T00:00:00Z', topics: ['template'], private: true, updated_at: '2026-03-01T00:00:00Z' }, - { name: 'my-api', is_template: false, language: 'JavaScript', description: 'Main API', html_url: 'https://github.com/acme/my-api', pushed_at: '2026-03-15T00:00:00Z', topics: ['microservice'], private: true, updated_at: '2026-03-15T00:00:00Z' }, + { + name: 'template-lambda', + is_template: true, + language: 'JavaScript', + description: 'Lambda starter', + html_url: 'https://github.com/acme/template-lambda', + pushed_at: '2026-03-01T00:00:00Z', + topics: ['template'], + private: true, + updated_at: '2026-03-01T00:00:00Z', + }, + { + name: 'template-microservice', + is_template: true, + language: 'JavaScript', + description: 'Microservice starter', + html_url: 'https://github.com/acme/template-microservice', + pushed_at: '2026-03-01T00:00:00Z', + topics: ['template'], + private: true, + updated_at: '2026-03-01T00:00:00Z', + }, + { + name: 'my-api', + is_template: false, + language: 'JavaScript', + description: 'Main API', + html_url: 'https://github.com/acme/my-api', + pushed_at: '2026-03-15T00:00:00Z', + topics: ['microservice'], + private: true, + updated_at: 
'2026-03-15T00:00:00Z', + }, ]), ), @@ -19,25 +47,47 @@ export const handlers = [ http.get('https://api.github.com/repos/:owner/:repo/actions/runs', () => HttpResponse.json({ workflow_runs: [ - { id: 12345, name: 'CI/CD', status: 'completed', conclusion: 'success', head_branch: 'main', created_at: '2026-03-18T10:00:00Z', updated_at: '2026-03-18T10:03:00Z', actor: { login: 'testdev' }, html_url: 'https://github.com/acme/my-api/actions/runs/12345', display_title: 'CI/CD' }, - { id: 12344, name: 'CI/CD', status: 'completed', conclusion: 'failure', head_branch: 'feature/x', created_at: '2026-03-18T08:00:00Z', updated_at: '2026-03-18T08:01:00Z', actor: { login: 'testdev' }, html_url: 'https://github.com/acme/my-api/actions/runs/12344', display_title: 'CI/CD' }, + { + id: 12345, + name: 'CI/CD', + status: 'completed', + conclusion: 'success', + head_branch: 'main', + created_at: '2026-03-18T10:00:00Z', + updated_at: '2026-03-18T10:03:00Z', + actor: {login: 'testdev'}, + html_url: 'https://github.com/acme/my-api/actions/runs/12345', + display_title: 'CI/CD', + }, + { + id: 12344, + name: 'CI/CD', + status: 'completed', + conclusion: 'failure', + head_branch: 'feature/x', + created_at: '2026-03-18T08:00:00Z', + updated_at: '2026-03-18T08:01:00Z', + actor: {login: 'testdev'}, + html_url: 'https://github.com/acme/my-api/actions/runs/12344', + display_title: 'CI/CD', + }, ], }), ), // GitHub: PR detail - http.get('https://api.github.com/repos/:owner/:repo/pulls/:pull_number', ({ params }) => + http.get('https://api.github.com/repos/:owner/:repo/pulls/:pull_number', ({params}) => HttpResponse.json({ number: Number(params.pull_number), title: 'Feature: user auth', state: 'open', html_url: `https://github.com/${params.owner}/${params.repo}/pull/${params.pull_number}`, draft: false, - user: { login: 'developer1' }, - head: { ref: 'feature/user-auth' }, - base: { ref: 'main' }, - labels: [{ name: 'feature' }], - requested_reviewers: [{ login: 'qa-engineer' }], + user: {login: 
'developer1'}, + head: {ref: 'feature/user-auth'}, + base: {ref: 'main'}, + labels: [{name: 'feature'}], + requested_reviewers: [{login: 'qa-engineer'}], }), ), @@ -46,13 +96,13 @@ export const handlers = [ HttpResponse.json([ { id: 1001, - user: { login: 'developer1' }, + user: {login: 'developer1'}, body: 'Implementazione completata.', created_at: '2026-03-17T09:00:00Z', }, { id: 1002, - user: { login: 'qa-engineer' }, + user: {login: 'qa-engineer'}, body: 'QA: review in corso\n- [x] Testare flusso login\n- [ ] Verificare logout', created_at: '2026-03-17T10:00:00Z', }, @@ -64,7 +114,7 @@ export const handlers = [ HttpResponse.json([ { id: 2001, - user: { login: 'qa-engineer' }, + user: {login: 'qa-engineer'}, body: 'QA review completata parzialmente.', submitted_at: '2026-03-17T11:00:00Z', state: 'CHANGES_REQUESTED', @@ -73,7 +123,7 @@ export const handlers = [ ), // GitHub: search issues/PRs (authored + review-requested) - http.get('https://api.github.com/search/issues', ({ request }) => { + http.get('https://api.github.com/search/issues', ({request}) => { const url = new URL(request.url) const q = url.searchParams.get('q') ?? 
'' const items = q.includes('review-requested') @@ -84,8 +134,8 @@ export const handlers = [ state: 'open', html_url: 'https://github.com/acme/my-api/pull/42', draft: false, - user: { login: 'developer1' }, - pull_request: { head: { ref: 'feature/user-auth' }, base: { ref: 'main' } }, + user: {login: 'developer1'}, + pull_request: {head: {ref: 'feature/user-auth'}, base: {ref: 'main'}}, }, ] : [ @@ -95,41 +145,42 @@ export const handlers = [ state: 'open', html_url: 'https://github.com/acme/my-api/pull/10', draft: false, - user: { login: 'testdev' }, - pull_request: { head: { ref: 'fix/login-timeout' }, base: { ref: 'main' } }, + user: {login: 'testdev'}, + pull_request: {head: {ref: 'fix/login-timeout'}, base: {ref: 'main'}}, }, ] - return HttpResponse.json({ items, total_count: items.length }) + return HttpResponse.json({items, total_count: items.length}) }), // GitHub: search code - http.get('https://api.github.com/search/code', ({ request }) => { + http.get('https://api.github.com/search/code', ({request}) => { const url = new URL(request.url) const q = url.searchParams.get('q') ?? '' return HttpResponse.json({ items: q - ? [{ repository: { name: 'my-api' }, path: 'src/services/user.js', name: 'user.js', html_url: 'https://github.com/acme/my-api/blob/main/src/services/user.js' }] + ? 
[ + { + repository: {name: 'my-api'}, + path: 'src/services/user.js', + name: 'user.js', + html_url: 'https://github.com/acme/my-api/blob/main/src/services/user.js', + }, + ] : [], }) }), // ClickUp: get user - http.get('https://api.clickup.com/api/v2/user', () => - HttpResponse.json({ user: { id: 42, username: 'testdev' } }), - ), + http.get('https://api.clickup.com/api/v2/user', () => HttpResponse.json({user: {id: 42, username: 'testdev'}})), // ClickUp: list teams/workspaces - http.get('https://api.clickup.com/api/v2/team', () => - HttpResponse.json({ teams: [{ id: '12345', name: 'Acme' }] }), - ), + http.get('https://api.clickup.com/api/v2/team', () => HttpResponse.json({teams: [{id: '12345', name: 'Acme'}]})), // ClickUp: OAuth token exchange - http.post('https://api.clickup.com/api/v2/oauth/token', () => - HttpResponse.json({ access_token: 'test-token' }), - ), + http.post('https://api.clickup.com/api/v2/oauth/token', () => HttpResponse.json({access_token: 'test-token'})), // ClickUp: get tasks (team-wide, supports pagination and due_date_lt) - http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({ request }) => { + http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({request}) => { const url = new URL(request.url) const page = Number(url.searchParams.get('page') ?? 
'0') @@ -138,14 +189,26 @@ export const handlers = [ return HttpResponse.json({ tasks: [ { - id: 'def456', name: 'Fix login bug', status: { status: 'in progress', type: 'in_progress' }, priority: { id: '2' }, - due_date: null, url: 'https://app.clickup.com/t/def456', assignees: [{ username: 'testdev' }], - list: { id: 'L1', name: 'Sprint 42' }, folder: { id: 'F1', name: 'Backend', hidden: false }, + id: 'def456', + name: 'Fix login bug', + status: {status: 'in progress', type: 'in_progress'}, + priority: {id: '2'}, + due_date: null, + url: 'https://app.clickup.com/t/def456', + assignees: [{username: 'testdev'}], + list: {id: 'L1', name: 'Sprint 42'}, + folder: {id: 'F1', name: 'Backend', hidden: false}, }, { - id: 'ghi789', name: 'Write unit tests', status: { status: 'todo', type: 'open' }, priority: { id: '3' }, - due_date: null, url: 'https://app.clickup.com/t/ghi789', assignees: [{ username: 'testdev' }], - list: { id: 'L2', name: 'Backlog' }, folder: { hidden: true }, + id: 'ghi789', + name: 'Write unit tests', + status: {status: 'todo', type: 'open'}, + priority: {id: '3'}, + due_date: null, + url: 'https://app.clickup.com/t/ghi789', + assignees: [{username: 'testdev'}], + list: {id: 'L2', name: 'Backlog'}, + folder: {hidden: true}, }, ], has_more: false, @@ -156,9 +219,15 @@ export const handlers = [ return HttpResponse.json({ tasks: [ { - id: 'abc123', name: 'Implement user auth', status: { status: 'in progress', type: 'in_progress' }, priority: { id: '2' }, - due_date: null, url: 'https://app.clickup.com/t/abc123', assignees: [{ username: 'testdev' }], - list: { id: 'L1', name: 'Sprint 42' }, folder: { id: 'F1', name: 'Backend', hidden: false }, + id: 'abc123', + name: 'Implement user auth', + status: {status: 'in progress', type: 'in_progress'}, + priority: {id: '2'}, + due_date: null, + url: 'https://app.clickup.com/t/abc123', + assignees: [{username: 'testdev'}], + list: {id: 'L1', name: 'Sprint 42'}, + folder: {id: 'F1', name: 'Backend', hidden: false}, 
}, ], has_more: false, @@ -166,21 +235,33 @@ export const handlers = [ }), // ClickUp: get tasks by list (specific list endpoint) - http.get('https://api.clickup.com/api/v2/list/:listId/task', ({ params }) => { + http.get('https://api.clickup.com/api/v2/list/:listId/task', ({params}) => { if (params.listId === 'NOTFOUND') { - return HttpResponse.json({ err: 'List not found' }, { status: 404 }) + return HttpResponse.json({err: 'List not found'}, {status: 404}) } return HttpResponse.json({ tasks: [ { - id: 'list-task-1', name: 'List task alpha', status: { status: 'in progress', type: 'in_progress' }, priority: { id: '2' }, - due_date: null, url: 'https://app.clickup.com/t/list-task-1', assignees: [{ username: 'testdev' }], - list: { id: String(params.listId), name: 'Sprint 42' }, folder: { id: 'F1', name: 'Backend', hidden: false }, + id: 'list-task-1', + name: 'List task alpha', + status: {status: 'in progress', type: 'in_progress'}, + priority: {id: '2'}, + due_date: null, + url: 'https://app.clickup.com/t/list-task-1', + assignees: [{username: 'testdev'}], + list: {id: String(params.listId), name: 'Sprint 42'}, + folder: {id: 'F1', name: 'Backend', hidden: false}, }, { - id: 'list-task-2', name: 'List task beta (root list)', status: { status: 'todo', type: 'open' }, priority: { id: '3' }, - due_date: null, url: 'https://app.clickup.com/t/list-task-2', assignees: [{ username: 'testdev' }], - list: { id: String(params.listId), name: 'Sprint 42' }, folder: { hidden: true }, + id: 'list-task-2', + name: 'List task beta (root list)', + status: {status: 'todo', type: 'open'}, + priority: {id: '3'}, + due_date: null, + url: 'https://app.clickup.com/t/list-task-2', + assignees: [{username: 'testdev'}], + list: {id: String(params.listId), name: 'Sprint 42'}, + folder: {hidden: true}, }, ], has_more: false, @@ -190,23 +271,23 @@ export const handlers = [ // AWS Cost Explorer http.post('https://ce.us-east-1.amazonaws.com/', () => HttpResponse.json({ - ResultsByTime: [{ - 
TimePeriod: { Start: '2026-02-01', End: '2026-03-01' }, - Groups: [ - { Keys: ['AWS Lambda'], Metrics: { UnblendedCost: { Amount: '12.34', Unit: 'USD' } } }, - { Keys: ['Amazon API Gateway'], Metrics: { UnblendedCost: { Amount: '5.67', Unit: 'USD' } } }, - ], - }], + ResultsByTime: [ + { + TimePeriod: {Start: '2026-02-01', End: '2026-03-01'}, + Groups: [ + {Keys: ['AWS Lambda'], Metrics: {UnblendedCost: {Amount: '12.34', Unit: 'USD'}}}, + {Keys: ['Amazon API Gateway'], Metrics: {UnblendedCost: {Amount: '5.67', Unit: 'USD'}}}, + ], + }, + ], }), ), // npm registry version check - http.get('https://npm.pkg.github.com/devvami', () => - HttpResponse.json({ 'dist-tags': { latest: '1.0.0' } }), - ), + http.get('https://npm.pkg.github.com/devvami', () => HttpResponse.json({'dist-tags': {latest: '1.0.0'}})), // NVD API: CVE search by keyword - http.get('https://services.nvd.nist.gov/rest/json/cves/2.0', ({ request }) => { + http.get('https://services.nvd.nist.gov/rest/json/cves/2.0', ({request}) => { const url = new URL(request.url) const cveId = url.searchParams.get('cveId') const keyword = url.searchParams.get('keywordSearch') @@ -229,7 +310,11 @@ export const handlers = [ lastModified: '2023-11-07T03:39:36.747', vulnStatus: 'Analyzed', descriptions: [ - { lang: 'en', value: 'Apache Log4j2 2.0-beta9 through 2.15.0 JNDI features do not protect against attacker controlled LDAP and other JNDI related endpoints.' 
}, + { + lang: 'en', + value: + 'Apache Log4j2 2.0-beta9 through 2.15.0 JNDI features do not protect against attacker controlled LDAP and other JNDI related endpoints.', + }, ], metrics: { cvssMetricV31: [ @@ -242,12 +327,14 @@ export const handlers = [ }, ], }, - weaknesses: [ - { source: 'nvd@nist.gov', type: 'Primary', description: [{ lang: 'en', value: 'CWE-502' }] }, - ], + weaknesses: [{source: 'nvd@nist.gov', type: 'Primary', description: [{lang: 'en', value: 'CWE-502'}]}], configurations: [], references: [ - { url: 'https://logging.apache.org/log4j/2.x/security.html', source: 'cve@mitre.org', tags: ['Vendor Advisory'] }, + { + url: 'https://logging.apache.org/log4j/2.x/security.html', + source: 'cve@mitre.org', + tags: ['Vendor Advisory'], + }, ], }, }, @@ -258,8 +345,13 @@ export const handlers = [ // CVE not found if (cveId) { return HttpResponse.json({ - resultsPerPage: 0, startIndex: 0, totalResults: 0, format: 'NVD_CVE', version: '2.0', - timestamp: '2026-03-28T00:00:00.000', vulnerabilities: [], + resultsPerPage: 0, + startIndex: 0, + totalResults: 0, + format: 'NVD_CVE', + version: '2.0', + timestamp: '2026-03-28T00:00:00.000', + vulnerabilities: [], }) } @@ -280,11 +372,26 @@ export const handlers = [ published: '2026-03-25T00:00:00.000', lastModified: '2026-03-26T00:00:00.000', vulnStatus: 'Analyzed', - descriptions: [{ lang: 'en', value: `Buffer overflow in ${keyword} allows remote attackers to cause a denial of service.` }], + descriptions: [ + { + lang: 'en', + value: `Buffer overflow in ${keyword} allows remote attackers to cause a denial of service.`, + }, + ], metrics: { - cvssMetricV31: [{ cvssData: { baseScore: 9.8, baseSeverity: 'CRITICAL', vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H' } }], + cvssMetricV31: [ + { + cvssData: { + baseScore: 9.8, + baseSeverity: 'CRITICAL', + vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H', + }, + }, + ], }, - weaknesses: [], configurations: [], references: [], + weaknesses: [], 
+ configurations: [], + references: [], }, }, { @@ -294,11 +401,21 @@ export const handlers = [ published: '2026-03-22T00:00:00.000', lastModified: '2026-03-23T00:00:00.000', vulnStatus: 'Analyzed', - descriptions: [{ lang: 'en', value: 'Denial of service via crafted TLS handshake.' }], + descriptions: [{lang: 'en', value: 'Denial of service via crafted TLS handshake.'}], metrics: { - cvssMetricV31: [{ cvssData: { baseScore: 7.5, baseSeverity: 'HIGH', vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H' } }], + cvssMetricV31: [ + { + cvssData: { + baseScore: 7.5, + baseSeverity: 'HIGH', + vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H', + }, + }, + ], }, - weaknesses: [], configurations: [], references: [], + weaknesses: [], + configurations: [], + references: [], }, }, { @@ -308,11 +425,21 @@ export const handlers = [ published: '2026-03-20T00:00:00.000', lastModified: '2026-03-21T00:00:00.000', vulnStatus: 'Awaiting Analysis', - descriptions: [{ lang: 'en', value: 'Information disclosure due to improper memory handling.' 
}], + descriptions: [{lang: 'en', value: 'Information disclosure due to improper memory handling.'}], metrics: { - cvssMetricV31: [{ cvssData: { baseScore: 5.3, baseSeverity: 'MEDIUM', vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N' } }], + cvssMetricV31: [ + { + cvssData: { + baseScore: 5.3, + baseSeverity: 'MEDIUM', + vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:L/I:N/A:N', + }, + }, + ], }, - weaknesses: [], configurations: [], references: [], + weaknesses: [], + configurations: [], + references: [], }, }, ], @@ -320,8 +447,13 @@ export const handlers = [ } return HttpResponse.json({ - resultsPerPage: 0, startIndex: 0, totalResults: 0, format: 'NVD_CVE', version: '2.0', - timestamp: '2026-03-28T00:00:00.000', vulnerabilities: [], + resultsPerPage: 0, + startIndex: 0, + totalResults: 0, + format: 'NVD_CVE', + version: '2.0', + timestamp: '2026-03-28T00:00:00.000', + vulnerabilities: [], }) }), ] diff --git a/tests/fixtures/nvd-responses/cve-detail.json b/tests/fixtures/nvd-responses/cve-detail.json index b66e02d..2b090ee 100644 --- a/tests/fixtures/nvd-responses/cve-detail.json +++ b/tests/fixtures/nvd-responses/cve-detail.json @@ -34,16 +34,12 @@ { "source": "nvd@nist.gov", "type": "Primary", - "description": [ - { "lang": "en", "value": "CWE-502" } - ] + "description": [{"lang": "en", "value": "CWE-502"}] }, { "source": "nvd@nist.gov", "type": "Secondary", - "description": [ - { "lang": "en", "value": "CWE-400" } - ] + "description": [{"lang": "en", "value": "CWE-400"}] } ], "configurations": [ diff --git a/tests/fixtures/nvd-responses/search-results.json b/tests/fixtures/nvd-responses/search-results.json index 1460379..864caae 100644 --- a/tests/fixtures/nvd-responses/search-results.json +++ b/tests/fixtures/nvd-responses/search-results.json @@ -14,7 +14,7 @@ "lastModified": "2026-03-26T00:00:00.000", "vulnStatus": "Analyzed", "descriptions": [ - { "lang": "en", "value": "Buffer overflow in OpenSSL allows remote attackers to cause a denial of 
service." } + {"lang": "en", "value": "Buffer overflow in OpenSSL allows remote attackers to cause a denial of service."} ], "metrics": { "cvssMetricV31": [ @@ -30,7 +30,11 @@ "weaknesses": [], "configurations": [], "references": [ - { "url": "https://www.openssl.org/news/secadv/20260325.txt", "source": "openssl.org", "tags": ["Vendor Advisory"] } + { + "url": "https://www.openssl.org/news/secadv/20260325.txt", + "source": "openssl.org", + "tags": ["Vendor Advisory"] + } ] } }, @@ -41,9 +45,7 @@ "published": "2026-03-22T00:00:00.000", "lastModified": "2026-03-23T00:00:00.000", "vulnStatus": "Analyzed", - "descriptions": [ - { "lang": "en", "value": "Denial of service via crafted TLS handshake in OpenSSL." } - ], + "descriptions": [{"lang": "en", "value": "Denial of service via crafted TLS handshake in OpenSSL."}], "metrics": { "cvssMetricV31": [ { @@ -57,9 +59,7 @@ }, "weaknesses": [], "configurations": [], - "references": [ - { "url": "https://nvd.nist.gov/vuln/detail/CVE-2026-1235", "source": "nvd.nist.gov", "tags": [] } - ] + "references": [{"url": "https://nvd.nist.gov/vuln/detail/CVE-2026-1235", "source": "nvd.nist.gov", "tags": []}] } }, { @@ -69,9 +69,7 @@ "published": "2026-03-20T00:00:00.000", "lastModified": "2026-03-21T00:00:00.000", "vulnStatus": "Awaiting Analysis", - "descriptions": [ - { "lang": "en", "value": "Information disclosure in OpenSSL due to improper memory handling." 
} - ], + "descriptions": [{"lang": "en", "value": "Information disclosure in OpenSSL due to improper memory handling."}], "metrics": { "cvssMetricV31": [ { diff --git a/tests/integration/changelog.test.js b/tests/integration/changelog.test.js index 25268b3..aeef519 100644 --- a/tests/integration/changelog.test.js +++ b/tests/integration/changelog.test.js @@ -1,9 +1,9 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' describe('dvmi changelog', () => { it('--help exits 0', async () => { - const { stdout, exitCode } = await runCli(['changelog', '--help']) + const {stdout, exitCode} = await runCli(['changelog', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('--from') @@ -11,7 +11,7 @@ describe('dvmi changelog', () => { }) it('--json returns sections object', async () => { - const { stdout, exitCode } = await runCli(['changelog', '--json']) + const {stdout, exitCode} = await runCli(['changelog', '--json']) // Works in any git repo expect(exitCode).toBe(0) const data = JSON.parse(stdout) diff --git a/tests/integration/costs-get.test.js b/tests/integration/costs-get.test.js index 8294151..b6e4f38 100644 --- a/tests/integration/costs-get.test.js +++ b/tests/integration/costs-get.test.js @@ -1,18 +1,15 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' // AWS-calling tests require real credentials — skip in CI or when no creds are configured const hasAwsCreds = Boolean( - process.env.AWS_ACCESS_KEY_ID || - process.env.AWS_PROFILE || - process.env.AWS_VAULT || - process.env.AWS_SESSION_TOKEN, + process.env.AWS_ACCESS_KEY_ID || process.env.AWS_PROFILE || process.env.AWS_VAULT || process.env.AWS_SESSION_TOKEN, ) const skipAws = Boolean(process.env.CI) || !hasAwsCreds describe('dvmi costs get', 
() => { it('shows help', async () => { - const { stdout, exitCode } = await runCli(['costs', 'get', '--help']) + const {stdout, exitCode} = await runCli(['costs', 'get', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('--group-by') expect(stdout).toContain('--tag-key') @@ -21,39 +18,37 @@ describe('dvmi costs get', () => { it('exits 1 with message when --group-by tag but no tag key available', async () => { // Fixture config has no projectTags, so without --tag-key this must fail - const { stderr, exitCode } = await runCli(['costs', 'get', '--group-by', 'tag']) + const {stderr, exitCode} = await runCli(['costs', 'get', '--group-by', 'tag']) expect(exitCode).toBe(1) expect(stderr).toMatch(/No tag key available|tag-key/) }) it('exits 1 with message when --group-by both but no tag key available', async () => { - const { stderr, exitCode } = await runCli(['costs', 'get', '--group-by', 'both']) + const {stderr, exitCode} = await runCli(['costs', 'get', '--group-by', 'both']) expect(exitCode).toBe(1) expect(stderr).toMatch(/No tag key available|tag-key/) }) it.skipIf(skipAws)('renders grouped table for --group-by service', async () => { - const { stdout, exitCode } = await runCli(['costs', 'get', '--group-by', 'service']) + const {stdout, exitCode} = await runCli(['costs', 'get', '--group-by', 'service']) expect(exitCode).toBe(0) expect(stdout).toMatch(/Costs for:|No costs found/) }) it.skipIf(skipAws)('renders tag-grouped table for --group-by tag --tag-key env', async () => { - const { stdout, exitCode } = await runCli(['costs', 'get', '--group-by', 'tag', '--tag-key', 'env']) + const {stdout, exitCode} = await runCli(['costs', 'get', '--group-by', 'tag', '--tag-key', 'env']) expect(exitCode).toBe(0) expect(stdout).toMatch(/Costs for:|No costs found/) }) it.skipIf(skipAws)('renders service+tag rows for --group-by both --tag-key env', async () => { - const { stdout, exitCode } = await runCli(['costs', 'get', '--group-by', 'both', '--tag-key', 'env']) + const 
{stdout, exitCode} = await runCli(['costs', 'get', '--group-by', 'both', '--tag-key', 'env']) expect(exitCode).toBe(0) expect(stdout).toMatch(/Costs for:|No costs found/) }) it.skipIf(skipAws)('--json output includes groupBy and tagKey fields', async () => { - const { stdout, exitCode } = await runCli([ - 'costs', 'get', '--group-by', 'tag', '--tag-key', 'env', '--json', - ]) + const {stdout, exitCode} = await runCli(['costs', 'get', '--group-by', 'tag', '--tag-key', 'env', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('groupBy', 'tag') diff --git a/tests/integration/costs-trend.test.js b/tests/integration/costs-trend.test.js index a6bcaf9..5d7ee20 100644 --- a/tests/integration/costs-trend.test.js +++ b/tests/integration/costs-trend.test.js @@ -1,18 +1,15 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' // AWS-calling tests require real credentials — skip in CI or when no creds are configured const hasAwsCreds = Boolean( - process.env.AWS_ACCESS_KEY_ID || - process.env.AWS_PROFILE || - process.env.AWS_VAULT || - process.env.AWS_SESSION_TOKEN, + process.env.AWS_ACCESS_KEY_ID || process.env.AWS_PROFILE || process.env.AWS_VAULT || process.env.AWS_SESSION_TOKEN, ) const skipAws = Boolean(process.env.CI) || !hasAwsCreds describe('dvmi costs trend', () => { it('shows help', async () => { - const { stdout, exitCode } = await runCli(['costs', 'trend', '--help']) + const {stdout, exitCode} = await runCli(['costs', 'trend', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('--group-by') expect(stdout).toContain('--tag-key') @@ -20,26 +17,26 @@ describe('dvmi costs trend', () => { }) it('exits 1 with message when --group-by tag but no tag key available', async () => { - const { stderr, exitCode } = await runCli(['costs', 'trend', '--group-by', 'tag']) + const {stderr, exitCode} = await 
runCli(['costs', 'trend', '--group-by', 'tag']) expect(exitCode).toBe(1) expect(stderr).toMatch(/No tag key available|tag-key/) }) it.skipIf(skipAws)('renders a bar chart by default', async () => { - const { stdout, exitCode } = await runCli(['costs', 'trend']) + const {stdout, exitCode} = await runCli(['costs', 'trend']) expect(exitCode).toBe(0) // Title must appear expect(stdout).toMatch(/AWS Cost Trend|No cost data found/) }) it.skipIf(skipAws)('renders a line chart with --line flag', async () => { - const { stdout, exitCode } = await runCli(['costs', 'trend', '--line']) + const {stdout, exitCode} = await runCli(['costs', 'trend', '--line']) expect(exitCode).toBe(0) expect(stdout).toMatch(/AWS Cost Trend|No cost data found/) }) it.skipIf(skipAws)('--json outputs valid JSON with series array', async () => { - const { stdout, exitCode } = await runCli(['costs', 'trend', '--json']) + const {stdout, exitCode} = await runCli(['costs', 'trend', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('groupBy') @@ -49,9 +46,7 @@ describe('dvmi costs trend', () => { }) it.skipIf(skipAws)('--group-by tag --tag-key env renders multi-series chart', async () => { - const { stdout, exitCode } = await runCli([ - 'costs', 'trend', '--group-by', 'tag', '--tag-key', 'env', - ]) + const {stdout, exitCode} = await runCli(['costs', 'trend', '--group-by', 'tag', '--tag-key', 'env']) expect(exitCode).toBe(0) expect(stdout).toMatch(/AWS Cost Trend|No cost data found/) }) diff --git a/tests/integration/doctor.test.js b/tests/integration/doctor.test.js index d391e39..f7e148c 100644 --- a/tests/integration/doctor.test.js +++ b/tests/integration/doctor.test.js @@ -1,9 +1,9 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' describe('dvmi doctor', () => { it('exits 0 and returns checks', async () => { - const { stdout, exitCode } = 
await runCli(['doctor', '--json']) + const {stdout, exitCode} = await runCli(['doctor', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data.checks.length).toBeGreaterThan(0) @@ -16,7 +16,7 @@ describe('dvmi doctor', () => { }) it('includes Node.js check', async () => { - const { stdout } = await runCli(['doctor', '--json']) + const {stdout} = await runCli(['doctor', '--json']) const data = JSON.parse(stdout) const nodeCheck = data.checks.find((c) => c.name === 'Node.js') expect(nodeCheck).toBeDefined() @@ -24,12 +24,12 @@ describe('dvmi doctor', () => { }) it('summary counts match checks', async () => { - const { stdout } = await runCli(['doctor', '--json']) + const {stdout} = await runCli(['doctor', '--json']) const data = JSON.parse(stdout) - const expected = data.checks.reduce( - (acc, c) => { acc[c.status] = (acc[c.status] ?? 0) + 1; return acc }, - {}, - ) + const expected = data.checks.reduce((acc, c) => { + acc[c.status] = (acc[c.status] ?? 0) + 1 + return acc + }, {}) expect(data.summary.ok).toBe(expected.ok ?? 0) expect(data.summary.warn).toBe(expected.warn ?? 0) expect(data.summary.fail).toBe(expected.fail ?? 
0) diff --git a/tests/integration/dotfiles/add.test.js b/tests/integration/dotfiles/add.test.js index 3ef5171..336d149 100644 --- a/tests/integration/dotfiles/add.test.js +++ b/tests/integration/dotfiles/add.test.js @@ -1,26 +1,26 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from '../helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from '../helpers.js' describe('dvmi dotfiles add', () => { // --------------------------------------------------------------------------- // --help // --------------------------------------------------------------------------- it('--help exits 0 and mentions dotfiles or chezmoi', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'add', '--help']) + const {stdout, exitCode} = await runCli(['dotfiles', 'add', '--help']) expect(exitCode).toBe(0) const lower = stdout.toLowerCase() expect(lower.match(/dotfile|chezmoi|add|track/)).toBeTruthy() }) it('--help includes expected flags', async () => { - const { stdout } = await runCli(['dotfiles', 'add', '--help']) + const {stdout} = await runCli(['dotfiles', 'add', '--help']) expect(stdout).toContain('--help') expect(stdout).toContain('--json') expect(stdout).toContain('--encrypt') }) it('--help includes examples', async () => { - const { stdout } = await runCli(['dotfiles', 'add', '--help']) + const {stdout} = await runCli(['dotfiles', 'add', '--help']) expect(stdout).toContain('dotfiles add') }) @@ -28,7 +28,7 @@ describe('dvmi dotfiles add', () => { // --json with no dotfiles enabled (default fixture config has no dotfiles) // --------------------------------------------------------------------------- it('--json exits non-zero when dotfiles not configured', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'add', '--json', '~/.zshrc']) + const {stdout, exitCode} = await runCli(['dotfiles', 'add', '--json', '~/.zshrc']) // The fixture config has no dotfiles.enabled — command should error // oclif --json 
mode writes error JSON to stdout expect(exitCode).not.toBe(0) @@ -39,10 +39,9 @@ describe('dvmi dotfiles add', () => { // --json with enabled fixture // --------------------------------------------------------------------------- it('--json returns valid DotfilesAddResult shape when enabled', async () => { - const { stdout, exitCode } = await runCli( - ['dotfiles', 'add', '--json', '/tmp/nonexistent-dvmi-test-file'], - { DVMI_DOTFILES_ENABLED: 'true' }, - ) + const {stdout, exitCode} = await runCli(['dotfiles', 'add', '--json', '/tmp/nonexistent-dvmi-test-file'], { + DVMI_DOTFILES_ENABLED: 'true', + }) if (exitCode === 0) { const data = JSON.parse(stdout) expect(data).toHaveProperty('added') diff --git a/tests/integration/dotfiles/setup.test.js b/tests/integration/dotfiles/setup.test.js index adcd11e..507a3ab 100644 --- a/tests/integration/dotfiles/setup.test.js +++ b/tests/integration/dotfiles/setup.test.js @@ -1,25 +1,25 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from '../helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from '../helpers.js' describe('dvmi dotfiles setup', () => { // --------------------------------------------------------------------------- // --help // --------------------------------------------------------------------------- it('--help exits 0 and mentions encryption or chezmoi', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'setup', '--help']) + const {stdout, exitCode} = await runCli(['dotfiles', 'setup', '--help']) expect(exitCode).toBe(0) const lower = stdout.toLowerCase() expect(lower.match(/chezmoi|dotfile|encrypt/)).toBeTruthy() }) it('--help includes expected flags', async () => { - const { stdout } = await runCli(['dotfiles', 'setup', '--help']) + const {stdout} = await runCli(['dotfiles', 'setup', '--help']) expect(stdout).toContain('--help') expect(stdout).toContain('--json') }) it('--help includes examples', async () => { - const { stdout } = await 
runCli(['dotfiles', 'setup', '--help']) + const {stdout} = await runCli(['dotfiles', 'setup', '--help']) expect(stdout).toContain('dotfiles setup') }) @@ -27,7 +27,7 @@ describe('dvmi dotfiles setup', () => { // --json // --------------------------------------------------------------------------- it('--json exits 0 and returns valid JSON', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'setup', '--json']) + const {stdout, exitCode} = await runCli(['dotfiles', 'setup', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('platform') @@ -37,25 +37,25 @@ describe('dvmi dotfiles setup', () => { }) it('--json platform is a valid platform string', async () => { - const { stdout } = await runCli(['dotfiles', 'setup', '--json']) + const {stdout} = await runCli(['dotfiles', 'setup', '--json']) const data = JSON.parse(stdout) expect(['macos', 'linux', 'wsl2']).toContain(data.platform) }) it('--json status is one of the expected values', async () => { - const { stdout } = await runCli(['dotfiles', 'setup', '--json']) + const {stdout} = await runCli(['dotfiles', 'setup', '--json']) const data = JSON.parse(stdout) expect(['success', 'skipped', 'failed']).toContain(data.status) }) it('--json chezmoiInstalled is a boolean', async () => { - const { stdout } = await runCli(['dotfiles', 'setup', '--json']) + const {stdout} = await runCli(['dotfiles', 'setup', '--json']) const data = JSON.parse(stdout) expect(typeof data.chezmoiInstalled).toBe('boolean') }) it('--json encryptionConfigured is a boolean', async () => { - const { stdout } = await runCli(['dotfiles', 'setup', '--json']) + const {stdout} = await runCli(['dotfiles', 'setup', '--json']) const data = JSON.parse(stdout) expect(typeof data.encryptionConfigured).toBe('boolean') }) @@ -64,7 +64,7 @@ describe('dvmi dotfiles setup', () => { // CI / non-interactive exit // --------------------------------------------------------------------------- it('CI=true 
without --json exits non-zero with TTY error', async () => { - const { stderr, exitCode } = await runCli(['dotfiles', 'setup'], { CI: 'true' }) + const {stderr, exitCode} = await runCli(['dotfiles', 'setup'], {CI: 'true'}) expect(exitCode).not.toBe(0) expect(stderr.toLowerCase()).toMatch(/interactive|terminal|tty/) }) diff --git a/tests/integration/dotfiles/status.test.js b/tests/integration/dotfiles/status.test.js index 890eecf..6f42b00 100644 --- a/tests/integration/dotfiles/status.test.js +++ b/tests/integration/dotfiles/status.test.js @@ -1,25 +1,25 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from '../helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from '../helpers.js' describe('dvmi dotfiles status', () => { // --------------------------------------------------------------------------- // --help // --------------------------------------------------------------------------- it('--help exits 0 and mentions dotfiles or status', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'status', '--help']) + const {stdout, exitCode} = await runCli(['dotfiles', 'status', '--help']) expect(exitCode).toBe(0) const lower = stdout.toLowerCase() expect(lower.match(/dotfile|status|chezmoi|managed/)).toBeTruthy() }) it('--help includes expected flags', async () => { - const { stdout } = await runCli(['dotfiles', 'status', '--help']) + const {stdout} = await runCli(['dotfiles', 'status', '--help']) expect(stdout).toContain('--help') expect(stdout).toContain('--json') }) it('--help includes examples', async () => { - const { stdout } = await runCli(['dotfiles', 'status', '--help']) + const {stdout} = await runCli(['dotfiles', 'status', '--help']) expect(stdout).toContain('dotfiles status') }) @@ -27,7 +27,7 @@ describe('dvmi dotfiles status', () => { // --json — not-configured state is valid (not an error) // --------------------------------------------------------------------------- it('--json exits 0 in 
not-configured state', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'status', '--json']) + const {stdout, exitCode} = await runCli(['dotfiles', 'status', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('platform') @@ -39,19 +39,19 @@ describe('dvmi dotfiles status', () => { }) it('--json enabled is boolean', async () => { - const { stdout } = await runCli(['dotfiles', 'status', '--json']) + const {stdout} = await runCli(['dotfiles', 'status', '--json']) const data = JSON.parse(stdout) expect(typeof data.enabled).toBe('boolean') }) it('--json files is an array', async () => { - const { stdout } = await runCli(['dotfiles', 'status', '--json']) + const {stdout} = await runCli(['dotfiles', 'status', '--json']) const data = JSON.parse(stdout) expect(Array.isArray(data.files)).toBe(true) }) it('--json summary has required numeric fields', async () => { - const { stdout } = await runCli(['dotfiles', 'status', '--json']) + const {stdout} = await runCli(['dotfiles', 'status', '--json']) const data = JSON.parse(stdout) expect(typeof data.summary.total).toBe('number') expect(typeof data.summary.encrypted).toBe('number') @@ -59,7 +59,7 @@ describe('dvmi dotfiles status', () => { }) it('--json platform is a valid platform string', async () => { - const { stdout } = await runCli(['dotfiles', 'status', '--json']) + const {stdout} = await runCli(['dotfiles', 'status', '--json']) const data = JSON.parse(stdout) expect(['macos', 'linux', 'wsl2']).toContain(data.platform) }) diff --git a/tests/integration/dotfiles/sync.test.js b/tests/integration/dotfiles/sync.test.js index 37e6caa..1f35d94 100644 --- a/tests/integration/dotfiles/sync.test.js +++ b/tests/integration/dotfiles/sync.test.js @@ -1,19 +1,19 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from '../helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from '../helpers.js' describe('dvmi dotfiles sync', () => { 
// --------------------------------------------------------------------------- // --help // --------------------------------------------------------------------------- it('--help exits 0 and mentions sync or remote', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'sync', '--help']) + const {stdout, exitCode} = await runCli(['dotfiles', 'sync', '--help']) expect(exitCode).toBe(0) const lower = stdout.toLowerCase() expect(lower.match(/sync|remote|push|pull|dotfile/)).toBeTruthy() }) it('--help includes expected flags', async () => { - const { stdout } = await runCli(['dotfiles', 'sync', '--help']) + const {stdout} = await runCli(['dotfiles', 'sync', '--help']) expect(stdout).toContain('--help') expect(stdout).toContain('--json') expect(stdout).toContain('--push') @@ -22,7 +22,7 @@ describe('dvmi dotfiles sync', () => { }) it('--help includes examples', async () => { - const { stdout } = await runCli(['dotfiles', 'sync', '--help']) + const {stdout} = await runCli(['dotfiles', 'sync', '--help']) expect(stdout).toContain('dotfiles sync') }) @@ -30,7 +30,7 @@ describe('dvmi dotfiles sync', () => { // --json — errors when dotfiles not configured // --------------------------------------------------------------------------- it('--json exits non-zero when dotfiles not configured', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'sync', '--json']) + const {stdout, exitCode} = await runCli(['dotfiles', 'sync', '--json']) // oclif --json mode writes error JSON to stdout expect(exitCode).not.toBe(0) expect(stdout.toLowerCase()).toMatch(/dotfiles|setup|configured|dvmi dotfiles setup/) @@ -40,7 +40,7 @@ describe('dvmi dotfiles sync', () => { // Flag validation // --------------------------------------------------------------------------- it('--push and --pull together exits non-zero with mutual exclusion error', async () => { - const { stdout, exitCode } = await runCli(['dotfiles', 'sync', '--push', '--pull', '--json']) + const 
{stdout, exitCode} = await runCli(['dotfiles', 'sync', '--push', '--pull', '--json']) // oclif --json mode writes error JSON to stdout expect(exitCode).not.toBe(0) expect(stdout.toLowerCase()).toMatch(/push.*pull|cannot|together|mutually|exclus/) @@ -50,7 +50,7 @@ describe('dvmi dotfiles sync', () => { // CI / non-interactive exit // --------------------------------------------------------------------------- it('CI=true without --json exits non-zero with TTY error', async () => { - const { stderr, exitCode } = await runCli(['dotfiles', 'sync'], { CI: 'true' }) + const {stderr, exitCode} = await runCli(['dotfiles', 'sync'], {CI: 'true'}) expect(exitCode).not.toBe(0) expect(stderr.toLowerCase()).toMatch(/interactive|terminal|tty/) }) diff --git a/tests/integration/help.test.js b/tests/integration/help.test.js index 2d81e07..537f2ec 100644 --- a/tests/integration/help.test.js +++ b/tests/integration/help.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' const commands = [ ['--help'], @@ -40,7 +40,7 @@ const commands = [ describe('--help output', () => { for (const args of commands) { it(`dvmi ${args.join(' ')}`, async () => { - const { stdout, exitCode } = await runCli(args) + const {stdout, exitCode} = await runCli(args) expect(exitCode).toBe(0) expect(stdout.length).toBeGreaterThan(10) expect(stdout).toContain('USAGE') @@ -49,7 +49,7 @@ describe('--help output', () => { // T022 (US3): branch create must not appear in top-level --help it('dvmi --help does not list branch create', async () => { - const { stdout, exitCode } = await runCli(['--help']) + const {stdout, exitCode} = await runCli(['--help']) expect(exitCode).toBe(0) expect(stdout).not.toContain('branch create') }) diff --git a/tests/integration/helpers.js b/tests/integration/helpers.js index 67943ec..a0568ff 100644 --- a/tests/integration/helpers.js +++ 
b/tests/integration/helpers.js @@ -1,7 +1,7 @@ -import { execaNode } from 'execa' -import { createServer } from 'node:http' -import { resolve, dirname } from 'node:path' -import { fileURLToPath } from 'node:url' +import {execaNode} from 'execa' +import {createServer} from 'node:http' +import {resolve, dirname} from 'node:path' +import {fileURLToPath} from 'node:url' import stripAnsi from 'strip-ansi' const __dirname = dirname(fileURLToPath(import.meta.url)) @@ -60,14 +60,11 @@ export async function runCliJson(args) { export async function createMockServer(handler) { const server = createServer(handler) await new Promise((resolve) => server.listen(0, '127.0.0.1', resolve)) - const { port } = /** @type {import('node:net').AddressInfo} */ (server.address()) + const {port} = /** @type {import('node:net').AddressInfo} */ (server.address()) return { port, url: `http://127.0.0.1:${port}`, - stop: () => - new Promise((resolve, reject) => - server.close((err) => (err ? reject(err) : resolve())), - ), + stop: () => new Promise((resolve, reject) => server.close((err) => (err ? reject(err) : resolve()))), } } @@ -96,5 +93,21 @@ export function jsonResponse(res, data, status = 200) { * @returns {Promise<{ stdout: string, stderr: string, exitCode: number }>} */ export function runCliWithMockGitHub(args, port, extraEnv = {}) { - return runCli(args, { GITHUB_API_URL: `http://127.0.0.1:${port}`, ...extraEnv }) + return runCli(args, {GITHUB_API_URL: `http://127.0.0.1:${port}`, ...extraEnv}) +} + +/** + * Run `dvmi sync-config-ai --json` and return the parsed result. + * Uses a temporary AI config path so tests are isolated from the real store. 
+ * + * @param {string} [aiConfigPath] - Override AI config path (defaults to DVMI_AI_CONFIG_PATH env or a temp path) + * @returns {Promise<{ environments: unknown[], categories: { mcp: unknown[], command: unknown[], skill: unknown[], agent: unknown[] } }>} + */ +export async function runSyncConfigAi(aiConfigPath) { + const env = aiConfigPath ? {DVMI_AI_CONFIG_PATH: aiConfigPath} : {} + const result = await runCli(['sync-config-ai', '--json'], env) + if (result.exitCode !== 0) { + throw new Error(`sync-config-ai exited with ${result.exitCode}: ${result.stderr}`) + } + return JSON.parse(result.stdout) } diff --git a/tests/integration/init.test.js b/tests/integration/init.test.js index 7eaf7ab..8e45def 100644 --- a/tests/integration/init.test.js +++ b/tests/integration/init.test.js @@ -1,16 +1,16 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' describe('dvmi init', () => { it('--help exits 0 and contains description', async () => { - const { stdout, exitCode } = await runCli(['init', '--help']) + const {stdout, exitCode} = await runCli(['init', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('--dry-run') }) it('--dry-run --json exits 0 and returns steps array', async () => { - const { stdout, exitCode } = await runCli(['init', '--dry-run', '--json']) + const {stdout, exitCode} = await runCli(['init', '--dry-run', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('steps') @@ -19,14 +19,14 @@ describe('dvmi init', () => { }) it('--dry-run does not modify real config', async () => { - const { exitCode } = await runCli(['init', '--dry-run', '--json']) + const {exitCode} = await runCli(['init', '--dry-run', '--json']) expect(exitCode).toBe(0) // No side effects — config path in test env is isolated }) // T011: ClickUp step appears in --dry-run --json output 
it('--dry-run --json includes clickup step with status "would configure"', async () => { - const { stdout, exitCode } = await runCli(['init', '--dry-run', '--json']) + const {stdout, exitCode} = await runCli(['init', '--dry-run', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) const clickupStep = data.steps.find((s) => s.name === 'clickup') @@ -36,14 +36,14 @@ describe('dvmi init', () => { // T012: --help does not mention branch create it('--help does not mention "branch create"', async () => { - const { stdout, exitCode } = await runCli(['init', '--help']) + const {stdout, exitCode} = await runCli(['init', '--help']) expect(exitCode).toBe(0) expect(stdout).not.toContain('branch create') }) // T015 (US2): --json mode reports clickup not_configured when no clickup settings exist it('--json reports clickup not_configured when no clickup teamId in config', async () => { - const { stdout, exitCode } = await runCli(['init', '--json']) + const {stdout, exitCode} = await runCli(['init', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) const clickupStep = data.steps.find((s) => s.name === 'clickup') diff --git a/tests/integration/json-output.test.js b/tests/integration/json-output.test.js index 5a6782c..3648dd3 100644 --- a/tests/integration/json-output.test.js +++ b/tests/integration/json-output.test.js @@ -1,8 +1,8 @@ -import { describe, it, expect } from 'vitest' -import { runCli, runCliJson } from './helpers.js' -import { mkdtemp, rm } from 'node:fs/promises' -import { tmpdir } from 'node:os' -import { join } from 'node:path' +import {describe, it, expect} from 'vitest' +import {runCli, runCliJson} from './helpers.js' +import {mkdtemp, rm} from 'node:fs/promises' +import {tmpdir} from 'node:os' +import {join} from 'node:path' // Tests that call the real ClickUp API require a token in the keychain. // In CI there are no real credentials, so we skip those tests. 
@@ -10,7 +10,7 @@ const isCI = Boolean(process.env.CI) describe('--json flag', () => { it('doctor --json returns valid JSON with checks array', async () => { - const { stdout, exitCode } = await runCli(['doctor', '--json']) + const {stdout, exitCode} = await runCli(['doctor', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('checks') @@ -22,7 +22,7 @@ describe('--json flag', () => { }) it('changelog --json returns sections object', async () => { - const { stdout, exitCode } = await runCli(['changelog', '--json']) + const {stdout, exitCode} = await runCli(['changelog', '--json']) // May fail if not in a git repo with tags, but should produce valid JSON or non-zero exit if (exitCode === 0) { const data = JSON.parse(stdout) @@ -58,7 +58,7 @@ describe('--json flag', () => { }) it('vuln search --json returns valid JSON shape or non-zero exit in offline env', async () => { - const { stdout, stderr, exitCode } = await runCli(['vuln', 'search', 'openssl', '--json']) + const {stdout, stderr, exitCode} = await runCli(['vuln', 'search', 'openssl', '--json']) if (exitCode === 0) { const data = JSON.parse(stdout) expect(data).toHaveProperty('keyword', 'openssl') @@ -70,7 +70,7 @@ describe('--json flag', () => { }) it('vuln detail --json returns valid JSON shape or non-zero exit in offline env', async () => { - const { stdout, stderr, exitCode } = await runCli(['vuln', 'detail', 'CVE-2021-44228', '--json']) + const {stdout, stderr, exitCode} = await runCli(['vuln', 'detail', 'CVE-2021-44228', '--json']) if (exitCode === 0) { const data = JSON.parse(stdout) expect(data).toHaveProperty('id', 'CVE-2021-44228') @@ -84,7 +84,7 @@ describe('--json flag', () => { it('vuln scan --json returns valid JSON shape in empty dir', async () => { const tmpDir = await mkdtemp(join(tmpdir(), 'dvmi-json-scan-')) try { - const { stdout, exitCode } = await runCli(['vuln', 'scan', '--json'], { + const {stdout, exitCode} = await runCli(['vuln', 'scan', 
'--json'], { DVMI_SCAN_DIR: tmpDir, }) if (exitCode === 0) { @@ -95,7 +95,7 @@ describe('--json flag', () => { expect(data).toHaveProperty('errors') } } finally { - await rm(tmpDir, { recursive: true, force: true }) + await rm(tmpDir, {recursive: true, force: true}) } }) }) diff --git a/tests/integration/logs.test.js b/tests/integration/logs.test.js index 2ce3358..83d45a5 100644 --- a/tests/integration/logs.test.js +++ b/tests/integration/logs.test.js @@ -1,18 +1,15 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' // AWS-calling tests require real credentials — skip in CI or when no creds are configured const hasAwsCreds = Boolean( - process.env.AWS_ACCESS_KEY_ID || - process.env.AWS_PROFILE || - process.env.AWS_VAULT || - process.env.AWS_SESSION_TOKEN, + process.env.AWS_ACCESS_KEY_ID || process.env.AWS_PROFILE || process.env.AWS_VAULT || process.env.AWS_SESSION_TOKEN, ) const skipAws = Boolean(process.env.CI) || !hasAwsCreds describe('dvmi logs', () => { it('shows help', async () => { - const { stdout, exitCode } = await runCli(['logs', '--help']) + const {stdout, exitCode} = await runCli(['logs', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('--group') expect(stdout).toContain('--filter') @@ -22,39 +19,31 @@ describe('dvmi logs', () => { }) it('exits 1 for --limit 0', async () => { - const { stderr, exitCode } = await runCli([ - 'logs', '--group', '/aws/lambda/fn', '--limit', '0', - ]) + const {stderr, exitCode} = await runCli(['logs', '--group', '/aws/lambda/fn', '--limit', '0']) expect(exitCode).toBe(1) expect(stderr).toMatch(/--limit must be between 1 and 10000/) }) it('exits 1 for --limit 99999', async () => { - const { stderr, exitCode } = await runCli([ - 'logs', '--group', '/aws/lambda/fn', '--limit', '99999', - ]) + const {stderr, exitCode} = await runCli(['logs', '--group', '/aws/lambda/fn', '--limit', '99999']) 
expect(exitCode).toBe(1) expect(stderr).toMatch(/--limit must be between 1 and 10000/) }) it('exits 1 for invalid --since value', async () => { - const { stderr, exitCode } = await runCli([ - 'logs', '--group', '/aws/lambda/fn', '--since', '2d', - ]) + const {stderr, exitCode} = await runCli(['logs', '--group', '/aws/lambda/fn', '--since', '2d']) expect(exitCode).toBe(1) expect(stderr).toMatch(/--since must be one of: 1h, 24h, 7d/) }) it.skipIf(skipAws)('renders event table for a valid log group', async () => { - const { stdout, exitCode } = await runCli(['logs', '--group', '/aws/lambda/fn']) + const {stdout, exitCode} = await runCli(['logs', '--group', '/aws/lambda/fn']) expect(exitCode).toBe(0) expect(stdout).toMatch(/Log Group:|No log groups found/) }) it.skipIf(skipAws)('--json outputs NDJSON to stdout', async () => { - const { stdout, exitCode } = await runCli([ - 'logs', '--group', '/aws/lambda/fn', '--json', - ]) + const {stdout, exitCode} = await runCli(['logs', '--group', '/aws/lambda/fn', '--json']) expect(exitCode).toBe(0) // Each line should be parseable JSON or empty for (const line of stdout.split('\n').filter(Boolean)) { @@ -63,25 +52,19 @@ describe('dvmi logs', () => { }) it.skipIf(skipAws)('--limit 5 shows truncation notice when events fill the limit', async () => { - const { stdout } = await runCli([ - 'logs', '--group', '/aws/lambda/fn', '--limit', '5', - ]) + const {stdout} = await runCli(['logs', '--group', '/aws/lambda/fn', '--limit', '5']) // If 5 events returned, truncation notice should appear (or 0 events) expect(stdout).toMatch(/events shown|No log groups found/) }) it.skipIf(skipAws)('--since 7d uses correct time window', async () => { - const { exitCode } = await runCli([ - 'logs', '--group', '/aws/lambda/fn', '--since', '7d', - ]) + const {exitCode} = await runCli(['logs', '--group', '/aws/lambda/fn', '--since', '7d']) // Should exit cleanly (0 or 1 for not found, never 2) expect(exitCode).not.toBe(2) }) it.skipIf(skipAws)('exits with 
error for non-existent group', async () => { - const { stderr, exitCode } = await runCli([ - 'logs', '--group', '/not/found/group/xyz', - ]) + const {stderr, exitCode} = await runCli(['logs', '--group', '/not/found/group/xyz']) expect(exitCode).toBeGreaterThan(0) expect(stderr).toMatch(/Log group not found|Access denied|No AWS credentials/) }) diff --git a/tests/integration/pr-review.test.js b/tests/integration/pr-review.test.js index 29c51a3..26eff7b 100644 --- a/tests/integration/pr-review.test.js +++ b/tests/integration/pr-review.test.js @@ -1,23 +1,23 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from './helpers.js' describe('pr review', () => { - it('mostra errore se org non configurata', async () => { - // DVMI_CONFIG_PATH punta a file inesistente → config vuota → errore non-zero - const { exitCode } = await runCli(['pr', 'review']) + it('exits non-zero when org is not configured', async () => { + // DVMI_CONFIG_PATH points to a non-existent file → empty config → non-zero exit + const {exitCode} = await runCli(['pr', 'review']) expect(exitCode).not.toBe(0) }) }) describe('pr detail', () => { - it('errore se --repo non è nel formato owner/repo', async () => { - const { exitCode, stderr } = await runCli(['pr', 'detail', '42', '--repo', 'repo-senza-owner']) + it('exits non-zero when --repo is not in owner/repo format', async () => { + const {exitCode, stderr} = await runCli(['pr', 'detail', '42', '--repo', 'repo-without-owner']) expect(exitCode).not.toBe(0) expect(stderr).toContain('owner/repo') }) - it('errore se manca il numero PR', async () => { - const { exitCode } = await runCli(['pr', 'detail']) + it('exits non-zero when PR number is missing', async () => { + const {exitCode} = await runCli(['pr', 'detail']) expect(exitCode).not.toBe(0) }) }) diff --git a/tests/integration/prompts/browse.test.js b/tests/integration/prompts/browse.test.js index 
57e9b88..3c45505 100644 --- a/tests/integration/prompts/browse.test.js +++ b/tests/integration/prompts/browse.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect, beforeAll, afterAll } from 'vitest' -import { runCli, runCliWithMockGitHub, createMockServer, jsonResponse } from '../helpers.js' +import {describe, it, expect, beforeAll, afterAll} from 'vitest' +import {runCli, runCliWithMockGitHub, createMockServer, jsonResponse} from '../helpers.js' function toBase64(str) { return Buffer.from(str, 'utf8').toString('base64') @@ -37,10 +37,10 @@ beforeAll(async () => { } if (req.method === 'GET' && path === '/user') { - return jsonResponse(res, { login: 'testdev', id: 1 }) + return jsonResponse(res, {login: 'testdev', id: 1}) } - return jsonResponse(res, { message: 'Not Found' }, 404) + return jsonResponse(res, {message: 'Not Found'}, 404) }) // Mock skills.sh API @@ -51,13 +51,13 @@ beforeAll(async () => { query: url.searchParams.get('q') ?? '', searchType: 'fuzzy', skills: [ - { id: 'code-review', name: 'Code Review', description: 'Review code', installs: 1200 }, - { id: 'sql-gen', name: 'SQL Generator', description: 'Generate SQL', installs: 800 }, + {id: 'code-review', name: 'Code Review', description: 'Review code', installs: 1200}, + {id: 'sql-gen', name: 'SQL Generator', description: 'Generate SQL', installs: 800}, ], count: 2, }) } - return jsonResponse(res, { message: 'Not Found' }, 404) + return jsonResponse(res, {message: 'Not Found'}, 404) }) }) @@ -67,19 +67,19 @@ afterAll(async () => { describe('dvmi prompts browse', () => { it('--help exits 0 and shows usage', async () => { - const { stdout, exitCode } = await runCli(['prompts', 'browse', '--help']) + const {stdout, exitCode} = await runCli(['prompts', 'browse', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('prompts browse') }) it('--help shows source argument', async () => { - const { stdout } = await runCli(['prompts', 'browse', '--help']) + const 
{stdout} = await runCli(['prompts', 'browse', '--help']) expect(stdout).toContain('SOURCE') }) it('browse skills --json returns skills array', async () => { - const { stdout, exitCode } = await runCli(['prompts', 'browse', 'skills', '--query', 'review', '--json'], { + const {stdout, exitCode} = await runCli(['prompts', 'browse', 'skills', '--query', 'review', '--json'], { SKILLS_SH_BASE_URL: `http://127.0.0.1:${skillsMock.port}`, }) expect(exitCode).toBe(0) @@ -97,7 +97,7 @@ describe('dvmi prompts browse', () => { }) it('browse skills without --query exits non-zero with actionable error', async () => { - const { stdout, stderr, exitCode } = await runCli(['prompts', 'browse', 'skills', '--json'], { + const {stdout, stderr, exitCode} = await runCli(['prompts', 'browse', 'skills', '--json'], { SKILLS_SH_BASE_URL: `http://127.0.0.1:${skillsMock.port}`, }) expect(exitCode).not.toBe(0) @@ -106,7 +106,7 @@ describe('dvmi prompts browse', () => { }) it('browse awesome --json returns entries array', async () => { - const { stdout, exitCode } = await runCliWithMockGitHub( + const {stdout, exitCode} = await runCliWithMockGitHub( ['prompts', 'browse', 'awesome', '--category', 'agents', '--json'], githubMock.port, ) @@ -125,7 +125,7 @@ describe('dvmi prompts browse', () => { }) it('exits non-zero with invalid source argument', async () => { - const { exitCode } = await runCli(['prompts', 'browse', 'invalid-source', '--json']) + const {exitCode} = await runCli(['prompts', 'browse', 'invalid-source', '--json']) expect(exitCode).not.toBe(0) }) }) diff --git a/tests/integration/prompts/download.test.js b/tests/integration/prompts/download.test.js index 501307e..3070cba 100644 --- a/tests/integration/prompts/download.test.js +++ b/tests/integration/prompts/download.test.js @@ -1,8 +1,8 @@ -import { describe, it, expect, beforeAll, afterAll, beforeEach } from 'vitest' -import { mkdtemp, rm, writeFile, mkdir } from 'node:fs/promises' -import { join } from 'node:path' -import { tmpdir 
} from 'node:os' -import { runCli, runCliWithMockGitHub, createMockServer, jsonResponse } from '../helpers.js' +import {describe, it, expect, beforeAll, afterAll, beforeEach} from 'vitest' +import {mkdtemp, rm, writeFile, mkdir} from 'node:fs/promises' +import {join} from 'node:path' +import {tmpdir} from 'node:os' +import {runCli, runCliWithMockGitHub, createMockServer, jsonResponse} from '../helpers.js' function toBase64(str) { return Buffer.from(str, 'utf8').toString('base64') @@ -35,7 +35,7 @@ beforeAll(async () => { // Tree endpoint (used by interactive select in list) if (req.method === 'GET' && /\/repos\/savez\/prompt-for-ai\/git\/trees\//.test(path)) { return jsonResponse(res, { - tree: [{ type: 'blob', path: 'coding/refactor-prompt.md', sha: 'abc' }], + tree: [{type: 'blob', path: 'coding/refactor-prompt.md', sha: 'abc'}], truncated: false, }) } @@ -45,7 +45,7 @@ beforeAll(async () => { if (req.method === 'GET' && contentsMatch) { const filePath = decodeURIComponent(contentsMatch[1]) const content = contentMap[filePath] - if (!content) return jsonResponse(res, { message: 'Not Found' }, 404) + if (!content) return jsonResponse(res, {message: 'Not Found'}, 404) return jsonResponse(res, { type: 'file', encoding: 'base64', @@ -55,10 +55,10 @@ beforeAll(async () => { } if (req.method === 'GET' && path === '/user') { - return jsonResponse(res, { login: 'testdev', id: 1 }) + return jsonResponse(res, {login: 'testdev', id: 1}) } - return jsonResponse(res, { message: 'Not Found' }, 404) + return jsonResponse(res, {message: 'Not Found'}, 404) }) }) @@ -73,7 +73,7 @@ beforeEach(async () => { // afterEach is intentionally omitted — temp dirs will be cleaned up by the OS // but we remove them explicitly for cleanliness afterAll(async () => { - if (tmpDir) await rm(tmpDir, { recursive: true, force: true }) + if (tmpDir) await rm(tmpDir, {recursive: true, force: true}) }) /** @@ -83,26 +83,24 @@ afterAll(async () => { * @returns {Promise<{stdout: string, stderr: string, 
exitCode: number}>} */ function run(args, extra = {}) { - return runCliWithMockGitHub(args, mock.port, { DVMI_PROMPTS_DIR: join(tmpDir, '.prompts'), ...extra }) + return runCliWithMockGitHub(args, mock.port, {DVMI_PROMPTS_DIR: join(tmpDir, '.prompts'), ...extra}) } describe('dvmi prompts download', () => { it('--help exits 0 and shows usage', async () => { - const { stdout, exitCode } = await runCli(['prompts', 'download', '--help']) + const {stdout, exitCode} = await runCli(['prompts', 'download', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('prompts download') }) it('--help shows --overwrite flag', async () => { - const { stdout } = await runCli(['prompts', 'download', '--help']) + const {stdout} = await runCli(['prompts', 'download', '--help']) expect(stdout).toContain('--overwrite') }) it('--json with explicit path downloads and returns downloaded array', async () => { - const { stdout, exitCode } = await run([ - 'prompts', 'download', 'coding/refactor-prompt.md', '--json', - ]) + const {stdout, exitCode} = await run(['prompts', 'download', 'coding/refactor-prompt.md', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('downloaded') @@ -116,12 +114,10 @@ describe('dvmi prompts download', () => { it('--json with explicit path skips when file already exists', async () => { // Pre-create the destination file inside the prompts dir const promptsDir = join(tmpDir, '.prompts', 'coding') - await mkdir(promptsDir, { recursive: true }) + await mkdir(promptsDir, {recursive: true}) await writeFile(join(promptsDir, 'refactor-prompt.md'), 'existing content') - const { stdout, exitCode } = await run([ - 'prompts', 'download', 'coding/refactor-prompt.md', '--json', - ]) + const {stdout, exitCode} = await run(['prompts', 'download', 'coding/refactor-prompt.md', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data.skipped).toHaveLength(1) @@ -130,12 +126,10 
@@ describe('dvmi prompts download', () => { it('--json --overwrite replaces existing file', async () => { const promptsDir = join(tmpDir, '.prompts', 'coding') - await mkdir(promptsDir, { recursive: true }) + await mkdir(promptsDir, {recursive: true}) await writeFile(join(promptsDir, 'refactor-prompt.md'), 'old content') - const { stdout, exitCode } = await run([ - 'prompts', 'download', 'coding/refactor-prompt.md', '--overwrite', '--json', - ]) + const {stdout, exitCode} = await run(['prompts', 'download', 'coding/refactor-prompt.md', '--overwrite', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data.downloaded).toHaveLength(1) @@ -143,7 +137,7 @@ describe('dvmi prompts download', () => { }) it('--json exits 1 when path argument is missing', async () => { - const { stdout, stderr, exitCode } = await run(['prompts', 'download', '--json']) + const {stdout, stderr, exitCode} = await run(['prompts', 'download', '--json']) expect(exitCode).not.toBe(0) // oclif outputs errors to stdout as JSON in --json mode const combined = stdout + stderr @@ -151,9 +145,7 @@ describe('dvmi prompts download', () => { }) it('--json exits non-zero when prompt path does not exist in repo', async () => { - const { exitCode } = await run([ - 'prompts', 'download', 'nonexistent/prompt.md', '--json', - ]) + const {exitCode} = await run(['prompts', 'download', 'nonexistent/prompt.md', '--json']) expect(exitCode).not.toBe(0) }) }) diff --git a/tests/integration/prompts/install-speckit.test.js b/tests/integration/prompts/install-speckit.test.js index d247f9c..cc7f370 100644 --- a/tests/integration/prompts/install-speckit.test.js +++ b/tests/integration/prompts/install-speckit.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from '../helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from '../helpers.js' /** * Run `dvmi prompts install-speckit` with optional extra args. 
@@ -16,49 +16,49 @@ function run(extraArgs = [], env = {}) { describe('dvmi prompts install-speckit', () => { it('--help exits 0 and shows usage', async () => { - const { stdout, exitCode } = await run(['--help']) + const {stdout, exitCode} = await run(['--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('install-speckit') }) it('--help shows --force flag', async () => { - const { stdout } = await run(['--help']) + const {stdout} = await run(['--help']) expect(stdout).toContain('--force') }) it('--help shows --ai flag', async () => { - const { stdout } = await run(['--help']) + const {stdout} = await run(['--help']) expect(stdout).toContain('--ai') }) it('--help shows --reinstall flag', async () => { - const { stdout } = await run(['--help']) + const {stdout} = await run(['--help']) expect(stdout).toContain('--reinstall') }) it('exits non-zero with actionable message when uv is not installed', async () => { // Provide a PATH that contains no `uv` binary so isUvInstalled() returns false. 
- const { stderr, exitCode } = await run([], { PATH: '/dev/null' }) + const {stderr, exitCode} = await run([], {PATH: '/dev/null'}) expect(exitCode).not.toBe(0) const combined = stderr expect(combined.toLowerCase()).toContain('uv') }) it('succeeds (exit 0) when uv and specify stubs are in PATH', async () => { - const { exitCode } = await run() + const {exitCode} = await run() expect(exitCode).toBe(0) }) it('passes --force through to specify init', async () => { - const { stdout, exitCode } = await run(['--force']) + const {stdout, exitCode} = await run(['--force']) expect(exitCode).toBe(0) // The fake specify stub echoes its args; --force must be forwarded expect(stdout).toContain('--force') }) it('passes --ai flag through to specify init', async () => { - const { stdout, exitCode } = await run(['--ai', 'opencode']) + const {stdout, exitCode} = await run(['--ai', 'opencode']) expect(exitCode).toBe(0) expect(stdout).toContain('--ai') expect(stdout).toContain('opencode') diff --git a/tests/integration/prompts/list.test.js b/tests/integration/prompts/list.test.js index 1a49004..3a25c14 100644 --- a/tests/integration/prompts/list.test.js +++ b/tests/integration/prompts/list.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect, beforeAll, afterAll } from 'vitest' -import { runCli, runCliWithMockGitHub, createMockServer, jsonResponse } from '../helpers.js' +import {describe, it, expect, beforeAll, afterAll} from 'vitest' +import {runCli, runCliWithMockGitHub, createMockServer, jsonResponse} from '../helpers.js' function toBase64(str) { return Buffer.from(str, 'utf8').toString('base64') @@ -45,11 +45,11 @@ beforeAll(async () => { if (req.method === 'GET' && /\/repos\/savez\/prompt-for-ai\/git\/trees\//.test(path)) { return jsonResponse(res, { tree: [ - { type: 'blob', path: 'coding/refactor-prompt.md', sha: 'abc' }, - { type: 'blob', path: 'testing/test-generator.md', sha: 'def' }, - { type: 'blob', path: 'README.md', sha: 'r1' }, - { type: 'blob', path: 
'CONTRIBUTING.md', sha: 'c1' }, - { type: 'blob', path: 'PULL_REQUEST_TEMPLATE.md', sha: 'p1' }, + {type: 'blob', path: 'coding/refactor-prompt.md', sha: 'abc'}, + {type: 'blob', path: 'testing/test-generator.md', sha: 'def'}, + {type: 'blob', path: 'README.md', sha: 'r1'}, + {type: 'blob', path: 'CONTRIBUTING.md', sha: 'c1'}, + {type: 'blob', path: 'PULL_REQUEST_TEMPLATE.md', sha: 'p1'}, ], truncated: false, }) @@ -60,7 +60,7 @@ beforeAll(async () => { if (req.method === 'GET' && contentsMatch) { const filePath = decodeURIComponent(contentsMatch[1]) const content = contentMap[filePath] - if (!content) return jsonResponse(res, { message: 'Not Found' }, 404) + if (!content) return jsonResponse(res, {message: 'Not Found'}, 404) return jsonResponse(res, { type: 'file', encoding: 'base64', @@ -71,10 +71,10 @@ beforeAll(async () => { // GET /user (Octokit auth check) if (req.method === 'GET' && path === '/user') { - return jsonResponse(res, { login: 'testdev', id: 1 }) + return jsonResponse(res, {login: 'testdev', id: 1}) } - return jsonResponse(res, { message: 'Not Found' }, 404) + return jsonResponse(res, {message: 'Not Found'}, 404) }) }) @@ -84,20 +84,20 @@ afterAll(async () => { describe('dvmi prompts list', () => { it('--help exits 0 and shows usage', async () => { - const { stdout, exitCode } = await runCli(['prompts', 'list', '--help']) + const {stdout, exitCode} = await runCli(['prompts', 'list', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('prompts list') }) it('--help shows --filter flag', async () => { - const { stdout, exitCode } = await runCli(['prompts', 'list', '--help']) + const {stdout, exitCode} = await runCli(['prompts', 'list', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('--filter') }) it('--json returns only prompt files (excludes README/CONTRIBUTING/PULL_REQUEST_TEMPLATE)', async () => { - const { stdout, exitCode } = await runCliWithMockGitHub(['prompts', 'list', '--json'], 
mock.port) + const {stdout, exitCode} = await runCliWithMockGitHub(['prompts', 'list', '--json'], mock.port) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data.total).toBe(2) @@ -108,7 +108,7 @@ describe('dvmi prompts list', () => { }) it('--json returns prompts array with expected shape', async () => { - const { stdout, exitCode } = await runCliWithMockGitHub(['prompts', 'list', '--json'], mock.port) + const {stdout, exitCode} = await runCliWithMockGitHub(['prompts', 'list', '--json'], mock.port) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('prompts') @@ -124,9 +124,9 @@ describe('dvmi prompts list', () => { }) it('--json returns prompts with correct titles and categories', async () => { - const { stdout, exitCode } = await runCliWithMockGitHub(['prompts', 'list', '--json'], mock.port) + const {stdout, exitCode} = await runCliWithMockGitHub(['prompts', 'list', '--json'], mock.port) expect(exitCode).toBe(0) - const { prompts } = JSON.parse(stdout) + const {prompts} = JSON.parse(stdout) const refactor = prompts.find((p) => p.path === 'coding/refactor-prompt.md') expect(refactor?.title).toBe('Refactor Prompt') @@ -139,29 +139,29 @@ describe('dvmi prompts list', () => { }) it('--filter narrows results by title/category', async () => { - const { stdout, exitCode } = await runCliWithMockGitHub( + const {stdout, exitCode} = await runCliWithMockGitHub( ['prompts', 'list', '--filter', 'refactor', '--json'], mock.port, ) expect(exitCode).toBe(0) - const { prompts, total } = JSON.parse(stdout) + const {prompts, total} = JSON.parse(stdout) expect(total).toBe(1) expect(prompts[0].title).toBe('Refactor Prompt') }) it('--filter with no matches returns empty array', async () => { - const { stdout, exitCode } = await runCliWithMockGitHub( + const {stdout, exitCode} = await runCliWithMockGitHub( ['prompts', 'list', '--filter', 'zzznonexistent', '--json'], mock.port, ) expect(exitCode).toBe(0) - const { prompts, total } = 
JSON.parse(stdout) + const {prompts, total} = JSON.parse(stdout) expect(Array.isArray(prompts)).toBe(true) expect(total).toBe(0) }) it('exits with code 1 and actionable error when GitHub API is unreachable', async () => { - const { stdout, stderr, exitCode } = await runCli(['prompts', 'list', '--json'], { + const {stdout, stderr, exitCode} = await runCli(['prompts', 'list', '--json'], { GITHUB_API_URL: 'http://127.0.0.1:1', // unreachable port }) expect(exitCode).not.toBe(0) diff --git a/tests/integration/prompts/run.test.js b/tests/integration/prompts/run.test.js index 0bdc4f9..7a51cf6 100644 --- a/tests/integration/prompts/run.test.js +++ b/tests/integration/prompts/run.test.js @@ -1,8 +1,8 @@ -import { describe, it, expect, afterAll, beforeEach } from 'vitest' -import { mkdtemp, rm, writeFile, mkdir } from 'node:fs/promises' -import { join } from 'node:path' -import { tmpdir } from 'node:os' -import { runCli } from '../helpers.js' +import {describe, it, expect, afterAll, beforeEach} from 'vitest' +import {mkdtemp, rm, writeFile, mkdir} from 'node:fs/promises' +import {join} from 'node:path' +import {tmpdir} from 'node:os' +import {runCli} from '../helpers.js' const REFACTOR_CONTENT = `--- title: Refactor Prompt @@ -22,7 +22,7 @@ beforeEach(async () => { afterAll(async () => { // Cleanup any leftover temp dirs - if (promptsDir) await rm(promptsDir, { recursive: true, force: true }).catch(() => {}) + if (promptsDir) await rm(promptsDir, {recursive: true, force: true}).catch(() => {}) }) /** @@ -32,30 +32,35 @@ afterAll(async () => { * @returns {Promise<{ stdout: string, stderr: string, exitCode: number }>} */ function run(args, extra = {}) { - return runCli(args, { DVMI_PROMPTS_DIR: promptsDir, ...extra }) + return runCli(args, {DVMI_PROMPTS_DIR: promptsDir, ...extra}) } describe('dvmi prompts run', () => { it('--help exits 0 and shows usage', async () => { - const { stdout, exitCode } = await runCli(['prompts', 'run', '--help']) + const {stdout, exitCode} = await 
runCli(['prompts', 'run', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('prompts run') }) it('--help shows --tool flag', async () => { - const { stdout } = await runCli(['prompts', 'run', '--help']) + const {stdout} = await runCli(['prompts', 'run', '--help']) expect(stdout).toContain('--tool') }) describe('--json mode', () => { it('outputs invocation plan when prompt exists and tool is configured', async () => { // Write a local prompt file - await mkdir(join(promptsDir, 'coding'), { recursive: true }) + await mkdir(join(promptsDir, 'coding'), {recursive: true}) await writeFile(join(promptsDir, 'coding', 'refactor-prompt.md'), REFACTOR_CONTENT) - const { stdout, exitCode } = await run([ - 'prompts', 'run', 'coding/refactor-prompt.md', '--tool', 'opencode', '--json', + const {stdout, exitCode} = await run([ + 'prompts', + 'run', + 'coding/refactor-prompt.md', + '--tool', + 'opencode', + '--json', ]) expect(exitCode).toBe(0) @@ -68,11 +73,16 @@ describe('dvmi prompts run', () => { }) it('invocation field includes the tool binary and flag', async () => { - await mkdir(join(promptsDir, 'coding'), { recursive: true }) + await mkdir(join(promptsDir, 'coding'), {recursive: true}) await writeFile(join(promptsDir, 'coding', 'refactor-prompt.md'), REFACTOR_CONTENT) - const { stdout, exitCode } = await run([ - 'prompts', 'run', 'coding/refactor-prompt.md', '--tool', 'opencode', '--json', + const {stdout, exitCode} = await run([ + 'prompts', + 'run', + 'coding/refactor-prompt.md', + '--tool', + 'opencode', + '--json', ]) expect(exitCode).toBe(0) const data = JSON.parse(stdout) @@ -81,19 +91,17 @@ describe('dvmi prompts run', () => { }) it('exits 1 when no path is provided in --json mode', async () => { - const { stdout, stderr, exitCode } = await run(['prompts', 'run', '--tool', 'opencode', '--json']) + const {stdout, stderr, exitCode} = await run(['prompts', 'run', '--tool', 'opencode', '--json']) expect(exitCode).not.toBe(0) 
const combined = stdout + stderr expect(combined.length).toBeGreaterThan(0) }) it('exits 1 when no tool is configured and --tool is not passed', async () => { - await mkdir(join(promptsDir, 'coding'), { recursive: true }) + await mkdir(join(promptsDir, 'coding'), {recursive: true}) await writeFile(join(promptsDir, 'coding', 'refactor-prompt.md'), REFACTOR_CONTENT) - const { stdout, stderr, exitCode } = await run([ - 'prompts', 'run', 'coding/refactor-prompt.md', '--json', - ]) + const {stdout, stderr, exitCode} = await run(['prompts', 'run', 'coding/refactor-prompt.md', '--json']) expect(exitCode).not.toBe(0) const combined = stdout + stderr // oclif --json mode puts the error in stdout as JSON; match against suggestions @@ -101,8 +109,13 @@ describe('dvmi prompts run', () => { }) it('exits 1 when the local prompt file does not exist', async () => { - const { stdout, stderr, exitCode } = await run([ - 'prompts', 'run', 'nonexistent/prompt.md', '--tool', 'opencode', '--json', + const {stdout, stderr, exitCode} = await run([ + 'prompts', + 'run', + 'nonexistent/prompt.md', + '--tool', + 'opencode', + '--json', ]) expect(exitCode).not.toBe(0) const combined = stdout + stderr diff --git a/tests/integration/security/setup.test.js b/tests/integration/security/setup.test.js index 934b63e..c8686dd 100644 --- a/tests/integration/security/setup.test.js +++ b/tests/integration/security/setup.test.js @@ -1,18 +1,18 @@ -import { describe, it, expect } from 'vitest' -import { runCli } from '../helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli} from '../helpers.js' describe('dvmi security setup', () => { // --------------------------------------------------------------------------- // --help // --------------------------------------------------------------------------- it('--help exits 0 and mentions credential protection', async () => { - const { stdout, exitCode } = await runCli(['security', 'setup', '--help']) + const {stdout, exitCode} = await 
runCli(['security', 'setup', '--help']) expect(exitCode).toBe(0) expect(stdout.toLowerCase()).toContain('credential') }) it('--help includes expected flags', async () => { - const { stdout } = await runCli(['security', 'setup', '--help']) + const {stdout} = await runCli(['security', 'setup', '--help']) expect(stdout).toContain('--help') expect(stdout).toContain('--json') }) @@ -21,7 +21,7 @@ describe('dvmi security setup', () => { // --json // --------------------------------------------------------------------------- it('--json exits 0 and returns valid JSON', async () => { - const { stdout, exitCode } = await runCli(['security', 'setup', '--json']) + const {stdout, exitCode} = await runCli(['security', 'setup', '--json']) expect(exitCode).toBe(0) const data = JSON.parse(stdout) expect(data).toHaveProperty('platform') @@ -30,7 +30,7 @@ describe('dvmi security setup', () => { }) it('--json tools array includes entries with required fields', async () => { - const { stdout } = await runCli(['security', 'setup', '--json']) + const {stdout} = await runCli(['security', 'setup', '--json']) const data = JSON.parse(stdout) expect(Array.isArray(data.tools)).toBe(true) expect(data.tools.length).toBeGreaterThan(0) @@ -42,19 +42,19 @@ describe('dvmi security setup', () => { }) it('--json platform field matches a supported platform', async () => { - const { stdout } = await runCli(['security', 'setup', '--json']) + const {stdout} = await runCli(['security', 'setup', '--json']) const data = JSON.parse(stdout) expect(['macos', 'linux', 'wsl2']).toContain(data.platform) }) it('--json overallStatus is one of the expected values', async () => { - const { stdout } = await runCli(['security', 'setup', '--json']) + const {stdout} = await runCli(['security', 'setup', '--json']) const data = JSON.parse(stdout) expect(['success', 'partial', 'not-configured']).toContain(data.overallStatus) }) it('--json selection is null when run as health-check', async () => { - const { stdout } = await 
runCli(['security', 'setup', '--json']) + const {stdout} = await runCli(['security', 'setup', '--json']) const data = JSON.parse(stdout) expect(data.selection).toBeNull() }) @@ -63,7 +63,7 @@ describe('dvmi security setup', () => { // CI=true non-interactive exit // --------------------------------------------------------------------------- it('CI=true without --json exits non-zero with descriptive error', async () => { - const { stderr, exitCode } = await runCli(['security', 'setup'], { CI: 'true' }) + const {stderr, exitCode} = await runCli(['security', 'setup'], {CI: 'true'}) expect(exitCode).not.toBe(0) expect(stderr.toLowerCase()).toMatch(/interactive|terminal|tty/) }) @@ -72,7 +72,7 @@ describe('dvmi security setup', () => { // n/a for non-applicable tools // --------------------------------------------------------------------------- it('--json tools array marks non-applicable tools as n/a for detected platform', async () => { - const { stdout } = await runCli(['security', 'setup', '--json']) + const {stdout} = await runCli(['security', 'setup', '--json']) const data = JSON.parse(stdout) const platform = data.platform diff --git a/tests/integration/setup.js b/tests/integration/setup.js index d5d7a3b..5504fa3 100644 --- a/tests/integration/setup.js +++ b/tests/integration/setup.js @@ -1,5 +1,5 @@ -import { createServer } from 'node:http' -import { beforeAll, afterAll } from 'vitest' +import {createServer} from 'node:http' +import {beforeAll, afterAll} from 'vitest' /** * Minimal ClickUp API mock server for integration tests. 
@@ -15,13 +15,13 @@ const TASKS_PAGE_0 = [ { id: 'abc123', name: 'Implement user auth', - status: { status: 'in progress', type: 'in_progress' }, - priority: { id: '2' }, + status: {status: 'in progress', type: 'in_progress'}, + priority: {id: '2'}, due_date: null, url: 'https://app.clickup.com/t/abc123', - assignees: [{ username: 'testdev' }], - list: { id: 'L1', name: 'Sprint 42' }, - folder: { id: 'F1', name: 'Backend', hidden: false }, + assignees: [{username: 'testdev'}], + list: {id: 'L1', name: 'Sprint 42'}, + folder: {id: 'F1', name: 'Backend', hidden: false}, }, ] @@ -29,24 +29,24 @@ const LIST_TASKS = [ { id: 'list-task-1', name: 'List task alpha', - status: { status: 'in progress', type: 'in_progress' }, - priority: { id: '2' }, + status: {status: 'in progress', type: 'in_progress'}, + priority: {id: '2'}, due_date: null, url: 'https://app.clickup.com/t/list-task-1', - assignees: [{ username: 'testdev' }], - list: { id: 'L1', name: 'Sprint 42' }, - folder: { id: 'F1', name: 'Backend', hidden: false }, + assignees: [{username: 'testdev'}], + list: {id: 'L1', name: 'Sprint 42'}, + folder: {id: 'F1', name: 'Backend', hidden: false}, }, { id: 'list-task-2', name: 'List task beta (root list)', - status: { status: 'todo', type: 'open' }, - priority: { id: '3' }, + status: {status: 'todo', type: 'open'}, + priority: {id: '3'}, due_date: null, url: 'https://app.clickup.com/t/list-task-2', - assignees: [{ username: 'testdev' }], - list: { id: 'L1', name: 'Sprint 42' }, - folder: { hidden: true }, + assignees: [{username: 'testdev'}], + list: {id: 'L1', name: 'Sprint 42'}, + folder: {hidden: true}, }, ] @@ -57,7 +57,7 @@ const LIST_TASKS = [ */ function json(res, data, status = 200) { const body = JSON.stringify(data) - res.writeHead(status, { 'Content-Type': 'application/json', 'Content-Length': Buffer.byteLength(body) }) + res.writeHead(status, {'Content-Type': 'application/json', 'Content-Length': Buffer.byteLength(body)}) res.end(body) } @@ -67,31 +67,31 @@ 
const server = createServer((req, res) => { // GET /api/v2/user if (req.method === 'GET' && path === '/api/v2/user') { - return json(res, { user: { id: 42, username: 'testdev' } }) + return json(res, {user: {id: 42, username: 'testdev'}}) } // GET /api/v2/team/:teamId/task const teamTaskMatch = path.match(/^\/api\/v2\/team\/[^/]+\/task$/) if (req.method === 'GET' && teamTaskMatch) { - return json(res, { tasks: TASKS_PAGE_0, has_more: false }) + return json(res, {tasks: TASKS_PAGE_0, has_more: false}) } // GET /api/v2/list/:listId/task const listTaskMatch = path.match(/^\/api\/v2\/list\/([^/]+)\/task$/) if (req.method === 'GET' && listTaskMatch) { if (listTaskMatch[1] === 'NOTFOUND') { - return json(res, { err: 'List not found' }, 404) + return json(res, {err: 'List not found'}, 404) } - return json(res, { tasks: LIST_TASKS, has_more: false }) + return json(res, {tasks: LIST_TASKS, has_more: false}) } // Fallback 404 - json(res, { err: 'Not found' }, 404) + json(res, {err: 'Not found'}, 404) }) beforeAll(async () => { await new Promise((resolve) => server.listen(0, '127.0.0.1', resolve)) - const { port } = /** @type {import('node:net').AddressInfo} */ (server.address()) + const {port} = /** @type {import('node:net').AddressInfo} */ (server.address()) process.env.CLICKUP_API_BASE = `http://127.0.0.1:${port}/api/v2` }) diff --git a/tests/integration/sync-config-ai.test.js b/tests/integration/sync-config-ai.test.js new file mode 100644 index 0000000..c5fd3d1 --- /dev/null +++ b/tests/integration/sync-config-ai.test.js @@ -0,0 +1,25 @@ +import {describe, it, expect} from 'vitest' +import {runCli, runCliJson} from './helpers.js' + +describe('dvmi sync-config-ai', () => { + // T023: --help exits 0 and mentions AI/config/sync + it('--help exits 0 and mentions AI environments or config', async () => { + const {stdout, exitCode} = await runCli(['sync-config-ai', '--help']) + expect(exitCode).toBe(0) + expect(stdout.toLowerCase()).toMatch(/ai|config|sync|environment/) + }) + 
+ // T047: --json exits 0 and outputs valid JSON with environments and categories + it('--json exits 0 and outputs valid JSON with environments and categories keys', async () => { + const result = await runCliJson(['sync-config-ai']) + expect(result).toHaveProperty('environments') + expect(result).toHaveProperty('categories') + expect(Array.isArray(result.environments)).toBe(true) + expect(result.categories).toHaveProperty('mcp') + expect(result.categories).toHaveProperty('command') + expect(result.categories).toHaveProperty('skill') + expect(result.categories).toHaveProperty('agent') + expect(Array.isArray(result.categories.mcp)).toBe(true) + expect(Array.isArray(result.categories.command)).toBe(true) + }) +}) diff --git a/tests/integration/tasks-assigned.test.js b/tests/integration/tasks-assigned.test.js index b01b709..b430a24 100644 --- a/tests/integration/tasks-assigned.test.js +++ b/tests/integration/tasks-assigned.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { runCli, runCliJson } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {runCli, runCliJson} from './helpers.js' // Tests that call the real ClickUp API require a token in the keychain. // In CI there are no real credentials, so we skip those tests. 
@@ -7,7 +7,7 @@ const isCI = Boolean(process.env.CI) describe('tasks assigned', () => { it.skipIf(isCI)('shows assigned tasks table with Lista and Cartella columns', async () => { - const { stdout, exitCode } = await runCli(['tasks', 'assigned']) + const {stdout, exitCode} = await runCli(['tasks', 'assigned']) expect(exitCode).toBe(0) expect(stdout).toContain('Your assigned tasks') expect(stdout).toContain('Lista') @@ -29,7 +29,7 @@ describe('tasks assigned', () => { }) it('--help shows command description', async () => { - const { stdout, exitCode } = await runCli(['tasks', 'assigned', '--help']) + const {stdout, exitCode} = await runCli(['tasks', 'assigned', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('assigned') }) diff --git a/tests/integration/vuln-detail.test.js b/tests/integration/vuln-detail.test.js index d9fa5db..4e9348c 100644 --- a/tests/integration/vuln-detail.test.js +++ b/tests/integration/vuln-detail.test.js @@ -1,17 +1,15 @@ -import { describe, it, expect } from 'vitest' -import { runCli, createMockServer, jsonResponse } from './helpers.js' -import { readFileSync } from 'node:fs' -import { resolve, dirname } from 'node:path' -import { fileURLToPath } from 'node:url' +import {describe, it, expect} from 'vitest' +import {runCli, createMockServer, jsonResponse} from './helpers.js' +import {readFileSync} from 'node:fs' +import {resolve, dirname} from 'node:path' +import {fileURLToPath} from 'node:url' const __dirname = dirname(fileURLToPath(import.meta.url)) -const detailFixture = JSON.parse( - readFileSync(resolve(__dirname, '../fixtures/nvd-responses/cve-detail.json'), 'utf8'), -) +const detailFixture = JSON.parse(readFileSync(resolve(__dirname, '../fixtures/nvd-responses/cve-detail.json'), 'utf8')) describe('dvmi vuln detail', () => { it('shows help', async () => { - const { stdout, exitCode } = await runCli(['vuln', 'detail', '--help']) + const {stdout, exitCode} = await runCli(['vuln', 'detail', '--help']) expect(exitCode).toBe(0) 
expect(stdout).toContain('USAGE') expect(stdout).toContain('CVEID') @@ -19,12 +17,12 @@ describe('dvmi vuln detail', () => { }) it('exits 2 when CVE ID is missing', async () => { - const { exitCode } = await runCli(['vuln', 'detail']) + const {exitCode} = await runCli(['vuln', 'detail']) expect(exitCode).toBe(2) }) it('exits 1 when CVE ID format is invalid', async () => { - const { stderr, exitCode } = await runCli(['vuln', 'detail', 'not-a-cve']) + const {stderr, exitCode} = await runCli(['vuln', 'detail', 'not-a-cve']) expect(exitCode).toBeGreaterThanOrEqual(1) expect(stderr).toMatch(/Invalid CVE ID/i) }) @@ -35,10 +33,7 @@ describe('dvmi vuln detail', () => { }) try { - const { stdout, exitCode } = await runCli( - ['vuln', 'detail', 'CVE-2021-44228'], - { NVD_BASE_URL: server.url }, - ) + const {stdout, exitCode} = await runCli(['vuln', 'detail', 'CVE-2021-44228'], {NVD_BASE_URL: server.url}) expect(exitCode).toBe(0) expect(stdout).toContain('CVE-2021-44228') } finally { @@ -47,10 +42,7 @@ describe('dvmi vuln detail', () => { }) it('outputs valid JSON structure with --json flag', async () => { - const { stdout, stderr, exitCode } = await runCli( - ['vuln', 'detail', 'CVE-2021-44228', '--json'], - {}, - ) + const {stdout, stderr, exitCode} = await runCli(['vuln', 'detail', 'CVE-2021-44228', '--json'], {}) if (exitCode === 0) { const data = JSON.parse(stdout) expect(data).toHaveProperty('id', 'CVE-2021-44228') diff --git a/tests/integration/vuln-scan.test.js b/tests/integration/vuln-scan.test.js index 85e59aa..e416db4 100644 --- a/tests/integration/vuln-scan.test.js +++ b/tests/integration/vuln-scan.test.js @@ -1,12 +1,12 @@ -import { describe, it, expect } from 'vitest' -import { mkdtemp, writeFile, rm } from 'node:fs/promises' -import { tmpdir } from 'node:os' -import { join } from 'node:path' -import { runCli } from './helpers.js' +import {describe, it, expect} from 'vitest' +import {mkdtemp, writeFile, rm} from 'node:fs/promises' +import {tmpdir} from 
'node:os' +import {join} from 'node:path' +import {runCli} from './helpers.js' describe('dvmi vuln scan', () => { it('shows help', async () => { - const { stdout, exitCode } = await runCli(['vuln', 'scan', '--help']) + const {stdout, exitCode} = await runCli(['vuln', 'scan', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('--severity') @@ -17,7 +17,7 @@ describe('dvmi vuln scan', () => { it('exits 2 when no supported lock file is present in an empty directory', async () => { const tmpDir = await mkdtemp(join(tmpdir(), 'dvmi-scan-')) try { - const { exitCode, stdout, stderr } = await runCli(['vuln', 'scan'], { + const {exitCode, stdout, stderr} = await runCli(['vuln', 'scan'], { DVMI_SCAN_DIR: tmpDir, }) // exit 2 = no package manager detected @@ -25,14 +25,14 @@ describe('dvmi vuln scan', () => { const output = stdout + stderr expect(output).toMatch(/No supported package manager/i) } finally { - await rm(tmpDir, { recursive: true, force: true }) + await rm(tmpDir, {recursive: true, force: true}) } }) it('returns JSON with empty findings when no lock file found and --json is passed', async () => { const tmpDir = await mkdtemp(join(tmpdir(), 'dvmi-scan-')) try { - const { stdout, exitCode } = await runCli(['vuln', 'scan', '--json'], { + const {stdout, exitCode} = await runCli(['vuln', 'scan', '--json'], { DVMI_SCAN_DIR: tmpDir, }) // With --json it should exit 0 and return structured JSON even with no lock file @@ -47,7 +47,7 @@ describe('dvmi vuln scan', () => { expect(exitCode).toBe(2) } } finally { - await rm(tmpDir, { recursive: true, force: true }) + await rm(tmpDir, {recursive: true, force: true}) } }) @@ -59,7 +59,7 @@ describe('dvmi vuln scan', () => { // The fake bin directory provides a `pnpm` stub that outputs npm-audit-like JSON. // We run the scan and just assert the command runs and exits with 0 or 1 (not 2). 
- const { exitCode, stdout, stderr } = await runCli(['vuln', 'scan'], { + const {exitCode, stdout, stderr} = await runCli(['vuln', 'scan'], { DVMI_SCAN_DIR: tmpDir, }) // 0 = no vulns, 1 = vulns found, 2 = no lockfile detected @@ -69,7 +69,7 @@ describe('dvmi vuln scan', () => { const combined = stdout + stderr expect(combined.length).toBeGreaterThan(0) } finally { - await rm(tmpDir, { recursive: true, force: true }) + await rm(tmpDir, {recursive: true, force: true}) } }) @@ -78,18 +78,18 @@ describe('dvmi vuln scan', () => { try { await writeFile(join(tmpDir, 'pnpm-lock.yaml'), 'lockfileVersion: 6.0\n', 'utf8') - const { exitCode } = await runCli(['vuln', 'scan', '--no-fail'], { + const {exitCode} = await runCli(['vuln', 'scan', '--no-fail'], { DVMI_SCAN_DIR: tmpDir, }) // With --no-fail the exit code must always be 0 (or 2 for no lockfile, but we have one) expect(exitCode).not.toBe(1) } finally { - await rm(tmpDir, { recursive: true, force: true }) + await rm(tmpDir, {recursive: true, force: true}) } }) it('--severity flag is validated', async () => { - const { stderr, exitCode } = await runCli(['vuln', 'scan', '--severity', 'extreme']) + const {stderr, exitCode} = await runCli(['vuln', 'scan', '--severity', 'extreme']) expect(exitCode).toBe(2) expect(stderr).toMatch(/Expected.*severity|severity.*expected/i) }) @@ -101,7 +101,7 @@ describe('dvmi vuln scan', () => { // runCli always uses isTTY=false (spawned subprocess with non-TTY stdio) // so static table output should appear, not the interactive alt-screen TUI - const { stdout, stderr, exitCode } = await runCli(['vuln', 'scan', '--no-fail'], { + const {stdout, stderr, exitCode} = await runCli(['vuln', 'scan', '--no-fail'], { DVMI_SCAN_DIR: tmpDir, }) const combined = stdout + stderr @@ -114,7 +114,7 @@ describe('dvmi vuln scan', () => { // Exit code must be 0 (--no-fail) or 0 (no vulns found) expect(exitCode).toBe(0) } finally { - await rm(tmpDir, { recursive: true, force: true }) + await rm(tmpDir, {recursive: 
true, force: true}) } }) @@ -123,7 +123,7 @@ describe('dvmi vuln scan', () => { try { await writeFile(join(tmpDir, 'pnpm-lock.yaml'), 'lockfileVersion: 6.0\n', 'utf8') - const { stdout, exitCode } = await runCli(['vuln', 'scan', '--json', '--no-fail'], { + const {stdout, exitCode} = await runCli(['vuln', 'scan', '--json', '--no-fail'], { DVMI_SCAN_DIR: tmpDir, }) @@ -142,7 +142,7 @@ describe('dvmi vuln scan', () => { expect(data.summary).toHaveProperty('total') } } finally { - await rm(tmpDir, { recursive: true, force: true }) + await rm(tmpDir, {recursive: true, force: true}) } }) }) diff --git a/tests/integration/vuln-search.test.js b/tests/integration/vuln-search.test.js index 0e26327..1f1dd47 100644 --- a/tests/integration/vuln-search.test.js +++ b/tests/integration/vuln-search.test.js @@ -1,8 +1,8 @@ -import { describe, it, expect } from 'vitest' -import { runCli, createMockServer, jsonResponse } from './helpers.js' -import { readFileSync } from 'node:fs' -import { resolve, dirname } from 'node:path' -import { fileURLToPath } from 'node:url' +import {describe, it, expect} from 'vitest' +import {runCli, createMockServer, jsonResponse} from './helpers.js' +import {readFileSync} from 'node:fs' +import {resolve, dirname} from 'node:path' +import {fileURLToPath} from 'node:url' const __dirname = dirname(fileURLToPath(import.meta.url)) const searchFixture = JSON.parse( @@ -11,7 +11,7 @@ const searchFixture = JSON.parse( describe('dvmi vuln search', () => { it('shows help', async () => { - const { stdout, exitCode } = await runCli(['vuln', 'search', '--help']) + const {stdout, exitCode} = await runCli(['vuln', 'search', '--help']) expect(exitCode).toBe(0) expect(stdout).toContain('USAGE') expect(stdout).toContain('keyword') @@ -26,7 +26,7 @@ describe('dvmi vuln search', () => { }) try { - const { exitCode } = await runCli(['vuln', 'search'], { NVD_BASE_URL: server.url }) + const {exitCode} = await runCli(['vuln', 'search'], {NVD_BASE_URL: server.url}) 
expect(exitCode).toBe(0) } finally { await server.stop() @@ -34,13 +34,13 @@ describe('dvmi vuln search', () => { }) it('exits 2 when --severity is invalid', async () => { - const { stderr, exitCode } = await runCli(['vuln', 'search', 'openssl', '--severity', 'EXTREME']) + const {stderr, exitCode} = await runCli(['vuln', 'search', 'openssl', '--severity', 'EXTREME']) expect(exitCode).toBe(2) expect(stderr).toMatch(/Expected.*severity/i) }) it('exits 2 when --days is out of range', async () => { - const { stderr, exitCode } = await runCli(['vuln', 'search', 'openssl', '--days', '200']) + const {stderr, exitCode} = await runCli(['vuln', 'search', 'openssl', '--days', '200']) expect(exitCode).toBe(2) expect(stderr).toMatch(/days must be between/) }) @@ -51,10 +51,7 @@ describe('dvmi vuln search', () => { }) try { - const { stdout, exitCode } = await runCli( - ['vuln', 'search', 'openssl'], - { NVD_BASE_URL: server.url }, - ) + const {stdout, exitCode} = await runCli(['vuln', 'search', 'openssl'], {NVD_BASE_URL: server.url}) // The command always succeeds (exit 0) even if the env var isn't wired yet // because the MSW mock in vitest intercepts NVD calls expect(exitCode).toBe(0) @@ -72,10 +69,7 @@ describe('dvmi vuln search', () => { }) try { - const { stdout, exitCode } = await runCli( - ['vuln', 'search', 'openssl'], - { NVD_BASE_URL: server.url }, - ) + const {stdout, exitCode} = await runCli(['vuln', 'search', 'openssl'], {NVD_BASE_URL: server.url}) expect(exitCode).toBe(0) // Static table should be present expect(stdout).toMatch(/CVE ID|openssl/i) @@ -91,10 +85,7 @@ describe('dvmi vuln search', () => { // The vitest MSW intercepts fetch — but this is an integration test (runs via execaNode) // so we cannot rely on MSW. Instead just check the flag is accepted and JSON is returned // even if the NVD call fails (no network in CI). We use DVMI_NO_NVD env to short-circuit. 
- const { stdout, stderr, exitCode } = await runCli( - ['vuln', 'search', 'openssl', '--json'], - {}, - ) + const {stdout, stderr, exitCode} = await runCli(['vuln', 'search', 'openssl', '--json'], {}) // May fail with network error in offline env; just check the flag is parsed if (exitCode === 0) { const data = JSON.parse(stdout) diff --git a/tests/services/ai-config-sync.test.js b/tests/services/ai-config-sync.test.js new file mode 100644 index 0000000..6e28e9c --- /dev/null +++ b/tests/services/ai-config-sync.test.js @@ -0,0 +1,253 @@ +/** + * Service-level integration test: full AI config sync flow. + * + * Creates a real temp directory, seeds fixture files to make a claude-code + * environment detectable, then exercises the full create → deploy → deactivate + * → undeploy → activate → redeploy lifecycle using the real store and deployer. + */ + +import {describe, it, expect, beforeEach, afterEach} from 'vitest' +import {join} from 'node:path' +import {tmpdir} from 'node:os' +import {readFile, mkdir, writeFile, rm} from 'node:fs/promises' +import {existsSync} from 'node:fs' +import {randomUUID} from 'node:crypto' + +import {scanEnvironments} from '../../src/services/ai-env-scanner.js' +import { + loadAIConfig, + addEntry, + deactivateEntry, + activateEntry, + deleteEntry, +} from '../../src/services/ai-config-store.js' +import {deployEntry, undeployEntry} from '../../src/services/ai-env-deployer.js' + +// ────────────────────────────────────────────────────────────────────────────── +// Helpers +// ────────────────────────────────────────────────────────────────────────────── + +function makeTmpDir() { + return join(tmpdir(), `dvmi-sync-test-${Date.now()}-${randomUUID().slice(0, 8)}`) +} + +async function readJson(filePath) { + const raw = await readFile(filePath, 'utf8') + return JSON.parse(raw) +} + +// ────────────────────────────────────────────────────────────────────────────── +// Test suite +// 
────────────────────────────────────────────────────────────────────────────── + +describe('AI config sync — full flow', () => { + let tmpDir + let configPath + let originalEnv + + beforeEach(async () => { + tmpDir = makeTmpDir() + configPath = join(tmpDir, 'ai-config.json') + await mkdir(tmpDir, {recursive: true}) + + // Seed CLAUDE.md so claude-code environment is detected + await writeFile(join(tmpDir, 'CLAUDE.md'), '# Test project\n', 'utf8') + + // Override the store path via env var + originalEnv = process.env.DVMI_AI_CONFIG_PATH + process.env.DVMI_AI_CONFIG_PATH = configPath + }) + + afterEach(async () => { + process.env.DVMI_AI_CONFIG_PATH = originalEnv + await rm(tmpDir, {recursive: true, force: true}) + }) + + it('detects claude-code environment after seeding CLAUDE.md', () => { + const envs = scanEnvironments(tmpDir) + const claudeEnv = envs.find((e) => e.id === 'claude-code') + expect(claudeEnv).toBeDefined() + expect(claudeEnv.detected).toBe(true) + }) + + it('create → deploy: writes MCP entry to .mcp.json', async () => { + const detectedEnvs = scanEnvironments(tmpDir) + + const entry = await addEntry({ + name: 'my-test-server', + type: 'mcp', + environments: ['claude-code'], + params: {transport: 'stdio', command: 'npx', args: ['-y', 'my-test-pkg'], env: {}}, + }) + + expect(entry.id).toBeTruthy() + expect(entry.active).toBe(true) + expect(entry.name).toBe('my-test-server') + + await deployEntry(entry, detectedEnvs, tmpDir) + + const mcpJson = join(tmpDir, '.mcp.json') + expect(existsSync(mcpJson)).toBe(true) + + const parsed = await readJson(mcpJson) + expect(parsed.mcpServers?.['my-test-server']).toBeDefined() + expect(parsed.mcpServers['my-test-server'].command).toBe('npx') + expect(parsed.mcpServers['my-test-server'].args).toEqual(['-y', 'my-test-pkg']) + }) + + it('deactivate → undeploy: removes entry from .mcp.json', async () => { + const detectedEnvs = scanEnvironments(tmpDir) + + const entry = await addEntry({ + name: 'removable-server', + 
type: 'mcp', + environments: ['claude-code'], + params: {transport: 'stdio', command: 'node', args: ['server.js'], env: {}}, + }) + + await deployEntry(entry, detectedEnvs, tmpDir) + + // Verify deployed + const mcpJson = join(tmpDir, '.mcp.json') + const before = await readJson(mcpJson) + expect(before.mcpServers?.['removable-server']).toBeDefined() + + // Deactivate + const deactivated = await deactivateEntry(entry.id) + expect(deactivated.active).toBe(false) + + // Undeploy + await undeployEntry(deactivated, detectedEnvs, tmpDir) + + // Verify removed + const after = await readJson(mcpJson) + expect(after.mcpServers?.['removable-server']).toBeUndefined() + }) + + it('activate → redeploy: restores entry in .mcp.json', async () => { + const detectedEnvs = scanEnvironments(tmpDir) + + const entry = await addEntry({ + name: 'restorable-server', + type: 'mcp', + environments: ['claude-code'], + params: {transport: 'stdio', command: 'python', args: ['-m', 'srv'], env: {}}, + }) + + // Deploy → undeploy → redeploy + await deployEntry(entry, detectedEnvs, tmpDir) + + const deactivated = await deactivateEntry(entry.id) + await undeployEntry(deactivated, detectedEnvs, tmpDir) + + const mcpJson = join(tmpDir, '.mcp.json') + const afterUndeploy = await readJson(mcpJson) + expect(afterUndeploy.mcpServers?.['restorable-server']).toBeUndefined() + + // Re-activate + const reactivated = await activateEntry(entry.id) + expect(reactivated.active).toBe(true) + + // Redeploy + await deployEntry(reactivated, detectedEnvs, tmpDir) + + const afterRedeploy = await readJson(mcpJson) + expect(afterRedeploy.mcpServers?.['restorable-server']).toBeDefined() + expect(afterRedeploy.mcpServers['restorable-server'].command).toBe('python') + }) + + it('delete → undeploy: permanently removes entry from store and filesystem', async () => { + const detectedEnvs = scanEnvironments(tmpDir) + + const entry = await addEntry({ + name: 'deletable-server', + type: 'mcp', + environments: ['claude-code'], + 
params: {transport: 'stdio', command: 'deno', args: ['run', 'server.ts'], env: {}}, + }) + + await deployEntry(entry, detectedEnvs, tmpDir) + + const mcpJson = join(tmpDir, '.mcp.json') + const before = await readJson(mcpJson) + expect(before.mcpServers?.['deletable-server']).toBeDefined() + + // Undeploy first (simulating delete flow in the command) + await undeployEntry(entry, detectedEnvs, tmpDir) + await deleteEntry(entry.id) + + // Entry removed from .mcp.json + const after = await readJson(mcpJson) + expect(after.mcpServers?.['deletable-server']).toBeUndefined() + + // Entry removed from store + const store = await loadAIConfig() + const found = store.entries.find((e) => e.id === entry.id) + expect(found).toBeUndefined() + }) + + it('deploy command entry: writes markdown file to .claude/commands/', async () => { + const detectedEnvs = scanEnvironments(tmpDir) + + const entry = await addEntry({ + name: 'my-command', + type: 'command', + environments: ['claude-code'], + params: { + description: 'A test command', + content: 'Do something useful.', + }, + }) + + await deployEntry(entry, detectedEnvs, tmpDir) + + const cmdFile = join(tmpDir, '.claude', 'commands', 'my-command.md') + expect(existsSync(cmdFile)).toBe(true) + + const content = await readFile(cmdFile, 'utf8') + // deployer writes params.content directly (not the description) + expect(content).toContain('Do something useful.') + }) + + it('undeploy command entry: removes the markdown file', async () => { + const detectedEnvs = scanEnvironments(tmpDir) + + const entry = await addEntry({ + name: 'removable-command', + type: 'command', + environments: ['claude-code'], + params: {description: 'Temp command', content: 'Content here.'}, + }) + + await deployEntry(entry, detectedEnvs, tmpDir) + + const cmdFile = join(tmpDir, '.claude', 'commands', 'removable-command.md') + expect(existsSync(cmdFile)).toBe(true) + + const deactivated = await deactivateEntry(entry.id) + await undeployEntry(deactivated, 
detectedEnvs, tmpDir) + + expect(existsSync(cmdFile)).toBe(false) + }) + + it('store persists multiple entries across reloads', async () => { + await addEntry({ + name: 'server-a', + type: 'mcp', + environments: ['claude-code'], + params: {transport: 'stdio', command: 'node', args: ['a.js'], env: {}}, + }) + + await addEntry({ + name: 'server-b', + type: 'mcp', + environments: ['claude-code'], + params: {transport: 'stdio', command: 'node', args: ['b.js'], env: {}}, + }) + + const store = await loadAIConfig() + expect(store.entries).toHaveLength(2) + expect(store.entries.map((e) => e.name)).toContain('server-a') + expect(store.entries.map((e) => e.name)).toContain('server-b') + }) +}) diff --git a/tests/services/audit-runner.test.js b/tests/services/audit-runner.test.js index 459bf4a..dd37679 100644 --- a/tests/services/audit-runner.test.js +++ b/tests/services/audit-runner.test.js @@ -1,42 +1,92 @@ -import { describe, it, expect } from 'vitest' -import { readFileSync } from 'node:fs' -import { resolve, dirname } from 'node:path' -import { fileURLToPath } from 'node:url' +import {describe, it, expect} from 'vitest' +import {readFileSync} from 'node:fs' +import {resolve, dirname} from 'node:path' +import {fileURLToPath} from 'node:url' const __dirname = dirname(fileURLToPath(import.meta.url)) const fixturesDir = resolve(__dirname, '../fixtures/audit-outputs') describe('normalizeSeverity', () => { it('maps "critical" → Critical', async () => { - const { normalizeSeverity } = await import('../../src/services/audit-runner.js') + const {normalizeSeverity} = await import('../../src/services/audit-runner.js') expect(normalizeSeverity('critical')).toBe('Critical') }) it('maps "moderate" → Medium', async () => { - const { normalizeSeverity } = await import('../../src/services/audit-runner.js') + const {normalizeSeverity} = await import('../../src/services/audit-runner.js') expect(normalizeSeverity('moderate')).toBe('Medium') }) it('maps "info" → Low', async () => { - const 
{ normalizeSeverity } = await import('../../src/services/audit-runner.js') + const {normalizeSeverity} = await import('../../src/services/audit-runner.js') expect(normalizeSeverity('info')).toBe('Low') }) it('returns Unknown for undefined', async () => { - const { normalizeSeverity } = await import('../../src/services/audit-runner.js') + const {normalizeSeverity} = await import('../../src/services/audit-runner.js') expect(normalizeSeverity(undefined)).toBe('Unknown') }) }) describe('summarizeFindings', () => { it('counts findings by severity', async () => { - const { summarizeFindings } = await import('../../src/services/audit-runner.js') + const {summarizeFindings} = await import('../../src/services/audit-runner.js') const findings = [ - { package: 'a', installedVersion: '1.0', severity: 'Critical', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, - { package: 'b', installedVersion: '1.0', severity: 'High', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, - { package: 'c', installedVersion: '1.0', severity: 'Medium', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, - { package: 'd', installedVersion: '1.0', severity: 'Low', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, - { package: 'e', installedVersion: '1.0', severity: 'Unknown', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, + { + package: 'a', + installedVersion: '1.0', + severity: 'Critical', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, + { + package: 'b', + installedVersion: '1.0', + severity: 'High', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, + { + package: 'c', + installedVersion: 
'1.0', + severity: 'Medium', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, + { + package: 'd', + installedVersion: '1.0', + severity: 'Low', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, + { + package: 'e', + installedVersion: '1.0', + severity: 'Unknown', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, ] const summary = summarizeFindings(findings) expect(summary.critical).toBe(1) @@ -48,7 +98,7 @@ describe('summarizeFindings', () => { }) it('returns all zeros for empty findings', async () => { - const { summarizeFindings } = await import('../../src/services/audit-runner.js') + const {summarizeFindings} = await import('../../src/services/audit-runner.js') const summary = summarizeFindings([]) expect(summary.total).toBe(0) expect(summary.critical).toBe(0) @@ -57,33 +107,73 @@ describe('summarizeFindings', () => { describe('filterBySeverity', () => { const findings = [ - { package: 'a', installedVersion: '1.0', severity: 'Critical', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, - { package: 'b', installedVersion: '1.0', severity: 'High', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, - { package: 'c', installedVersion: '1.0', severity: 'Medium', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, - { package: 'd', installedVersion: '1.0', severity: 'Low', cveId: null, advisoryUrl: null, title: null, patchedVersions: null, ecosystem: 'npm', isDirect: null }, + { + package: 'a', + installedVersion: '1.0', + severity: 'Critical', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, + { + package: 'b', + installedVersion: '1.0', 
+ severity: 'High', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, + { + package: 'c', + installedVersion: '1.0', + severity: 'Medium', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, + { + package: 'd', + installedVersion: '1.0', + severity: 'Low', + cveId: null, + advisoryUrl: null, + title: null, + patchedVersions: null, + ecosystem: 'npm', + isDirect: null, + }, ] it('returns all findings when no filter', async () => { - const { filterBySeverity } = await import('../../src/services/audit-runner.js') + const {filterBySeverity} = await import('../../src/services/audit-runner.js') expect(filterBySeverity(findings, undefined)).toHaveLength(4) }) it('filters to high and above when minSeverity=high', async () => { - const { filterBySeverity } = await import('../../src/services/audit-runner.js') + const {filterBySeverity} = await import('../../src/services/audit-runner.js') const result = filterBySeverity(findings, 'high') expect(result).toHaveLength(2) expect(result.map((f) => f.severity)).toEqual(['Critical', 'High']) }) it('filters to only critical when minSeverity=critical', async () => { - const { filterBySeverity } = await import('../../src/services/audit-runner.js') + const {filterBySeverity} = await import('../../src/services/audit-runner.js') const result = filterBySeverity(findings, 'critical') expect(result).toHaveLength(1) expect(result[0].severity).toBe('Critical') }) it('returns all when minSeverity=low', async () => { - const { filterBySeverity } = await import('../../src/services/audit-runner.js') + const {filterBySeverity} = await import('../../src/services/audit-runner.js') const result = filterBySeverity(findings, 'low') expect(result).toHaveLength(4) }) @@ -95,7 +185,7 @@ describe('filterBySeverity', () => { describe('pnpm audit fixture parsing', () => { it('parses pnpm-audit.json fixture correctly via 
runAudit stub', async () => { - const { normalizeSeverity } = await import('../../src/services/audit-runner.js') + const {normalizeSeverity} = await import('../../src/services/audit-runner.js') // Test the normalization logic which drives pnpm parsing expect(normalizeSeverity('critical')).toBe('Critical') diff --git a/tests/services/auth.test.js b/tests/services/auth.test.js index 89d62b7..1b37ede 100644 --- a/tests/services/auth.test.js +++ b/tests/services/auth.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' +import {describe, it, expect, vi, beforeEach} from 'vitest' // Mock shell service vi.mock('../../src/services/shell.js', () => ({ @@ -8,7 +8,7 @@ vi.mock('../../src/services/shell.js', () => ({ })) vi.mock('../../src/services/config.js', () => ({ - loadConfig: vi.fn().mockResolvedValue({ org: 'acme', awsProfile: 'dev', awsRegion: 'eu-west-1' }), + loadConfig: vi.fn().mockResolvedValue({org: 'acme', awsProfile: 'dev', awsRegion: 'eu-west-1'}), saveConfig: vi.fn(), configExists: vi.fn().mockReturnValue(true), CONFIG_PATH: '/tmp/dvmi-test/config.json', @@ -18,26 +18,26 @@ describe('checkGitHubAuth', () => { beforeEach(() => vi.clearAllMocks()) it('returns authenticated when gh auth status succeeds', async () => { - const { exec } = await import('../../src/services/shell.js') + const {exec} = await import('../../src/services/shell.js') vi.mocked(exec).mockResolvedValue({ stdout: '', stderr: 'Logged in to github.com as testdev (oauth token)', exitCode: 0, }) - const { checkGitHubAuth } = await import('../../src/services/auth.js') + const {checkGitHubAuth} = await import('../../src/services/auth.js') const result = await checkGitHubAuth() expect(result.authenticated).toBe(true) expect(result.username).toBe('testdev') }) it('returns not authenticated when gh auth status fails', async () => { - const { exec } = await import('../../src/services/shell.js') + const {exec} = await import('../../src/services/shell.js') 
vi.mocked(exec).mockResolvedValue({ stdout: '', stderr: 'You are not logged into any GitHub hosts', exitCode: 1, }) - const { checkGitHubAuth } = await import('../../src/services/auth.js') + const {checkGitHubAuth} = await import('../../src/services/auth.js') const result = await checkGitHubAuth() expect(result.authenticated).toBe(false) }) @@ -47,13 +47,17 @@ describe('checkAWSAuth', () => { beforeEach(() => vi.clearAllMocks()) it('returns authenticated with account info on success', async () => { - const { exec } = await import('../../src/services/shell.js') + const {exec} = await import('../../src/services/shell.js') vi.mocked(exec).mockResolvedValue({ - stdout: JSON.stringify({ Account: '123456789012', Arn: 'arn:aws:sts::123456789012:assumed-role/dev/user', UserId: 'X' }), + stdout: JSON.stringify({ + Account: '123456789012', + Arn: 'arn:aws:sts::123456789012:assumed-role/dev/user', + UserId: 'X', + }), stderr: '', exitCode: 0, }) - const { checkAWSAuth } = await import('../../src/services/auth.js') + const {checkAWSAuth} = await import('../../src/services/auth.js') const result = await checkAWSAuth() expect(result.authenticated).toBe(true) expect(result.account).toBe('123456789012') @@ -61,9 +65,9 @@ describe('checkAWSAuth', () => { }) it('returns not authenticated when session expired', async () => { - const { exec } = await import('../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: '', stderr: 'ExpiredToken', exitCode: 1 }) - const { checkAWSAuth } = await import('../../src/services/auth.js') + const {exec} = await import('../../src/services/shell.js') + vi.mocked(exec).mockResolvedValue({stdout: '', stderr: 'ExpiredToken', exitCode: 1}) + const {checkAWSAuth} = await import('../../src/services/auth.js') const result = await checkAWSAuth() expect(result.authenticated).toBe(false) }) diff --git a/tests/services/aws-costs.test.js b/tests/services/aws-costs.test.js index b18096f..935fdfd 100644 --- a/tests/services/aws-costs.test.js +++ 
b/tests/services/aws-costs.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' +import {describe, it, expect, vi, beforeEach} from 'vitest' // Mock AWS SDK vi.mock('@aws-sdk/client-cost-explorer', () => ({ @@ -12,20 +12,22 @@ describe('getServiceCosts', () => { beforeEach(() => vi.clearAllMocks()) it('returns cost entries with correct structure (service grouping)', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') + const {CostExplorerClient} = await import('@aws-sdk/client-cost-explorer') const mockSend = vi.fn().mockResolvedValue({ - ResultsByTime: [{ - TimePeriod: { Start: '2026-02-01', End: '2026-03-01' }, - Groups: [ - { Keys: ['AWS Lambda'], Metrics: { UnblendedCost: { Amount: '12.34', Unit: 'USD' } } }, - { Keys: ['API Gateway'], Metrics: { UnblendedCost: { Amount: '5.67', Unit: 'USD' } } }, - ], - }], + ResultsByTime: [ + { + TimePeriod: {Start: '2026-02-01', End: '2026-03-01'}, + Groups: [ + {Keys: ['AWS Lambda'], Metrics: {UnblendedCost: {Amount: '12.34', Unit: 'USD'}}}, + {Keys: ['API Gateway'], Metrics: {UnblendedCost: {Amount: '5.67', Unit: 'USD'}}}, + ], + }, + ], }) - vi.mocked(CostExplorerClient).mockImplementation(() => ({ send: mockSend })) + vi.mocked(CostExplorerClient).mockImplementation(() => ({send: mockSend})) - const { getServiceCosts } = await import('../../src/services/aws-costs.js') - const { entries } = await getServiceCosts('my-service', { project: 'my-service' }) + const {getServiceCosts} = await import('../../src/services/aws-costs.js') + const {entries} = await getServiceCosts('my-service', {project: 'my-service'}) expect(entries).toHaveLength(2) expect(entries[0].serviceName).toBe('AWS Lambda') @@ -34,31 +36,33 @@ describe('getServiceCosts', () => { }) it('returns empty entries when no costs found', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') + const {CostExplorerClient} = await 
import('@aws-sdk/client-cost-explorer') vi.mocked(CostExplorerClient).mockImplementation(() => ({ - send: vi.fn().mockResolvedValue({ ResultsByTime: [] }), + send: vi.fn().mockResolvedValue({ResultsByTime: []}), })) - const { getServiceCosts } = await import('../../src/services/aws-costs.js') - const { entries } = await getServiceCosts('unknown-service', { project: 'unknown' }) + const {getServiceCosts} = await import('../../src/services/aws-costs.js') + const {entries} = await getServiceCosts('unknown-service', {project: 'unknown'}) expect(entries).toHaveLength(0) }) it('strips tag prefix and returns tagValue for groupBy=tag', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') + const {CostExplorerClient} = await import('@aws-sdk/client-cost-explorer') vi.mocked(CostExplorerClient).mockImplementation(() => ({ send: vi.fn().mockResolvedValue({ - ResultsByTime: [{ - Groups: [ - { Keys: ['env$prod'], Metrics: { UnblendedCost: { Amount: '20.00', Unit: 'USD' } } }, - { Keys: ['env$'], Metrics: { UnblendedCost: { Amount: '5.00', Unit: 'USD' } } }, - ], - }], + ResultsByTime: [ + { + Groups: [ + {Keys: ['env$prod'], Metrics: {UnblendedCost: {Amount: '20.00', Unit: 'USD'}}}, + {Keys: ['env$'], Metrics: {UnblendedCost: {Amount: '5.00', Unit: 'USD'}}}, + ], + }, + ], }), })) - const { getServiceCosts } = await import('../../src/services/aws-costs.js') - const { entries } = await getServiceCosts('svc', {}, 'last-month', 'tag', 'env') + const {getServiceCosts} = await import('../../src/services/aws-costs.js') + const {entries} = await getServiceCosts('svc', {}, 'last-month', 'tag', 'env') expect(entries).toHaveLength(2) expect(entries[0].serviceName).toBe('prod') @@ -68,19 +72,19 @@ describe('getServiceCosts', () => { }) it('returns correct row labels for groupBy=both', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') + const {CostExplorerClient} = await 
import('@aws-sdk/client-cost-explorer') vi.mocked(CostExplorerClient).mockImplementation(() => ({ send: vi.fn().mockResolvedValue({ - ResultsByTime: [{ - Groups: [ - { Keys: ['AWS Lambda', 'env$prod'], Metrics: { UnblendedCost: { Amount: '15.00', Unit: 'USD' } } }, - ], - }], + ResultsByTime: [ + { + Groups: [{Keys: ['AWS Lambda', 'env$prod'], Metrics: {UnblendedCost: {Amount: '15.00', Unit: 'USD'}}}], + }, + ], }), })) - const { getServiceCosts } = await import('../../src/services/aws-costs.js') - const { entries } = await getServiceCosts('svc', {}, 'last-month', 'both', 'env') + const {getServiceCosts} = await import('../../src/services/aws-costs.js') + const {entries} = await getServiceCosts('svc', {}, 'last-month', 'both', 'env') expect(entries).toHaveLength(1) expect(entries[0].serviceName).toBe('AWS Lambda') @@ -92,28 +96,24 @@ describe('getTrendCosts', () => { beforeEach(() => vi.clearAllMocks()) it('returns CostTrendSeries[] with daily granularity', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') + const {CostExplorerClient} = await import('@aws-sdk/client-cost-explorer') vi.mocked(CostExplorerClient).mockImplementation(() => ({ send: vi.fn().mockResolvedValue({ ResultsByTime: [ { - TimePeriod: { Start: '2026-01-01' }, - Groups: [ - { Keys: ['AWS Lambda'], Metrics: { UnblendedCost: { Amount: '5.00', Unit: 'USD' } } }, - ], + TimePeriod: {Start: '2026-01-01'}, + Groups: [{Keys: ['AWS Lambda'], Metrics: {UnblendedCost: {Amount: '5.00', Unit: 'USD'}}}], }, { - TimePeriod: { Start: '2026-01-02' }, - Groups: [ - { Keys: ['AWS Lambda'], Metrics: { UnblendedCost: { Amount: '7.00', Unit: 'USD' } } }, - ], + TimePeriod: {Start: '2026-01-02'}, + Groups: [{Keys: ['AWS Lambda'], Metrics: {UnblendedCost: {Amount: '7.00', Unit: 'USD'}}}], }, ], NextPageToken: undefined, }), })) - const { getTrendCosts } = await import('../../src/services/aws-costs.js') + const {getTrendCosts} = await import('../../src/services/aws-costs.js') 
const series = await getTrendCosts('service') expect(series).toHaveLength(1) @@ -124,26 +124,31 @@ describe('getTrendCosts', () => { }) it('handles NextPageToken pagination', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') - const mockSend = vi.fn() + const {CostExplorerClient} = await import('@aws-sdk/client-cost-explorer') + const mockSend = vi + .fn() .mockResolvedValueOnce({ - ResultsByTime: [{ - TimePeriod: { Start: '2026-01-01' }, - Groups: [{ Keys: ['EC2'], Metrics: { UnblendedCost: { Amount: '3.00', Unit: 'USD' } } }], - }], + ResultsByTime: [ + { + TimePeriod: {Start: '2026-01-01'}, + Groups: [{Keys: ['EC2'], Metrics: {UnblendedCost: {Amount: '3.00', Unit: 'USD'}}}], + }, + ], NextPageToken: 'page2', }) .mockResolvedValueOnce({ - ResultsByTime: [{ - TimePeriod: { Start: '2026-01-02' }, - Groups: [{ Keys: ['EC2'], Metrics: { UnblendedCost: { Amount: '4.00', Unit: 'USD' } } }], - }], + ResultsByTime: [ + { + TimePeriod: {Start: '2026-01-02'}, + Groups: [{Keys: ['EC2'], Metrics: {UnblendedCost: {Amount: '4.00', Unit: 'USD'}}}], + }, + ], NextPageToken: undefined, }) - vi.mocked(CostExplorerClient).mockImplementation(() => ({ send: mockSend })) + vi.mocked(CostExplorerClient).mockImplementation(() => ({send: mockSend})) - const { getTrendCosts } = await import('../../src/services/aws-costs.js') + const {getTrendCosts} = await import('../../src/services/aws-costs.js') const series = await getTrendCosts('service') expect(mockSend).toHaveBeenCalledTimes(2) @@ -154,15 +159,15 @@ describe('getTrendCosts', () => { describe('cost formatting helpers', () => { it('formats currency correctly', async () => { - const { formatCurrency } = await import('../../src/formatters/cost.js') + const {formatCurrency} = await import('../../src/formatters/cost.js') expect(formatCurrency(12.345)).toBe('$12.35') }) it('calculates total', async () => { - const { calculateTotal } = await import('../../src/formatters/cost.js') + const 
{calculateTotal} = await import('../../src/formatters/cost.js') const entries = [ - { serviceName: 'A', amount: 10, unit: 'USD', period: { start: '', end: '' } }, - { serviceName: 'B', amount: 5.5, unit: 'USD', period: { start: '', end: '' } }, + {serviceName: 'A', amount: 10, unit: 'USD', period: {start: '', end: ''}}, + {serviceName: 'B', amount: 5.5, unit: 'USD', period: {start: '', end: ''}}, ] expect(calculateTotal(entries)).toBeCloseTo(15.5) }) diff --git a/tests/services/changelog.test.js b/tests/services/changelog.test.js index 0d267bb..b79c7b1 100644 --- a/tests/services/changelog.test.js +++ b/tests/services/changelog.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi } from 'vitest' +import {describe, it, expect, vi} from 'vitest' vi.mock('../../src/services/shell.js', () => ({ exec: vi.fn(), @@ -14,7 +14,7 @@ vi.mock('../../src/services/shell.js', () => ({ function parseConventionalCommit(message) { const match = message.match(/^(\w+)(?:\(([^)]+)\))?!?: (.+)/) if (!match) return null - return { type: match[1], scope: match[2] ?? '', description: match[3] } + return {type: match[1], scope: match[2] ?? '', description: match[3]} } describe('parseConventionalCommit', () => { @@ -51,13 +51,8 @@ describe('parseConventionalCommit', () => { describe('changelog grouping', () => { it('groups commits by type', () => { - const commits = [ - 'feat(auth): add login flow', - 'fix: fix timeout', - 'chore: update deps', - 'random message', - ] - const sections = { feat: [], fix: [], chore: [], docs: [], refactor: [], test: [], other: [] } + const commits = ['feat(auth): add login flow', 'fix: fix timeout', 'chore: update deps', 'random message'] + const sections = {feat: [], fix: [], chore: [], docs: [], refactor: [], test: [], other: []} for (const msg of commits) { const parsed = parseConventionalCommit(msg) const type = parsed?.type ?? 
'other' diff --git a/tests/services/clickup.test.js b/tests/services/clickup.test.js index 0f2066a..09ccca1 100644 --- a/tests/services/clickup.test.js +++ b/tests/services/clickup.test.js @@ -1,6 +1,6 @@ -import { describe, it, expect, vi, beforeEach, beforeAll, afterAll } from 'vitest' -import { http, HttpResponse } from 'msw' -import { server } from '../setup.js' +import {describe, it, expect, vi, beforeEach, beforeAll, afterAll} from 'vitest' +import {http, HttpResponse} from 'msw' +import {server} from '../setup.js' // Use CLICKUP_TOKEN env var to bypass keytar entirely (platform-safe: works on Linux CI // where libsecret/D-Bus may not be available). getToken() checks this env var first. @@ -20,7 +20,7 @@ vi.mock('keytar', () => ({ })) vi.mock('../../src/services/config.js', () => ({ - loadConfig: vi.fn().mockResolvedValue({ org: 'acme', awsProfile: 'dev', clickup: {} }), + loadConfig: vi.fn().mockResolvedValue({org: 'acme', awsProfile: 'dev', clickup: {}}), saveConfig: vi.fn(), configExists: vi.fn().mockReturnValue(true), CONFIG_PATH: '/tmp/dvmi-test/config.json', @@ -50,14 +50,14 @@ function makeApiTask(overrides = {}) { return { id: 't1', name: 'Test task', - status: { status: 'in progress', type: 'open' }, - priority: { id: '3' }, + status: {status: 'in progress', type: 'open'}, + priority: {id: '3'}, start_date: null, due_date: null, url: 'https://app.clickup.com/t/t1', assignees: [], - list: { id: 'L1', name: 'Sprint 42' }, - folder: { id: 'F1', name: 'Backend', hidden: false }, + list: {id: 'L1', name: 'Sprint 42'}, + folder: {id: 'F1', name: 'Backend', hidden: false}, ...overrides, } } @@ -70,10 +70,10 @@ describe('validateToken()', () => { beforeEach(() => vi.clearAllMocks()) it('returns { valid: true, user } when token is valid', async () => { - const { validateToken } = await import('../../src/services/clickup.js') + const {validateToken} = await import('../../src/services/clickup.js') const result = await validateToken() 
expect(result.valid).toBe(true) - expect(result.user).toEqual({ id: 42, username: 'testdev' }) + expect(result.user).toEqual({id: 42, username: 'testdev'}) }) it('returns { valid: false } when token retrieval fails (no token stored)', async () => { @@ -85,7 +85,7 @@ describe('validateToken()', () => { try { const keytar = await import('keytar') vi.mocked(keytar.default.getPassword).mockResolvedValueOnce(null) - const { validateToken: validateToken2 } = await import('../../src/services/clickup.js') + const {validateToken: validateToken2} = await import('../../src/services/clickup.js') const result = await validateToken2() expect(result.valid).toBe(false) } finally { @@ -109,11 +109,11 @@ describe('getTasksToday()', () => { server.use( http.get('https://api.clickup.com/api/v2/team/:teamId/task', () => HttpResponse.json({ - tasks: [makeApiTask({ id: 't1', status: { status: 'in_progress', type: 'in_progress' } })], + tasks: [makeApiTask({id: 't1', status: {status: 'in_progress', type: 'in_progress'}})], }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(1) expect(tasks[0].id).toBe('t1') @@ -128,15 +128,15 @@ describe('getTasksToday()', () => { tasks: [ makeApiTask({ id: 't2', - status: { status: 'in review', type: 'custom' }, + status: {status: 'in review', type: 'custom'}, start_date: localMidnightTimestamp(-1), // yesterday - due_date: localMidnightTimestamp(1), // tomorrow + due_date: localMidnightTimestamp(1), // tomorrow }), ], }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(1) expect(tasks[0].id).toBe('t2') @@ -149,7 +149,7 @@ describe('getTasksToday()', () => { tasks: [ makeApiTask({ id: 't3', - status: { status: 
'open', type: 'open' }, + status: {status: 'open', type: 'open'}, start_date: localMidnightTimestamp(0), // today due_date: localMidnightTimestamp(3), }), @@ -157,7 +157,7 @@ describe('getTasksToday()', () => { }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(1) expect(tasks[0].id).toBe('t3') @@ -170,7 +170,7 @@ describe('getTasksToday()', () => { tasks: [ makeApiTask({ id: 't4', - status: { status: 'todo', type: 'open' }, + status: {status: 'todo', type: 'open'}, start_date: null, due_date: localMidnightTimestamp(0), // today }), @@ -178,7 +178,7 @@ describe('getTasksToday()', () => { }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(1) expect(tasks[0].id).toBe('t4') @@ -193,14 +193,14 @@ describe('getTasksToday()', () => { tasks: [ makeApiTask({ id: 't5', - status: { status: 'in progress', type: 'in_progress' }, + status: {status: 'in progress', type: 'in_progress'}, due_date: localMidnightTimestamp(-3), // 3 days ago }), ], }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(1) expect(tasks[0].id).toBe('t5') @@ -214,14 +214,18 @@ describe('getTasksToday()', () => { HttpResponse.json({ tasks: [ // Italian "FATTO" with ClickUp type "closed" - makeApiTask({ id: 't6', status: { status: 'FATTO', type: 'closed' }, due_date: localMidnightTimestamp(0) }), + makeApiTask({id: 't6', status: {status: 'FATTO', type: 'closed'}, due_date: localMidnightTimestamp(0)}), // English "completed" with ClickUp type "closed" - makeApiTask({ id: 't7', status: { status: 
'completed', type: 'closed' }, due_date: localMidnightTimestamp(-1) }), + makeApiTask({ + id: 't7', + status: {status: 'completed', type: 'closed'}, + due_date: localMidnightTimestamp(-1), + }), ], }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(0) }) @@ -231,12 +235,16 @@ describe('getTasksToday()', () => { http.get('https://api.clickup.com/api/v2/team/:teamId/task', () => HttpResponse.json({ tasks: [ - makeApiTask({ id: 't8', status: { status: 'COMPLETATO', type: 'closed' }, due_date: localMidnightTimestamp(-5) }), + makeApiTask({ + id: 't8', + status: {status: 'COMPLETATO', type: 'closed'}, + due_date: localMidnightTimestamp(-5), + }), ], }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(0) }) @@ -250,7 +258,7 @@ describe('getTasksToday()', () => { tasks: [ makeApiTask({ id: 't9', - status: { status: 'todo', type: 'open' }, + status: {status: 'todo', type: 'open'}, start_date: localMidnightTimestamp(1), due_date: localMidnightTimestamp(5), }), @@ -258,7 +266,7 @@ describe('getTasksToday()', () => { }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(0) }) @@ -268,12 +276,12 @@ describe('getTasksToday()', () => { http.get('https://api.clickup.com/api/v2/team/:teamId/task', () => HttpResponse.json({ tasks: [ - makeApiTask({ id: 't10', status: { status: 'todo', type: 'open' }, due_date: localMidnightTimestamp(2) }), + makeApiTask({id: 't10', status: {status: 'todo', type: 'open'}, due_date: localMidnightTimestamp(2)}), ], }), ), ) - const { 
getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(0) }) @@ -288,12 +296,12 @@ describe('getTasksToday()', () => { http.get('https://api.clickup.com/api/v2/team/:teamId/task', () => HttpResponse.json({ tasks: [ - makeApiTask({ id: 't11', status: { status: 'todo', type: 'open' }, due_date: String(localMidnight.getTime()) }), + makeApiTask({id: 't11', status: {status: 'todo', type: 'open'}, due_date: String(localMidnight.getTime())}), ], }), ), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') expect(tasks.length).toBe(1) expect(tasks[0].id).toBe('t11') @@ -308,7 +316,7 @@ describe('getTeams()', () => { beforeEach(() => vi.clearAllMocks()) it('returns array of { id, name } when teams exist', async () => { - const { getTeams } = await import('../../src/services/clickup.js') + const {getTeams} = await import('../../src/services/clickup.js') const teams = await getTeams() expect(Array.isArray(teams)).toBe(true) expect(teams.length).toBeGreaterThan(0) @@ -322,7 +330,7 @@ describe('getTeams()', () => { // MSW returns one team by default; this tests the mapping logic with a custom response. // Since MSW can't be easily overridden per-test here, we verify the non-empty case above // and trust the mapping logic: `(data.teams ?? []).map(...)` handles empty arrays. 
- const { getTeams } = await import('../../src/services/clickup.js') + const {getTeams} = await import('../../src/services/clickup.js') const teams = await getTeams() // At minimum, mapping must return an array expect(Array.isArray(teams)).toBe(true) @@ -342,12 +350,14 @@ describe('getTasks() — list/folder mapping', () => { server.use( http.get('https://api.clickup.com/api/v2/team/:teamId/task', () => HttpResponse.json({ - tasks: [makeApiTask({ list: { id: 'L42', name: 'Sprint 42' }, folder: { id: 'F1', name: 'Backend', hidden: false } })], + tasks: [ + makeApiTask({list: {id: 'L42', name: 'Sprint 42'}, folder: {id: 'F1', name: 'Backend', hidden: false}}), + ], has_more: false, }), ), ) - const { getTasks } = await import('../../src/services/clickup.js') + const {getTasks} = await import('../../src/services/clickup.js') const tasks = await getTasks('12345') expect(tasks[0].listId).toBe('L42') expect(tasks[0].listName).toBe('Sprint 42') @@ -357,12 +367,12 @@ describe('getTasks() — list/folder mapping', () => { server.use( http.get('https://api.clickup.com/api/v2/team/:teamId/task', () => HttpResponse.json({ - tasks: [makeApiTask({ folder: { id: 'F99', name: 'Frontend', hidden: false } })], + tasks: [makeApiTask({folder: {id: 'F99', name: 'Frontend', hidden: false}})], has_more: false, }), ), ) - const { getTasks } = await import('../../src/services/clickup.js') + const {getTasks} = await import('../../src/services/clickup.js') const tasks = await getTasks('12345') expect(tasks[0].folderId).toBe('F99') expect(tasks[0].folderName).toBe('Frontend') @@ -372,12 +382,12 @@ describe('getTasks() — list/folder mapping', () => { server.use( http.get('https://api.clickup.com/api/v2/team/:teamId/task', () => HttpResponse.json({ - tasks: [makeApiTask({ folder: { hidden: true } })], + tasks: [makeApiTask({folder: {hidden: true}})], has_more: false, }), ), ) - const { getTasks } = await import('../../src/services/clickup.js') + const {getTasks} = await 
import('../../src/services/clickup.js') const tasks = await getTasks('12345') expect(tasks[0].folderId).toBeNull() expect(tasks[0].folderName).toBeNull() @@ -392,23 +402,23 @@ describe('getTasks() — pagination', () => { it('fetches all pages when has_more=true on first page', async () => { let callCount = 0 server.use( - http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({ request }) => { + http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({request}) => { const url = new URL(request.url) const page = Number(url.searchParams.get('page') ?? '0') callCount++ if (page === 0) { return HttpResponse.json({ - tasks: [makeApiTask({ id: 'page0-t1' }), makeApiTask({ id: 'page0-t2' }), makeApiTask({ id: 'page0-t3' })], + tasks: [makeApiTask({id: 'page0-t1'}), makeApiTask({id: 'page0-t2'}), makeApiTask({id: 'page0-t3'})], has_more: true, }) } return HttpResponse.json({ - tasks: [makeApiTask({ id: 'page1-t1' }), makeApiTask({ id: 'page1-t2' })], + tasks: [makeApiTask({id: 'page1-t1'}), makeApiTask({id: 'page1-t2'})], has_more: false, }) }), ) - const { getTasks } = await import('../../src/services/clickup.js') + const {getTasks} = await import('../../src/services/clickup.js') const tasks = await getTasks('12345') expect(tasks.length).toBe(5) expect(callCount).toBe(2) @@ -417,16 +427,16 @@ describe('getTasks() — pagination', () => { it('calls onProgress callback with cumulative count after each page', async () => { server.use( - http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({ request }) => { + http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({request}) => { const url = new URL(request.url) const page = Number(url.searchParams.get('page') ?? 
'0') if (page === 0) { - return HttpResponse.json({ tasks: [makeApiTask({ id: 'p0' }), makeApiTask({ id: 'p0b' })], has_more: true }) + return HttpResponse.json({tasks: [makeApiTask({id: 'p0'}), makeApiTask({id: 'p0b'})], has_more: true}) } - return HttpResponse.json({ tasks: [makeApiTask({ id: 'p1' })], has_more: false }) + return HttpResponse.json({tasks: [makeApiTask({id: 'p1'})], has_more: false}) }), ) - const { getTasks } = await import('../../src/services/clickup.js') + const {getTasks} = await import('../../src/services/clickup.js') const progressCounts = [] await getTasks('12345', {}, (count) => progressCounts.push(count)) expect(progressCounts).toEqual([2, 3]) @@ -445,7 +455,7 @@ describe('getTasksToday() — parallel calls and deduplication', () => { it('deduplicates tasks that appear in both due-date and in-progress calls', async () => { // Both calls return the same task ID 'shared-1' server.use( - http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({ request }) => { + http.get('https://api.clickup.com/api/v2/team/:teamId/task', ({request}) => { const url = new URL(request.url) const hasDueDateLt = url.searchParams.has('due_date_lt') const hasStatus = url.searchParams.has('statuses[]') @@ -454,8 +464,8 @@ describe('getTasksToday() — parallel calls and deduplication', () => { // due_date_lt call: returns shared task + an exclusive overdue task return HttpResponse.json({ tasks: [ - makeApiTask({ id: 'shared-1', due_date: localMidnightTimestamp(-1) }), - makeApiTask({ id: 'overdue-only', due_date: localMidnightTimestamp(-2) }), + makeApiTask({id: 'shared-1', due_date: localMidnightTimestamp(-1)}), + makeApiTask({id: 'overdue-only', due_date: localMidnightTimestamp(-2)}), ], has_more: false, }) @@ -464,16 +474,16 @@ describe('getTasksToday() — parallel calls and deduplication', () => { // in-progress call: returns shared task + an exclusive in-progress task return HttpResponse.json({ tasks: [ - makeApiTask({ id: 'shared-1', status: { status: 'in 
progress', type: 'in_progress' } }), - makeApiTask({ id: 'inprogress-only', status: { status: 'in progress', type: 'in_progress' } }), + makeApiTask({id: 'shared-1', status: {status: 'in progress', type: 'in_progress'}}), + makeApiTask({id: 'inprogress-only', status: {status: 'in progress', type: 'in_progress'}}), ], has_more: false, }) } - return HttpResponse.json({ tasks: [], has_more: false }) + return HttpResponse.json({tasks: [], has_more: false}) }), ) - const { getTasksToday } = await import('../../src/services/clickup.js') + const {getTasksToday} = await import('../../src/services/clickup.js') const tasks = await getTasksToday('12345') const ids = tasks.map((t) => t.id) // shared-1 must appear exactly once @@ -493,12 +503,12 @@ describe('getTasksByList()', () => { it('calls /v2/list/{listId}/task endpoint (not /team/...)', async () => { let capturedUrl = '' server.use( - http.get('https://api.clickup.com/api/v2/list/:listId/task', ({ request }) => { + http.get('https://api.clickup.com/api/v2/list/:listId/task', ({request}) => { capturedUrl = request.url - return HttpResponse.json({ tasks: [makeApiTask({ id: 'lt1' })], has_more: false }) + return HttpResponse.json({tasks: [makeApiTask({id: 'lt1'})], has_more: false}) }), ) - const { getTasksByList } = await import('../../src/services/clickup.js') + const {getTasksByList} = await import('../../src/services/clickup.js') const tasks = await getTasksByList('L99') expect(tasks.length).toBe(1) expect(tasks[0].id).toBe('lt1') @@ -508,20 +518,20 @@ describe('getTasksByList()', () => { it('paginates correctly across multiple pages', async () => { server.use( - http.get('https://api.clickup.com/api/v2/list/:listId/task', ({ request }) => { + http.get('https://api.clickup.com/api/v2/list/:listId/task', ({request}) => { const url = new URL(request.url) const page = Number(url.searchParams.get('page') ?? 
'0') - if (page === 0) return HttpResponse.json({ tasks: [makeApiTask({ id: 'l-p0' })], has_more: true }) - return HttpResponse.json({ tasks: [makeApiTask({ id: 'l-p1' })], has_more: false }) + if (page === 0) return HttpResponse.json({tasks: [makeApiTask({id: 'l-p0'})], has_more: true}) + return HttpResponse.json({tasks: [makeApiTask({id: 'l-p1'})], has_more: false}) }), ) - const { getTasksByList } = await import('../../src/services/clickup.js') + const {getTasksByList} = await import('../../src/services/clickup.js') const tasks = await getTasksByList('L99') expect(tasks.map((t) => t.id)).toEqual(['l-p0', 'l-p1']) }) it('throws user-friendly error on 404', async () => { - const { getTasksByList } = await import('../../src/services/clickup.js') + const {getTasksByList} = await import('../../src/services/clickup.js') await expect(getTasksByList('NOTFOUND')).rejects.toThrow('Lista non trovata o non accessibile') }) @@ -529,12 +539,14 @@ describe('getTasksByList()', () => { server.use( http.get('https://api.clickup.com/api/v2/list/:listId/task', () => HttpResponse.json({ - tasks: [makeApiTask({ list: { id: 'L5', name: 'My List' }, folder: { id: 'F5', name: 'My Folder', hidden: false } })], + tasks: [ + makeApiTask({list: {id: 'L5', name: 'My List'}, folder: {id: 'F5', name: 'My Folder', hidden: false}}), + ], has_more: false, }), ), ) - const { getTasksByList } = await import('../../src/services/clickup.js') + const {getTasksByList} = await import('../../src/services/clickup.js') const tasks = await getTasksByList('L5') expect(tasks[0].listId).toBe('L5') expect(tasks[0].listName).toBe('My List') diff --git a/tests/services/cloudwatch-logs.test.js b/tests/services/cloudwatch-logs.test.js index bf5e3f3..24f3c09 100644 --- a/tests/services/cloudwatch-logs.test.js +++ b/tests/services/cloudwatch-logs.test.js @@ -1,10 +1,10 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' -import { sinceToEpochMs } from '../../src/services/cloudwatch-logs.js' +import 
{describe, it, expect, vi, beforeEach} from 'vitest' +import {sinceToEpochMs} from '../../src/services/cloudwatch-logs.js' // Mock the CloudWatch Logs SDK vi.mock('@aws-sdk/client-cloudwatch-logs', () => { return { - CloudWatchLogsClient: vi.fn().mockImplementation(() => ({ send: vi.fn() })), + CloudWatchLogsClient: vi.fn().mockImplementation(() => ({send: vi.fn()})), paginateDescribeLogGroups: vi.fn(), FilterLogEventsCommand: vi.fn(), } @@ -13,7 +13,7 @@ vi.mock('@aws-sdk/client-cloudwatch-logs', () => { describe('sinceToEpochMs', () => { it('returns a startTime roughly 1 hour ago for "1h"', () => { const before = Date.now() - const { startTime, endTime } = sinceToEpochMs('1h') + const {startTime, endTime} = sinceToEpochMs('1h') const after = Date.now() expect(endTime).toBeGreaterThanOrEqual(before) @@ -22,12 +22,12 @@ describe('sinceToEpochMs', () => { }) it('returns a startTime roughly 24 hours ago for "24h"', () => { - const { startTime, endTime } = sinceToEpochMs('24h') + const {startTime, endTime} = sinceToEpochMs('24h') expect(endTime - startTime).toBeCloseTo(24 * 60 * 60 * 1000, -2) }) it('returns a startTime roughly 7 days ago for "7d"', () => { - const { startTime, endTime } = sinceToEpochMs('7d') + const {startTime, endTime} = sinceToEpochMs('7d') expect(endTime - startTime).toBeCloseTo(7 * 24 * 60 * 60 * 1000, -2) }) @@ -41,26 +41,24 @@ describe('listLogGroups', () => { beforeEach(() => vi.clearAllMocks()) it('returns paginated log groups as LogGroup[]', async () => { - const { paginateDescribeLogGroups } = await import('@aws-sdk/client-cloudwatch-logs') + const {paginateDescribeLogGroups} = await import('@aws-sdk/client-cloudwatch-logs') // Mock async iterable paginator returning 2 pages vi.mocked(paginateDescribeLogGroups).mockReturnValue( (async function* () { yield { logGroups: [ - { logGroupName: '/aws/lambda/fn-a', storedBytes: 1024, retentionInDays: 30, creationTime: 1711234567890 }, - { logGroupName: '/aws/lambda/fn-b', storedBytes: 2048 }, + 
{logGroupName: '/aws/lambda/fn-a', storedBytes: 1024, retentionInDays: 30, creationTime: 1711234567890}, + {logGroupName: '/aws/lambda/fn-b', storedBytes: 2048}, ], } yield { - logGroups: [ - { logGroupName: '/aws/lambda/fn-c' }, - ], + logGroups: [{logGroupName: '/aws/lambda/fn-c'}], } })(), ) - const { listLogGroups } = await import('../../src/services/cloudwatch-logs.js') + const {listLogGroups} = await import('../../src/services/cloudwatch-logs.js') const groups = await listLogGroups('eu-west-1') expect(groups).toHaveLength(3) @@ -77,17 +75,17 @@ describe('filterLogEvents', () => { beforeEach(() => vi.clearAllMocks()) it('returns a LogFilterResult with events', async () => { - const { CloudWatchLogsClient } = await import('@aws-sdk/client-cloudwatch-logs') + const {CloudWatchLogsClient} = await import('@aws-sdk/client-cloudwatch-logs') const mockSend = vi.fn().mockResolvedValue({ events: [ - { eventId: 'e1', logStreamName: 'stream-a', timestamp: 1711234567890, message: 'hello world' }, - { eventId: 'e2', logStreamName: 'stream-a', timestamp: 1711234568000, message: 'second event' }, + {eventId: 'e1', logStreamName: 'stream-a', timestamp: 1711234567890, message: 'hello world'}, + {eventId: 'e2', logStreamName: 'stream-a', timestamp: 1711234568000, message: 'second event'}, ], nextToken: undefined, }) - vi.mocked(CloudWatchLogsClient).mockImplementation(() => ({ send: mockSend })) + vi.mocked(CloudWatchLogsClient).mockImplementation(() => ({send: mockSend})) - const { filterLogEvents } = await import('../../src/services/cloudwatch-logs.js') + const {filterLogEvents} = await import('../../src/services/cloudwatch-logs.js') const result = await filterLogEvents('/aws/lambda/fn-a', 'ERROR', 1000, 2000, 100) expect(result.events).toHaveLength(2) @@ -99,18 +97,18 @@ describe('filterLogEvents', () => { }) it('sets truncated=true when events count equals the limit', async () => { - const { CloudWatchLogsClient } = await import('@aws-sdk/client-cloudwatch-logs') - const 
events = Array.from({ length: 5 }, (_, i) => ({ + const {CloudWatchLogsClient} = await import('@aws-sdk/client-cloudwatch-logs') + const events = Array.from({length: 5}, (_, i) => ({ eventId: `e${i}`, logStreamName: 'stream', timestamp: 1000 + i, message: `msg ${i}`, })) vi.mocked(CloudWatchLogsClient).mockImplementation(() => ({ - send: vi.fn().mockResolvedValue({ events, nextToken: undefined }), + send: vi.fn().mockResolvedValue({events, nextToken: undefined}), })) - const { filterLogEvents } = await import('../../src/services/cloudwatch-logs.js') + const {filterLogEvents} = await import('../../src/services/cloudwatch-logs.js') const result = await filterLogEvents('/aws/lambda/fn-a', '', 0, 9999, 5) expect(result.truncated).toBe(true) @@ -118,15 +116,15 @@ describe('filterLogEvents', () => { }) it('sets truncated=true when nextToken is present', async () => { - const { CloudWatchLogsClient } = await import('@aws-sdk/client-cloudwatch-logs') + const {CloudWatchLogsClient} = await import('@aws-sdk/client-cloudwatch-logs') vi.mocked(CloudWatchLogsClient).mockImplementation(() => ({ send: vi.fn().mockResolvedValue({ - events: [{ eventId: 'x', logStreamName: 's', timestamp: 0, message: 'm' }], + events: [{eventId: 'x', logStreamName: 's', timestamp: 0, message: 'm'}], nextToken: 'token123', }), })) - const { filterLogEvents } = await import('../../src/services/cloudwatch-logs.js') + const {filterLogEvents} = await import('../../src/services/cloudwatch-logs.js') const result = await filterLogEvents('/aws/lambda/fn-a', '', 0, 9999, 100) expect(result.truncated).toBe(true) }) diff --git a/tests/services/config.test.js b/tests/services/config.test.js index c78ed04..1df0fa8 100644 --- a/tests/services/config.test.js +++ b/tests/services/config.test.js @@ -1,12 +1,12 @@ -import { describe, it, expect, beforeEach, vi } from 'vitest' -import { vol } from 'memfs' +import {describe, it, expect, beforeEach, vi} from 'vitest' +import {vol} from 'memfs' vi.mock('node:fs', async () 
=> { - const { fs } = await import('memfs') + const {fs} = await import('memfs') return fs }) vi.mock('node:fs/promises', async () => { - const { fs } = await import('memfs') + const {fs} = await import('memfs') return fs.promises }) @@ -16,15 +16,15 @@ describe('loadConfig', () => { beforeEach(() => vol.reset()) it('returns defaults when config missing', async () => { - const { loadConfig } = await import('../../src/services/config.js') + const {loadConfig} = await import('../../src/services/config.js') const config = await loadConfig(CONFIG_PATH) expect(config.org).toBe('') expect(config.awsRegion).toBe('eu-west-1') }) it('reads existing config', async () => { - vol.fromJSON({ [CONFIG_PATH]: JSON.stringify({ org: 'acme', awsProfile: 'dev' }) }) - const { loadConfig } = await import('../../src/services/config.js') + vol.fromJSON({[CONFIG_PATH]: JSON.stringify({org: 'acme', awsProfile: 'dev'})}) + const {loadConfig} = await import('../../src/services/config.js') const config = await loadConfig(CONFIG_PATH) expect(config.org).toBe('acme') expect(config.awsProfile).toBe('dev') @@ -35,8 +35,8 @@ describe('saveConfig', () => { beforeEach(() => vol.reset()) it('creates config file', async () => { - const { saveConfig, loadConfig } = await import('../../src/services/config.js') - await saveConfig({ org: 'test', awsProfile: 'prod', awsRegion: 'us-east-1' }, CONFIG_PATH) + const {saveConfig, loadConfig} = await import('../../src/services/config.js') + await saveConfig({org: 'test', awsProfile: 'prod', awsRegion: 'us-east-1'}, CONFIG_PATH) const config = await loadConfig(CONFIG_PATH) expect(config.org).toBe('test') }) diff --git a/tests/services/costs.test.js b/tests/services/costs.test.js index 8aa162d..6566622 100644 --- a/tests/services/costs.test.js +++ b/tests/services/costs.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' -import { formatCurrency, calculateTotal } from '../../src/formatters/cost.js' +import {describe, it, expect, 
vi, beforeEach} from 'vitest' +import {formatCurrency, calculateTotal} from '../../src/formatters/cost.js' // Mock AWS SDK vi.mock('@aws-sdk/client-cost-explorer', () => ({ @@ -13,20 +13,22 @@ describe('getServiceCosts', () => { beforeEach(() => vi.clearAllMocks()) it('returns cost entries with correct structure', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') + const {CostExplorerClient} = await import('@aws-sdk/client-cost-explorer') const mockSend = vi.fn().mockResolvedValue({ - ResultsByTime: [{ - TimePeriod: { Start: '2026-02-01', End: '2026-03-01' }, - Groups: [ - { Keys: ['AWS Lambda'], Metrics: { UnblendedCost: { Amount: '12.34', Unit: 'USD' } } }, - { Keys: ['API Gateway'], Metrics: { UnblendedCost: { Amount: '5.67', Unit: 'USD' } } }, - ], - }], + ResultsByTime: [ + { + TimePeriod: {Start: '2026-02-01', End: '2026-03-01'}, + Groups: [ + {Keys: ['AWS Lambda'], Metrics: {UnblendedCost: {Amount: '12.34', Unit: 'USD'}}}, + {Keys: ['API Gateway'], Metrics: {UnblendedCost: {Amount: '5.67', Unit: 'USD'}}}, + ], + }, + ], }) - vi.mocked(CostExplorerClient).mockImplementation(() => ({ send: mockSend })) + vi.mocked(CostExplorerClient).mockImplementation(() => ({send: mockSend})) - const { getServiceCosts } = await import('../../src/services/aws-costs.js') - const { entries } = await getServiceCosts('my-service', { project: 'my-service' }) + const {getServiceCosts} = await import('../../src/services/aws-costs.js') + const {entries} = await getServiceCosts('my-service', {project: 'my-service'}) expect(entries).toHaveLength(2) expect(entries[0].serviceName).toBe('AWS Lambda') @@ -35,13 +37,13 @@ describe('getServiceCosts', () => { }) it('returns empty entries when no costs found', async () => { - const { CostExplorerClient } = await import('@aws-sdk/client-cost-explorer') + const {CostExplorerClient} = await import('@aws-sdk/client-cost-explorer') vi.mocked(CostExplorerClient).mockImplementation(() => ({ - send: 
vi.fn().mockResolvedValue({ ResultsByTime: [] }), + send: vi.fn().mockResolvedValue({ResultsByTime: []}), })) - const { getServiceCosts } = await import('../../src/services/aws-costs.js') - const { entries } = await getServiceCosts('unknown-service', { project: 'unknown' }) + const {getServiceCosts} = await import('../../src/services/aws-costs.js') + const {entries} = await getServiceCosts('unknown-service', {project: 'unknown'}) expect(entries).toHaveLength(0) }) }) @@ -53,8 +55,8 @@ describe('cost formatting helpers', () => { it('calculates total', () => { const entries = [ - { serviceName: 'A', amount: 10, unit: 'USD', period: { start: '', end: '' } }, - { serviceName: 'B', amount: 5.5, unit: 'USD', period: { start: '', end: '' } }, + {serviceName: 'A', amount: 10, unit: 'USD', period: {start: '', end: ''}}, + {serviceName: 'B', amount: 5.5, unit: 'USD', period: {start: '', end: ''}}, ] expect(calculateTotal(entries)).toBeCloseTo(15.5) }) diff --git a/tests/services/docs.test.js b/tests/services/docs.test.js index c674edd..84185e5 100644 --- a/tests/services/docs.test.js +++ b/tests/services/docs.test.js @@ -1,47 +1,45 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' -import { http, HttpResponse } from 'msw' -import { server } from '../setup.js' +import {describe, it, expect, vi, beforeEach} from 'vitest' +import {http, HttpResponse} from 'msw' +import {server} from '../setup.js' // Mock exec so detectCurrentRepo returns a fake GitHub remote vi.mock('../../src/services/shell.js', () => ({ exec: vi.fn(async (cmd, args) => { if (cmd === 'gh' && args[0] === 'auth') { - return { stdout: 'test-token', stderr: '', exitCode: 0 } + return {stdout: 'test-token', stderr: '', exitCode: 0} } if (cmd === 'git' && args[0] === 'remote') { - return { stdout: 'https://github.com/my-org/my-repo.git', stderr: '', exitCode: 0 } + return {stdout: 'https://github.com/my-org/my-repo.git', stderr: '', exitCode: 0} } - return { stdout: '', stderr: 'unknown command', 
exitCode: 1 } + return {stdout: '', stderr: 'unknown command', exitCode: 1} }), })) const TREE_RESPONSE = { truncated: false, tree: [ - { type: 'blob', path: 'README.md', size: 1024 }, - { type: 'blob', path: 'openapi.yaml', size: 4096 }, - { type: 'blob', path: 'asyncapi.yaml', size: 2048 }, - { type: 'blob', path: 'docs/architecture.md', size: 2048 }, - { type: 'blob', path: 'docs/deploy.md', size: 512 }, - { type: 'blob', path: 'src/index.js', size: 300 }, - { type: 'blob', path: 'package.json', size: 800 }, - { type: 'blob', path: 'readme.md', size: 0 }, // empty → excluded - { type: 'tree', path: 'docs', size: 0 }, // dir entry → excluded + {type: 'blob', path: 'README.md', size: 1024}, + {type: 'blob', path: 'openapi.yaml', size: 4096}, + {type: 'blob', path: 'asyncapi.yaml', size: 2048}, + {type: 'blob', path: 'docs/architecture.md', size: 2048}, + {type: 'blob', path: 'docs/deploy.md', size: 512}, + {type: 'blob', path: 'src/index.js', size: 300}, + {type: 'blob', path: 'package.json', size: 800}, + {type: 'blob', path: 'readme.md', size: 0}, // empty → excluded + {type: 'tree', path: 'docs', size: 0}, // dir entry → excluded ], } function setupDocHandlers() { server.use( http.get('https://api.github.com/repos/:owner/:repo', () => - HttpResponse.json({ default_branch: 'main', name: 'my-repo', full_name: 'my-org/my-repo' }), + HttpResponse.json({default_branch: 'main', name: 'my-repo', full_name: 'my-org/my-repo'}), ), http.get('https://api.github.com/repos/:owner/:repo/git/ref/*', () => - HttpResponse.json({ object: { sha: 'abc123def456' } }), + HttpResponse.json({object: {sha: 'abc123def456'}}), ), - http.get('https://api.github.com/repos/:owner/:repo/git/trees/:sha', () => - HttpResponse.json(TREE_RESPONSE), - ), - http.get('https://api.github.com/repos/:owner/:repo/contents/:path', ({ params }) => { + http.get('https://api.github.com/repos/:owner/:repo/git/trees/:sha', () => HttpResponse.json(TREE_RESPONSE)), + 
http.get('https://api.github.com/repos/:owner/:repo/contents/:path', ({params}) => { const content = `# Doc: ${params.path}\n\nThis is the content of ${params.path}` return HttpResponse.json({ type: 'file', @@ -56,7 +54,7 @@ describe('detectCurrentRepo()', () => { beforeEach(() => vi.clearAllMocks()) it('detects owner and repo from git remote URL', async () => { - const { detectCurrentRepo } = await import('../../src/services/docs.js') + const {detectCurrentRepo} = await import('../../src/services/docs.js') const result = await detectCurrentRepo() expect(result.owner).toBe('my-org') expect(result.repo).toBe('my-repo') @@ -70,7 +68,7 @@ describe('listDocs()', () => { }) it('returns only classified doc files', async () => { - const { listDocs } = await import('../../src/services/docs.js') + const {listDocs} = await import('../../src/services/docs.js') const entries = await listDocs('my-org', 'my-repo') // Should NOT include: src/index.js, package.json, readme.md (empty), docs (tree) @@ -88,7 +86,7 @@ describe('listDocs()', () => { }) it('classifies types correctly', async () => { - const { listDocs } = await import('../../src/services/docs.js') + const {listDocs} = await import('../../src/services/docs.js') const entries = await listDocs('my-org', 'my-repo') expect(entries.find((e) => e.path === 'README.md')?.type).toBe('readme') @@ -98,7 +96,7 @@ describe('listDocs()', () => { }) it('sorts by type priority (readme first, then swagger, asyncapi, doc)', async () => { - const { listDocs } = await import('../../src/services/docs.js') + const {listDocs} = await import('../../src/services/docs.js') const entries = await listDocs('my-org', 'my-repo') const types = entries.map((e) => e.type) @@ -120,7 +118,7 @@ describe('readFile()', () => { }) it('decodes base64 content correctly', async () => { - const { readFile } = await import('../../src/services/docs.js') + const {readFile} = await import('../../src/services/docs.js') const content = await readFile('my-org', 
'my-repo', 'README.md') expect(typeof content).toBe('string') expect(content).toContain('README.md') @@ -134,7 +132,7 @@ describe('searchDocs()', () => { }) it('finds matches in doc files', async () => { - const { searchDocs } = await import('../../src/services/docs.js') + const {searchDocs} = await import('../../src/services/docs.js') // The mock returns "# Doc: {path}\n\nThis is the content of {path}" // searching for "content" should match every file const matches = await searchDocs('my-org', 'my-repo', 'content') @@ -148,7 +146,7 @@ describe('searchDocs()', () => { }) it('returns empty array when term not found', async () => { - const { searchDocs } = await import('../../src/services/docs.js') + const {searchDocs} = await import('../../src/services/docs.js') const matches = await searchDocs('my-org', 'my-repo', 'xyz_nonexistent_string_99999') expect(matches).toHaveLength(0) }) diff --git a/tests/services/doctor.test.js b/tests/services/doctor.test.js index 2edd449..7680668 100644 --- a/tests/services/doctor.test.js +++ b/tests/services/doctor.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' +import {describe, it, expect, vi, beforeEach} from 'vitest' vi.mock('../../src/services/shell.js', () => ({ exec: vi.fn(), @@ -17,12 +17,12 @@ describe('doctor command logic', () => { beforeEach(() => vi.clearAllMocks()) it('reports tool as ok when found', async () => { - const { which, exec } = await import('../../src/services/shell.js') - const { checkGitHubAuth, checkAWSAuth } = await import('../../src/services/auth.js') + const {which, exec} = await import('../../src/services/shell.js') + const {checkGitHubAuth, checkAWSAuth} = await import('../../src/services/auth.js') vi.mocked(which).mockResolvedValue('/usr/local/bin/node') - vi.mocked(exec).mockResolvedValue({ stdout: 'v25.2.1', stderr: '', exitCode: 0 }) - vi.mocked(checkGitHubAuth).mockResolvedValue({ authenticated: true, username: 'testdev' }) - 
vi.mocked(checkAWSAuth).mockResolvedValue({ authenticated: true, account: '123456789012', role: 'dev' }) + vi.mocked(exec).mockResolvedValue({stdout: 'v25.2.1', stderr: '', exitCode: 0}) + vi.mocked(checkGitHubAuth).mockResolvedValue({authenticated: true, username: 'testdev'}) + vi.mocked(checkAWSAuth).mockResolvedValue({authenticated: true, account: '123456789012', role: 'dev'}) // Import and run the actual status check logic inline const path = await which('node') @@ -32,15 +32,15 @@ describe('doctor command logic', () => { }) it('reports tool as fail when not found (required)', async () => { - const { which } = await import('../../src/services/shell.js') + const {which} = await import('../../src/services/shell.js') vi.mocked(which).mockResolvedValue(null) const path = await which('node') expect(path).toBeNull() }) it('checkGitHubAuth returns fail hint', async () => { - const { checkGitHubAuth } = await import('../../src/services/auth.js') - vi.mocked(checkGitHubAuth).mockResolvedValue({ authenticated: false, error: 'not logged in' }) + const {checkGitHubAuth} = await import('../../src/services/auth.js') + vi.mocked(checkGitHubAuth).mockResolvedValue({authenticated: false, error: 'not logged in'}) const result = await checkGitHubAuth() expect(result.authenticated).toBe(false) }) diff --git a/tests/services/dotfiles/dotfiles.test.js b/tests/services/dotfiles/dotfiles.test.js index 3af8200..b7b70a5 100644 --- a/tests/services/dotfiles/dotfiles.test.js +++ b/tests/services/dotfiles/dotfiles.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' +import {describe, it, expect, vi, beforeEach} from 'vitest' vi.mock('../../../src/services/shell.js', () => ({ which: vi.fn(), @@ -23,7 +23,7 @@ import { getChezmoiRemote, hasLocalChanges, } from '../../../src/services/dotfiles.js' -import { which, exec } from '../../../src/services/shell.js' +import {which, exec} from '../../../src/services/shell.js' beforeEach(() => { vi.resetAllMocks() @@ 
-49,25 +49,25 @@ describe('isChezmoiInstalled()', () => { // --------------------------------------------------------------------------- describe('getChezmoiConfig()', () => { it('returns parsed object when chezmoi dump-config succeeds', async () => { - const mockConfig = { encryption: { tool: 'age' }, sourceDir: '/home/user/.local/share/chezmoi' } - exec.mockResolvedValue({ stdout: JSON.stringify(mockConfig), stderr: '', exitCode: 0 }) + const mockConfig = {encryption: {tool: 'age'}, sourceDir: '/home/user/.local/share/chezmoi'} + exec.mockResolvedValue({stdout: JSON.stringify(mockConfig), stderr: '', exitCode: 0}) const result = await getChezmoiConfig() expect(result).toEqual(mockConfig) expect(exec).toHaveBeenCalledWith('chezmoi', ['dump-config', '--format', 'json']) }) it('returns null when chezmoi exits non-zero', async () => { - exec.mockResolvedValue({ stdout: '', stderr: 'not initialized', exitCode: 1 }) + exec.mockResolvedValue({stdout: '', stderr: 'not initialized', exitCode: 1}) expect(await getChezmoiConfig()).toBeNull() }) it('returns null when output is empty', async () => { - exec.mockResolvedValue({ stdout: '', stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: '', stderr: '', exitCode: 0}) expect(await getChezmoiConfig()).toBeNull() }) it('returns null when output is invalid JSON', async () => { - exec.mockResolvedValue({ stdout: 'not json', stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: 'not json', stderr: '', exitCode: 0}) expect(await getChezmoiConfig()).toBeNull() }) }) @@ -77,16 +77,16 @@ describe('getChezmoiConfig()', () => { // --------------------------------------------------------------------------- describe('getManagedFiles()', () => { it('returns empty array when chezmoi managed exits non-zero', async () => { - exec.mockResolvedValue({ stdout: '', stderr: 'error', exitCode: 1 }) + exec.mockResolvedValue({stdout: '', stderr: 'error', exitCode: 1}) expect(await getManagedFiles()).toEqual([]) }) it('parses managed 
files with plaintext source paths', async () => { const raw = [ - { targetPath: '/home/user/.zshrc', sourcePath: '/home/user/.local/share/chezmoi/dot_zshrc', type: 'file' }, - { targetPath: '/home/user/.gitconfig', sourcePath: '/home/user/.local/share/chezmoi/dot_gitconfig', type: 'file' }, + {targetPath: '/home/user/.zshrc', sourcePath: '/home/user/.local/share/chezmoi/dot_zshrc', type: 'file'}, + {targetPath: '/home/user/.gitconfig', sourcePath: '/home/user/.local/share/chezmoi/dot_gitconfig', type: 'file'}, ] - exec.mockResolvedValue({ stdout: JSON.stringify(raw), stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: JSON.stringify(raw), stderr: '', exitCode: 0}) const files = await getManagedFiles() expect(files).toHaveLength(2) expect(files[0].path).toBe('/home/user/.zshrc') @@ -96,29 +96,37 @@ describe('getManagedFiles()', () => { it('detects encrypted files from source path basename', async () => { const raw = [ - { targetPath: '/home/user/.ssh/id_ed25519', sourcePath: '/home/user/.local/share/chezmoi/private_dot_ssh/encrypted_id_ed25519.age', type: 'file' }, + { + targetPath: '/home/user/.ssh/id_ed25519', + sourcePath: '/home/user/.local/share/chezmoi/private_dot_ssh/encrypted_id_ed25519.age', + type: 'file', + }, ] - exec.mockResolvedValue({ stdout: JSON.stringify(raw), stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: JSON.stringify(raw), stderr: '', exitCode: 0}) const files = await getManagedFiles() expect(files[0].encrypted).toBe(true) }) it('detects encrypted files from source path parent dir', async () => { const raw = [ - { targetPath: '/home/user/.netrc', sourcePath: '/home/user/.local/share/chezmoi/encrypted_dot_netrc.age', type: 'file' }, + { + targetPath: '/home/user/.netrc', + sourcePath: '/home/user/.local/share/chezmoi/encrypted_dot_netrc.age', + type: 'file', + }, ] - exec.mockResolvedValue({ stdout: JSON.stringify(raw), stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: JSON.stringify(raw), stderr: '', exitCode: 
0}) const files = await getManagedFiles() expect(files[0].encrypted).toBe(true) }) it('returns empty array when JSON parse fails', async () => { - exec.mockResolvedValue({ stdout: 'bad json', stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: 'bad json', stderr: '', exitCode: 0}) expect(await getManagedFiles()).toEqual([]) }) it('returns empty array when chezmoi returns non-array JSON', async () => { - exec.mockResolvedValue({ stdout: '{}', stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: '{}', stderr: '', exitCode: 0}) expect(await getManagedFiles()).toEqual([]) }) }) @@ -240,13 +248,13 @@ describe('getDefaultFileList()', () => { // --------------------------------------------------------------------------- describe('getSensitivePatterns()', () => { it('returns default SENSITIVE_PATTERNS when no custom patterns set', () => { - const config = { org: 'acme', awsProfile: 'dev' } + const config = {org: 'acme', awsProfile: 'dev'} const patterns = getSensitivePatterns(config) expect(patterns).toEqual(SENSITIVE_PATTERNS) }) it('merges custom patterns with defaults', () => { - const config = { org: 'acme', awsProfile: 'dev', dotfiles: { enabled: true, customSensitivePatterns: ['~/.my-vault'] } } + const config = {org: 'acme', awsProfile: 'dev', dotfiles: {enabled: true, customSensitivePatterns: ['~/.my-vault']}} const patterns = getSensitivePatterns(config) expect(patterns).toContain('~/.my-vault') expect(patterns).toContain('~/.netrc') // still has defaults @@ -254,7 +262,7 @@ describe('getSensitivePatterns()', () => { }) it('returns defaults when dotfiles config has no customSensitivePatterns', () => { - const config = { org: 'acme', awsProfile: 'dev', dotfiles: { enabled: true } } + const config = {org: 'acme', awsProfile: 'dev', dotfiles: {enabled: true}} const patterns = getSensitivePatterns(config) expect(patterns).toEqual(SENSITIVE_PATTERNS) }) @@ -265,14 +273,14 @@ describe('getSensitivePatterns()', () => { // 
--------------------------------------------------------------------------- describe('getChezmoiRemote()', () => { it('returns URL when remote is configured', async () => { - exec.mockResolvedValue({ stdout: 'git@github.com:user/dotfiles.git', stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: 'git@github.com:user/dotfiles.git', stderr: '', exitCode: 0}) const remote = await getChezmoiRemote() expect(remote).toBe('git@github.com:user/dotfiles.git') expect(exec).toHaveBeenCalledWith('chezmoi', ['git', '--', 'remote', 'get-url', 'origin']) }) it('returns null when no remote configured', async () => { - exec.mockResolvedValue({ stdout: '', stderr: 'fatal: no such remote', exitCode: 128 }) + exec.mockResolvedValue({stdout: '', stderr: 'fatal: no such remote', exitCode: 128}) expect(await getChezmoiRemote()).toBeNull() }) }) @@ -282,17 +290,17 @@ describe('getChezmoiRemote()', () => { // --------------------------------------------------------------------------- describe('hasLocalChanges()', () => { it('returns true when there are local changes', async () => { - exec.mockResolvedValue({ stdout: ' M dot_zshrc\n M dot_gitconfig', stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: ' M dot_zshrc\n M dot_gitconfig', stderr: '', exitCode: 0}) expect(await hasLocalChanges()).toBe(true) }) it('returns false when working tree is clean', async () => { - exec.mockResolvedValue({ stdout: '', stderr: '', exitCode: 0 }) + exec.mockResolvedValue({stdout: '', stderr: '', exitCode: 0}) expect(await hasLocalChanges()).toBe(false) }) it('returns false when chezmoi git status fails', async () => { - exec.mockResolvedValue({ stdout: '', stderr: 'not a git repo', exitCode: 128 }) + exec.mockResolvedValue({stdout: '', stderr: 'not a git repo', exitCode: 128}) expect(await hasLocalChanges()).toBe(false) }) }) diff --git a/tests/services/github-pr.test.js b/tests/services/github-pr.test.js index 28a7312..eadf16b 100644 --- a/tests/services/github-pr.test.js +++ 
b/tests/services/github-pr.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' +import {describe, it, expect, vi, beforeEach} from 'vitest' vi.mock('../../src/services/shell.js', () => ({ exec: vi.fn(), @@ -8,10 +8,10 @@ describe('getPRDetail', () => { beforeEach(() => vi.clearAllMocks()) it('restituisce PRDetail con qaComments e qaSteps dal mock MSW', async () => { - const { exec } = await import('../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }) + const {exec} = await import('../../src/services/shell.js') + vi.mocked(exec).mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}) - const { getPRDetail } = await import('../../src/services/github.js') + const {getPRDetail} = await import('../../src/services/github.js') const detail = await getPRDetail('acme', 'my-api', 42) expect(detail.number).toBe(42) @@ -27,10 +27,10 @@ describe('getPRDetail', () => { }) it('identifica correttamente i commenti QA tramite autore', async () => { - const { exec } = await import('../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }) + const {exec} = await import('../../src/services/shell.js') + vi.mocked(exec).mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}) - const { getPRDetail } = await import('../../src/services/github.js') + const {getPRDetail} = await import('../../src/services/github.js') const detail = await getPRDetail('acme', 'my-api', 42) // Il commento di "qa-engineer" deve essere classificato come QA @@ -39,10 +39,10 @@ describe('getPRDetail', () => { }) it('estrae qaSteps dalle checklist nei commenti QA', async () => { - const { exec } = await import('../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }) + const {exec} = await import('../../src/services/shell.js') + 
vi.mocked(exec).mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}) - const { getPRDetail } = await import('../../src/services/github.js') + const {getPRDetail} = await import('../../src/services/github.js') const detail = await getPRDetail('acme', 'my-api', 42) // Il mock ha "- [x] Testare flusso login" e "- [ ] Verificare logout" diff --git a/tests/services/nvd.test.js b/tests/services/nvd.test.js index fb86571..174a7a9 100644 --- a/tests/services/nvd.test.js +++ b/tests/services/nvd.test.js @@ -1,38 +1,38 @@ -import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest' +import {describe, it, expect, vi, beforeEach, afterEach} from 'vitest' // Mock loadConfig so tests don't need a real config file vi.mock('../../src/services/config.js', () => ({ - loadConfig: vi.fn().mockResolvedValue({ org: 'acme', nvd: undefined }), + loadConfig: vi.fn().mockResolvedValue({org: 'acme', nvd: undefined}), })) describe('normalizeSeverity', () => { it('maps CRITICAL to Critical', async () => { - const { normalizeSeverity } = await import('../../src/services/nvd.js') + const {normalizeSeverity} = await import('../../src/services/nvd.js') expect(normalizeSeverity('CRITICAL')).toBe('Critical') }) it('maps HIGH to High', async () => { - const { normalizeSeverity } = await import('../../src/services/nvd.js') + const {normalizeSeverity} = await import('../../src/services/nvd.js') expect(normalizeSeverity('HIGH')).toBe('High') }) it('maps MEDIUM to Medium', async () => { - const { normalizeSeverity } = await import('../../src/services/nvd.js') + const {normalizeSeverity} = await import('../../src/services/nvd.js') expect(normalizeSeverity('MEDIUM')).toBe('Medium') }) it('maps LOW to Low', async () => { - const { normalizeSeverity } = await import('../../src/services/nvd.js') + const {normalizeSeverity} = await import('../../src/services/nvd.js') expect(normalizeSeverity('LOW')).toBe('Low') }) it('returns Unknown for undefined', async () => { - const { 
normalizeSeverity } = await import('../../src/services/nvd.js') + const {normalizeSeverity} = await import('../../src/services/nvd.js') expect(normalizeSeverity(undefined)).toBe('Unknown') }) it('returns Unknown for unrecognized string', async () => { - const { normalizeSeverity } = await import('../../src/services/nvd.js') + const {normalizeSeverity} = await import('../../src/services/nvd.js') expect(normalizeSeverity('NONE')).toBe('Unknown') }) }) @@ -52,11 +52,11 @@ describe('searchCves', () => { it('succeeds with empty keyword (treated as no keyword filter)', async () => { global.fetch = vi.fn().mockResolvedValue({ ok: true, - json: async () => ({ resultsPerPage: 0, startIndex: 0, totalResults: 0, vulnerabilities: [] }), + json: async () => ({resultsPerPage: 0, startIndex: 0, totalResults: 0, vulnerabilities: []}), }) - const { searchCves } = await import('../../src/services/nvd.js') - const result = await searchCves({ keyword: '' }) - expect(result).toEqual({ results: [], totalResults: 0 }) + const {searchCves} = await import('../../src/services/nvd.js') + const result = await searchCves({keyword: ''}) + expect(result).toEqual({results: [], totalResults: 0}) // keywordSearch param should NOT be in the URL when keyword is empty const calledUrl = global.fetch.mock.calls[0][0] expect(calledUrl).not.toContain('keywordSearch') @@ -65,11 +65,11 @@ describe('searchCves', () => { it('succeeds with whitespace keyword (treated as no keyword filter)', async () => { global.fetch = vi.fn().mockResolvedValue({ ok: true, - json: async () => ({ resultsPerPage: 0, startIndex: 0, totalResults: 0, vulnerabilities: [] }), + json: async () => ({resultsPerPage: 0, startIndex: 0, totalResults: 0, vulnerabilities: []}), }) - const { searchCves } = await import('../../src/services/nvd.js') - const result = await searchCves({ keyword: ' ' }) - expect(result).toEqual({ results: [], totalResults: 0 }) + const {searchCves} = await import('../../src/services/nvd.js') + const result = await 
searchCves({keyword: ' '}) + expect(result).toEqual({results: [], totalResults: 0}) const calledUrl = global.fetch.mock.calls[0][0] expect(calledUrl).not.toContain('keywordSearch') }) @@ -87,9 +87,9 @@ describe('searchCves', () => { id: 'CVE-2026-1234', published: '2026-03-25T00:00:00.000', lastModified: '2026-03-26T00:00:00.000', - descriptions: [{ lang: 'en', value: 'Test vulnerability.' }], + descriptions: [{lang: 'en', value: 'Test vulnerability.'}], metrics: { - cvssMetricV31: [{ cvssData: { baseScore: 9.8, baseSeverity: 'CRITICAL', vectorString: null } }], + cvssMetricV31: [{cvssData: {baseScore: 9.8, baseSeverity: 'CRITICAL', vectorString: null}}], }, weaknesses: [], configurations: [], @@ -100,8 +100,8 @@ describe('searchCves', () => { }), }) - const { searchCves } = await import('../../src/services/nvd.js') - const { results, totalResults } = await searchCves({ keyword: 'openssl', days: 14 }) + const {searchCves} = await import('../../src/services/nvd.js') + const {results, totalResults} = await searchCves({keyword: 'openssl', days: 14}) expect(totalResults).toBe(1) expect(results).toHaveLength(1) @@ -125,52 +125,52 @@ describe('searchCves', () => { id: 'CVE-2026-9999', published: '2026-03-25T00:00:00.000', lastModified: '2026-03-26T00:00:00.000', - descriptions: [{ lang: 'en', value: 'Test.' 
}], + descriptions: [{lang: 'en', value: 'Test.'}], metrics: {}, weaknesses: [], configurations: [], references: [ - { url: 'https://example.com/advisory', source: 'example.com', tags: [] }, - { url: 'https://other.com', source: 'other.com', tags: [] }, + {url: 'https://example.com/advisory', source: 'example.com', tags: []}, + {url: 'https://other.com', source: 'other.com', tags: []}, ], }, }, ], }), }) - const { searchCves } = await import('../../src/services/nvd.js') - const { results } = await searchCves({ keyword: 'test' }) + const {searchCves} = await import('../../src/services/nvd.js') + const {results} = await searchCves({keyword: 'test'}) expect(results[0].firstReference).toBe('https://example.com/advisory') }) it('returns empty results when no vulnerabilities found', async () => { global.fetch = vi.fn().mockResolvedValue({ ok: true, - json: async () => ({ resultsPerPage: 0, startIndex: 0, totalResults: 0, vulnerabilities: [] }), + json: async () => ({resultsPerPage: 0, startIndex: 0, totalResults: 0, vulnerabilities: []}), }) - const { searchCves } = await import('../../src/services/nvd.js') - const { results, totalResults } = await searchCves({ keyword: 'veryrareunknownlib' }) + const {searchCves} = await import('../../src/services/nvd.js') + const {results, totalResults} = await searchCves({keyword: 'veryrareunknownlib'}) expect(totalResults).toBe(0) expect(results).toHaveLength(0) }) it('throws DvmiError on HTTP error response', async () => { - global.fetch = vi.fn().mockResolvedValue({ ok: false, status: 503 }) + global.fetch = vi.fn().mockResolvedValue({ok: false, status: 503}) - const { searchCves } = await import('../../src/services/nvd.js') - await expect(searchCves({ keyword: 'openssl' })).rejects.toThrow('NVD API returned HTTP 503') + const {searchCves} = await import('../../src/services/nvd.js') + await expect(searchCves({keyword: 'openssl'})).rejects.toThrow('NVD API returned HTTP 503') }) it('uses severity filter parameter when provided', 
async () => { global.fetch = vi.fn().mockResolvedValue({ ok: true, - json: async () => ({ resultsPerPage: 0, totalResults: 0, vulnerabilities: [] }), + json: async () => ({resultsPerPage: 0, totalResults: 0, vulnerabilities: []}), }) - const { searchCves } = await import('../../src/services/nvd.js') - await searchCves({ keyword: 'openssl', severity: 'critical' }) + const {searchCves} = await import('../../src/services/nvd.js') + await searchCves({keyword: 'openssl', severity: 'critical'}) const calledUrl = /** @type {any} */ (global.fetch).mock.calls[0][0] expect(calledUrl).toContain('cvssV3Severity=CRITICAL') @@ -190,12 +190,12 @@ describe('getCveDetail', () => { }) it('throws DvmiError for invalid CVE ID format', async () => { - const { getCveDetail } = await import('../../src/services/nvd.js') + const {getCveDetail} = await import('../../src/services/nvd.js') await expect(getCveDetail('not-a-cve')).rejects.toThrow('Invalid CVE ID') }) it('throws DvmiError for empty CVE ID', async () => { - const { getCveDetail } = await import('../../src/services/nvd.js') + const {getCveDetail} = await import('../../src/services/nvd.js') await expect(getCveDetail('')).rejects.toThrow('Invalid CVE ID') }) @@ -212,24 +212,28 @@ describe('getCveDetail', () => { published: '2021-12-10T04:15:07.917', lastModified: '2023-11-07T03:39:36.747', vulnStatus: 'Analyzed', - descriptions: [{ lang: 'en', value: 'Apache Log4j2 JNDI vulnerability.' 
}], + descriptions: [{lang: 'en', value: 'Apache Log4j2 JNDI vulnerability.'}], metrics: { - cvssMetricV31: [{ cvssData: { baseScore: 10.0, baseSeverity: 'CRITICAL', vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H' } }], + cvssMetricV31: [ + { + cvssData: { + baseScore: 10.0, + baseSeverity: 'CRITICAL', + vectorString: 'CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:H/A:H', + }, + }, + ], }, - weaknesses: [ - { description: [{ lang: 'en', value: 'CWE-502' }] }, - ], + weaknesses: [{description: [{lang: 'en', value: 'CWE-502'}]}], configurations: [], - references: [ - { url: 'https://example.com', source: 'test', tags: ['Vendor Advisory'] }, - ], + references: [{url: 'https://example.com', source: 'test', tags: ['Vendor Advisory']}], }, }, ], }), }) - const { getCveDetail } = await import('../../src/services/nvd.js') + const {getCveDetail} = await import('../../src/services/nvd.js') const detail = await getCveDetail('CVE-2021-44228') expect(detail.id).toBe('CVE-2021-44228') @@ -246,10 +250,10 @@ describe('getCveDetail', () => { it('throws DvmiError when CVE not found', async () => { global.fetch = vi.fn().mockResolvedValue({ ok: true, - json: async () => ({ resultsPerPage: 0, totalResults: 0, vulnerabilities: [] }), + json: async () => ({resultsPerPage: 0, totalResults: 0, vulnerabilities: []}), }) - const { getCveDetail } = await import('../../src/services/nvd.js') + const {getCveDetail} = await import('../../src/services/nvd.js') await expect(getCveDetail('CVE-2099-99999')).rejects.toThrow('CVE not found') }) @@ -266,9 +270,9 @@ describe('getCveDetail', () => { published: '2010-01-01T00:00:00.000', lastModified: '2010-01-02T00:00:00.000', vulnStatus: 'Analyzed', - descriptions: [{ lang: 'en', value: 'Old CVE.' 
}], + descriptions: [{lang: 'en', value: 'Old CVE.'}], metrics: { - cvssMetricV2: [{ cvssData: { baseScore: 7.8, baseSeverity: 'HIGH', vectorString: null } }], + cvssMetricV2: [{cvssData: {baseScore: 7.8, baseSeverity: 'HIGH', vectorString: null}}], }, weaknesses: [], configurations: [], @@ -279,7 +283,7 @@ describe('getCveDetail', () => { }), }) - const { getCveDetail } = await import('../../src/services/nvd.js') + const {getCveDetail} = await import('../../src/services/nvd.js') const detail = await getCveDetail('CVE-2010-0001') expect(detail.severity).toBe('High') expect(detail.score).toBeCloseTo(7.8) diff --git a/tests/services/pr-qa.test.js b/tests/services/pr-qa.test.js index fd3c8ec..d427320 100644 --- a/tests/services/pr-qa.test.js +++ b/tests/services/pr-qa.test.js @@ -1,39 +1,39 @@ -import { describe, it, expect, vi } from 'vitest' -import { extractQASteps, isQAComment } from '../../src/services/github.js' +import {describe, it, expect, vi} from 'vitest' +import {extractQASteps, isQAComment} from '../../src/services/github.js' vi.mock('../../src/services/shell.js', () => ({ - exec: vi.fn().mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }), + exec: vi.fn().mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}), })) describe('extractQASteps', () => { it('estrae step non completati', () => { const body = '- [ ] Testare login\n- [ ] Verificare logout' expect(extractQASteps(body)).toEqual([ - { text: 'Testare login', checked: false }, - { text: 'Verificare logout', checked: false }, + {text: 'Testare login', checked: false}, + {text: 'Verificare logout', checked: false}, ]) }) it('estrae step completati con [x]', () => { const body = '- [x] Step completato\n- [X] Altro step maiuscolo' expect(extractQASteps(body)).toEqual([ - { text: 'Step completato', checked: true }, - { text: 'Altro step maiuscolo', checked: true }, + {text: 'Step completato', checked: true}, + {text: 'Altro step maiuscolo', checked: true}, ]) }) 
it('gestisce mix di step completati e non', () => { const body = '- [x] Primo\n- [ ] Secondo\n- [x] Terzo' expect(extractQASteps(body)).toEqual([ - { text: 'Primo', checked: true }, - { text: 'Secondo', checked: false }, - { text: 'Terzo', checked: true }, + {text: 'Primo', checked: true}, + {text: 'Secondo', checked: false}, + {text: 'Terzo', checked: true}, ]) }) it('ignora righe di testo normale', () => { const body = 'Testo normale\n- [ ] Solo questo\nAltro testo' - expect(extractQASteps(body)).toEqual([{ text: 'Solo questo', checked: false }]) + expect(extractQASteps(body)).toEqual([{text: 'Solo questo', checked: false}]) }) it('restituisce array vuoto se nessuna checklist', () => { @@ -42,7 +42,7 @@ describe('extractQASteps', () => { it('gestisce indentazione negli step', () => { const body = ' - [ ] Step indentato' - expect(extractQASteps(body)).toEqual([{ text: 'Step indentato', checked: false }]) + expect(extractQASteps(body)).toEqual([{text: 'Step indentato', checked: false}]) }) }) diff --git a/tests/services/pr.test.js b/tests/services/pr.test.js index 71f9a9c..00ce69c 100644 --- a/tests/services/pr.test.js +++ b/tests/services/pr.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect } from 'vitest' +import {describe, it, expect} from 'vitest' // Test pure logic functions extracted from pr/index.js /** @@ -8,7 +8,7 @@ import { describe, it, expect } from 'vitest' function titleFromBranch(branchName) { const [type, ...rest] = branchName.split('/') const desc = rest.join('/').replace(/-/g, ' ') - const typeMap = { feature: 'Feature', fix: 'Fix', chore: 'Chore', hotfix: 'Hotfix' } + const typeMap = {feature: 'Feature', fix: 'Fix', chore: 'Chore', hotfix: 'Hotfix'} return `${typeMap[type] ?? 
type}: ${desc}` } @@ -17,7 +17,7 @@ function titleFromBranch(branchName) { * @returns {string[]} */ function labelFromType(branchType) { - const map = { feature: ['feature'], fix: ['bug'], chore: ['chore'], hotfix: ['critical'] } + const map = {feature: ['feature'], fix: ['bug'], chore: ['chore'], hotfix: ['critical']} return map[branchType] ?? [] } diff --git a/tests/services/prompts/awesome-copilot.test.js b/tests/services/prompts/awesome-copilot.test.js index 7e4ac0c..61a60f2 100644 --- a/tests/services/prompts/awesome-copilot.test.js +++ b/tests/services/prompts/awesome-copilot.test.js @@ -1,6 +1,6 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' -import { http, HttpResponse } from 'msw' -import { server } from '../../setup.js' +import {describe, it, expect, vi, beforeEach} from 'vitest' +import {http, HttpResponse} from 'msw' +import {server} from '../../setup.js' vi.mock('../../../src/services/shell.js', () => ({ exec: vi.fn(), @@ -23,7 +23,7 @@ const EMPTY_MD = `# Awesome Copilot Skills\n\nNo entries yet.\n` describe('parseMarkdownTable', () => { it('parses table rows into AwesomeEntry[]', async () => { - const { parseMarkdownTable } = await import('../../../src/services/awesome-copilot.js') + const {parseMarkdownTable} = await import('../../../src/services/awesome-copilot.js') const entries = parseMarkdownTable(AGENTS_MD, 'agents') expect(entries).toHaveLength(3) @@ -35,7 +35,7 @@ describe('parseMarkdownTable', () => { }) it('strips markdown badges from name cell', async () => { - const { parseMarkdownTable } = await import('../../../src/services/awesome-copilot.js') + const {parseMarkdownTable} = await import('../../../src/services/awesome-copilot.js') const entries = parseMarkdownTable(AGENTS_MD, 'agents') const badge = entries.find((e) => e.name === 'Badge Agent') @@ -45,7 +45,7 @@ describe('parseMarkdownTable', () => { }) it('returns empty array for file with no table rows', async () => { - const { parseMarkdownTable } = await 
import('../../../src/services/awesome-copilot.js') + const {parseMarkdownTable} = await import('../../../src/services/awesome-copilot.js') const entries = parseMarkdownTable(EMPTY_MD, 'skills') expect(entries).toHaveLength(0) }) @@ -54,24 +54,22 @@ describe('parseMarkdownTable', () => { describe('fetchAwesomeEntries', () => { beforeEach(async () => { vi.clearAllMocks() - const { exec } = await import('../../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }) + const {exec} = await import('../../../src/services/shell.js') + vi.mocked(exec).mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}) }) it('fetches and parses agents category', async () => { server.use( - http.get( - 'https://api.github.com/repos/github/awesome-copilot/contents/:path*', - () => - HttpResponse.json({ - type: 'file', - encoding: 'base64', - content: toBase64(AGENTS_MD), - }), + http.get('https://api.github.com/repos/github/awesome-copilot/contents/:path*', () => + HttpResponse.json({ + type: 'file', + encoding: 'base64', + content: toBase64(AGENTS_MD), + }), ), ) - const { fetchAwesomeEntries } = await import('../../../src/services/awesome-copilot.js') + const {fetchAwesomeEntries} = await import('../../../src/services/awesome-copilot.js') const entries = await fetchAwesomeEntries('agents') expect(entries.length).toBeGreaterThan(0) @@ -80,8 +78,8 @@ describe('fetchAwesomeEntries', () => { }) it('throws DvmiError for unknown category', async () => { - const { fetchAwesomeEntries } = await import('../../../src/services/awesome-copilot.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {fetchAwesomeEntries} = await import('../../../src/services/awesome-copilot.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(fetchAwesomeEntries('unknown-cat')).rejects.toThrow(DvmiError) await expect(fetchAwesomeEntries('unknown-cat')).rejects.toThrow(/unknown/i) @@ 
-89,14 +87,13 @@ describe('fetchAwesomeEntries', () => { it('throws DvmiError when category file returns 404', async () => { server.use( - http.get( - 'https://api.github.com/repos/github/awesome-copilot/contents/:path*', - () => HttpResponse.json({ message: 'Not Found' }, { status: 404 }), + http.get('https://api.github.com/repos/github/awesome-copilot/contents/:path*', () => + HttpResponse.json({message: 'Not Found'}, {status: 404}), ), ) - const { fetchAwesomeEntries } = await import('../../../src/services/awesome-copilot.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {fetchAwesomeEntries} = await import('../../../src/services/awesome-copilot.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(fetchAwesomeEntries('agents')).rejects.toThrow(DvmiError) }) diff --git a/tests/services/prompts/prompts.test.js b/tests/services/prompts/prompts.test.js index 9d1fe07..8f28cc7 100644 --- a/tests/services/prompts/prompts.test.js +++ b/tests/services/prompts/prompts.test.js @@ -1,7 +1,7 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' -import { http, HttpResponse } from 'msw' -import { vol } from 'memfs' -import { server } from '../../setup.js' +import {describe, it, expect, vi, beforeEach} from 'vitest' +import {http, HttpResponse} from 'msw' +import {vol} from 'memfs' +import {server} from '../../setup.js' vi.mock('../../../src/services/shell.js', () => ({ exec: vi.fn(), @@ -14,7 +14,7 @@ vi.mock('execa', () => ({ // Redirect Node's fs/promises to memfs so downloadPrompt writes to an in-memory FS vi.mock('node:fs/promises', async () => { - const { fs } = await import('memfs') + const {fs} = await import('memfs') return fs.promises }) @@ -55,7 +55,7 @@ const PLAIN_CONTENT = 'Just a plain prompt without frontmatter.' 
*/ function treeHandler(items) { return http.get('https://api.github.com/repos/savez/prompt-for-ai/git/trees/:sha', () => - HttpResponse.json({ tree: items, truncated: false }), + HttpResponse.json({tree: items, truncated: false}), ) } @@ -67,14 +67,14 @@ function treeHandler(items) { * @returns {import('msw').HttpHandler} */ function contentsHandler(contentMap, status = 200) { - return http.get('https://api.github.com/repos/savez/prompt-for-ai/contents/:path*', ({ params }) => { + return http.get('https://api.github.com/repos/savez/prompt-for-ai/contents/:path*', ({params}) => { const filePath = Array.isArray(params.path) ? params.path.join('/') : String(params['path*'] ?? params.path ?? '') if (status !== 200) { - return HttpResponse.json({ message: 'Not Found' }, { status }) + return HttpResponse.json({message: 'Not Found'}, {status}) } const content = contentMap[filePath] if (content === undefined) { - return HttpResponse.json({ message: 'Not Found' }, { status: 404 }) + return HttpResponse.json({message: 'Not Found'}, {status: 404}) } return HttpResponse.json({ type: 'file', @@ -89,15 +89,15 @@ describe('listPrompts', () => { beforeEach(async () => { vi.clearAllMocks() const shell = await import('../../../src/services/shell.js') - vi.mocked(shell.exec).mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }) + vi.mocked(shell.exec).mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}) }) it('returns parsed Prompt[] from repository tree + file contents', async () => { server.use( treeHandler([ - { type: 'blob', path: 'coding/refactor-prompt.md', sha: 'abc' }, - { type: 'blob', path: 'testing/test-generator.md', sha: 'def' }, - { type: 'tree', path: 'coding', sha: 'xyz' }, // directories should be skipped + {type: 'blob', path: 'coding/refactor-prompt.md', sha: 'abc'}, + {type: 'blob', path: 'testing/test-generator.md', sha: 'def'}, + {type: 'tree', path: 'coding', sha: 'xyz'}, // directories should be skipped ]), contentsHandler({ 
'coding/refactor-prompt.md': PROMPT_1_CONTENT, @@ -105,7 +105,7 @@ describe('listPrompts', () => { }), ) - const { listPrompts } = await import('../../../src/services/prompts.js') + const {listPrompts} = await import('../../../src/services/prompts.js') const prompts = await listPrompts() expect(prompts).toHaveLength(2) @@ -124,11 +124,9 @@ describe('listPrompts', () => { }) it('returns empty array when repository has no markdown files', async () => { - server.use( - treeHandler([{ type: 'tree', path: 'coding', sha: 'xyz' }]), - ) + server.use(treeHandler([{type: 'tree', path: 'coding', sha: 'xyz'}])) - const { listPrompts } = await import('../../../src/services/prompts.js') + const {listPrompts} = await import('../../../src/services/prompts.js') const prompts = await listPrompts() expect(Array.isArray(prompts)).toBe(true) expect(prompts).toHaveLength(0) @@ -137,23 +135,23 @@ describe('listPrompts', () => { it('throws DvmiError when repository returns 404', async () => { server.use( http.get('https://api.github.com/repos/savez/prompt-for-ai/git/trees/:sha', () => - HttpResponse.json({ message: 'Not Found' }, { status: 404 }), + HttpResponse.json({message: 'Not Found'}, {status: 404}), ), ) - const { listPrompts } = await import('../../../src/services/prompts.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {listPrompts} = await import('../../../src/services/prompts.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(listPrompts()).rejects.toThrow(DvmiError) }) it('derives title and category from path when frontmatter is missing', async () => { server.use( - treeHandler([{ type: 'blob', path: 'general/my-plain-prompt.md', sha: 'aaa' }]), - contentsHandler({ 'general/my-plain-prompt.md': PLAIN_CONTENT }), + treeHandler([{type: 'blob', path: 'general/my-plain-prompt.md', sha: 'aaa'}]), + contentsHandler({'general/my-plain-prompt.md': PLAIN_CONTENT}), ) - const { listPrompts } = await 
import('../../../src/services/prompts.js') + const {listPrompts} = await import('../../../src/services/prompts.js') const prompts = await listPrompts() expect(prompts).toHaveLength(1) @@ -167,15 +165,13 @@ describe('fetchPromptByPath', () => { beforeEach(async () => { vi.clearAllMocks() const shell = await import('../../../src/services/shell.js') - vi.mocked(shell.exec).mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }) + vi.mocked(shell.exec).mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}) }) it('fetches and parses a single prompt by path', async () => { - server.use( - contentsHandler({ 'coding/refactor-prompt.md': PROMPT_1_CONTENT }), - ) + server.use(contentsHandler({'coding/refactor-prompt.md': PROMPT_1_CONTENT})) - const { fetchPromptByPath } = await import('../../../src/services/prompts.js') + const {fetchPromptByPath} = await import('../../../src/services/prompts.js') const prompt = await fetchPromptByPath('coding/refactor-prompt.md') expect(prompt.title).toBe('Refactor Prompt') @@ -184,12 +180,10 @@ describe('fetchPromptByPath', () => { }) it('throws DvmiError with actionable hint when path does not exist', async () => { - server.use( - contentsHandler({}, 404), - ) + server.use(contentsHandler({}, 404)) - const { fetchPromptByPath } = await import('../../../src/services/prompts.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {fetchPromptByPath} = await import('../../../src/services/prompts.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(fetchPromptByPath('nonexistent/file.md')).rejects.toThrow(DvmiError) await expect(fetchPromptByPath('nonexistent/file.md')).rejects.toThrow(/not found/i) @@ -204,21 +198,19 @@ describe('downloadPrompt', () => { vol.reset() vi.clearAllMocks() const shell = await import('../../../src/services/shell.js') - vi.mocked(shell.exec).mockResolvedValue({ stdout: 'fake-gh-token', stderr: '', exitCode: 0 }) + 
vi.mocked(shell.exec).mockResolvedValue({stdout: 'fake-gh-token', stderr: '', exitCode: 0}) }) it('writes file at the correct path with frontmatter preserved', async () => { - server.use( - contentsHandler({ 'coding/refactor-prompt.md': PROMPT_1_CONTENT }), - ) + server.use(contentsHandler({'coding/refactor-prompt.md': PROMPT_1_CONTENT})) - const { downloadPrompt } = await import('../../../src/services/prompts.js') + const {downloadPrompt} = await import('../../../src/services/prompts.js') const result = await downloadPrompt('coding/refactor-prompt.md', LOCAL_DIR) expect(result.skipped).toBe(false) expect(result.path).toBe(`${LOCAL_DIR}/coding/refactor-prompt.md`) - const { fs } = await import('memfs') + const {fs} = await import('memfs') const written = fs.readFileSync(result.path, 'utf8') expect(written).toContain('title: Refactor Prompt') expect(written).toContain('category: coding') @@ -226,24 +218,22 @@ describe('downloadPrompt', () => { }) it('creates intermediate directories if they do not exist', async () => { - server.use( - contentsHandler({ 'deep/nested/dir/prompt.md': PROMPT_2_CONTENT }), - ) + server.use(contentsHandler({'deep/nested/dir/prompt.md': PROMPT_2_CONTENT})) - const { downloadPrompt } = await import('../../../src/services/prompts.js') + const {downloadPrompt} = await import('../../../src/services/prompts.js') const result = await downloadPrompt('deep/nested/dir/prompt.md', LOCAL_DIR) expect(result.skipped).toBe(false) - const { fs } = await import('memfs') + const {fs} = await import('memfs') expect(fs.existsSync(`${LOCAL_DIR}/deep/nested/dir/prompt.md`)).toBe(true) }) it('skips without network call when file already exists and overwrite is not set', async () => { - const { fs } = await import('memfs') - fs.mkdirSync(`${LOCAL_DIR}/coding`, { recursive: true }) + const {fs} = await import('memfs') + fs.mkdirSync(`${LOCAL_DIR}/coding`, {recursive: true}) fs.writeFileSync(`${LOCAL_DIR}/coding/refactor-prompt.md`, 'existing content') - const { 
downloadPrompt } = await import('../../../src/services/prompts.js') + const {downloadPrompt} = await import('../../../src/services/prompts.js') const result = await downloadPrompt('coding/refactor-prompt.md', LOCAL_DIR) expect(result.skipped).toBe(true) @@ -253,16 +243,14 @@ describe('downloadPrompt', () => { }) it('overwrites existing file when opts.overwrite is true', async () => { - server.use( - contentsHandler({ 'coding/refactor-prompt.md': PROMPT_1_CONTENT }), - ) + server.use(contentsHandler({'coding/refactor-prompt.md': PROMPT_1_CONTENT})) - const { fs } = await import('memfs') - fs.mkdirSync(`${LOCAL_DIR}/coding`, { recursive: true }) + const {fs} = await import('memfs') + fs.mkdirSync(`${LOCAL_DIR}/coding`, {recursive: true}) fs.writeFileSync(`${LOCAL_DIR}/coding/refactor-prompt.md`, 'old content') - const { downloadPrompt } = await import('../../../src/services/prompts.js') - const result = await downloadPrompt('coding/refactor-prompt.md', LOCAL_DIR, { overwrite: true }) + const {downloadPrompt} = await import('../../../src/services/prompts.js') + const result = await downloadPrompt('coding/refactor-prompt.md', LOCAL_DIR, {overwrite: true}) expect(result.skipped).toBe(false) const written = fs.readFileSync(result.path, 'utf8') @@ -273,8 +261,8 @@ describe('downloadPrompt', () => { it('throws DvmiError when prompt path does not exist in repo', async () => { server.use(contentsHandler({}, 404)) - const { downloadPrompt } = await import('../../../src/services/prompts.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {downloadPrompt} = await import('../../../src/services/prompts.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(downloadPrompt('nonexistent/prompt.md', LOCAL_DIR)).rejects.toThrow(DvmiError) }) @@ -289,7 +277,7 @@ describe('resolveLocalPrompt', () => { // Redirect Node's fs/promises to memfs so resolveLocalPrompt reads from an in-memory FS vi.mock('node:fs/promises', async () => 
{ - const { fs } = await import('memfs') + const {fs} = await import('memfs') return fs.promises }) @@ -307,11 +295,11 @@ tags: --- Do something locally.` - const { fs } = await import('memfs') - fs.mkdirSync(`${LOCAL_DIR}/coding`, { recursive: true }) + const {fs} = await import('memfs') + fs.mkdirSync(`${LOCAL_DIR}/coding`, {recursive: true}) fs.writeFileSync(`${LOCAL_DIR}/coding/local-refactor.md`, content) - const { resolveLocalPrompt } = await import('../../../src/services/prompts.js') + const {resolveLocalPrompt} = await import('../../../src/services/prompts.js') const prompt = await resolveLocalPrompt('coding/local-refactor.md', LOCAL_DIR) expect(prompt.title).toBe('Local Refactor') @@ -321,11 +309,11 @@ Do something locally.` }) it('derives title from path when frontmatter is absent', async () => { - const { fs } = await import('memfs') - fs.mkdirSync(`${LOCAL_DIR}/general`, { recursive: true }) + const {fs} = await import('memfs') + fs.mkdirSync(`${LOCAL_DIR}/general`, {recursive: true}) fs.writeFileSync(`${LOCAL_DIR}/general/my-plain-prompt.md`, 'Plain text prompt.') - const { resolveLocalPrompt } = await import('../../../src/services/prompts.js') + const {resolveLocalPrompt} = await import('../../../src/services/prompts.js') const prompt = await resolveLocalPrompt('general/my-plain-prompt.md', LOCAL_DIR) expect(prompt.title).toBe('My Plain Prompt') @@ -333,16 +321,12 @@ Do something locally.` }) it('throws DvmiError with actionable hint when file does not exist', async () => { - const { resolveLocalPrompt } = await import('../../../src/services/prompts.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {resolveLocalPrompt} = await import('../../../src/services/prompts.js') + const {DvmiError} = await import('../../../src/utils/errors.js') - await expect( - resolveLocalPrompt('nonexistent/prompt.md', LOCAL_DIR), - ).rejects.toThrow(DvmiError) + await expect(resolveLocalPrompt('nonexistent/prompt.md', 
LOCAL_DIR)).rejects.toThrow(DvmiError) - await expect( - resolveLocalPrompt('nonexistent/prompt.md', LOCAL_DIR), - ).rejects.toThrow(/not found/i) + await expect(resolveLocalPrompt('nonexistent/prompt.md', LOCAL_DIR)).rejects.toThrow(/not found/i) }) }) @@ -357,33 +341,33 @@ describe('invokeTool', () => { it('calls execa with correct args for opencode', async () => { const shell = await import('../../../src/services/shell.js') - const { execa: mockedExeca } = await import('execa') + const {execa: mockedExeca} = await import('execa') vi.mocked(shell.which).mockResolvedValue('/usr/local/bin/opencode') vi.mocked(mockedExeca).mockResolvedValue(/** @type {any} */ ({})) - const { invokeTool } = await import('../../../src/services/prompts.js') + const {invokeTool} = await import('../../../src/services/prompts.js') await invokeTool('opencode', 'Refactor my code') expect(mockedExeca).toHaveBeenCalledWith( 'opencode', ['--prompt', 'Refactor my code'], - expect.objectContaining({ stdio: 'inherit' }), + expect.objectContaining({stdio: 'inherit'}), ) }) it('calls execa with correct args for copilot (gh copilot -p)', async () => { const shell = await import('../../../src/services/shell.js') - const { execa: mockedExeca } = await import('execa') + const {execa: mockedExeca} = await import('execa') vi.mocked(shell.which).mockResolvedValue('/usr/local/bin/gh') vi.mocked(mockedExeca).mockResolvedValue(/** @type {any} */ ({})) - const { invokeTool } = await import('../../../src/services/prompts.js') + const {invokeTool} = await import('../../../src/services/prompts.js') await invokeTool('copilot', 'Write tests for me') expect(mockedExeca).toHaveBeenCalledWith( 'gh', ['copilot', '-p', 'Write tests for me'], - expect.objectContaining({ stdio: 'inherit' }), + expect.objectContaining({stdio: 'inherit'}), ) }) @@ -391,19 +375,17 @@ describe('invokeTool', () => { const shell = await import('../../../src/services/shell.js') vi.mocked(shell.which).mockResolvedValue(null) - const { invokeTool 
} = await import('../../../src/services/prompts.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {invokeTool} = await import('../../../src/services/prompts.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(invokeTool('opencode', 'some prompt')).rejects.toThrow(DvmiError) await expect(invokeTool('opencode', 'some prompt')).rejects.toThrow(/not installed/i) }) it('throws DvmiError for an unknown tool name', async () => { - const { invokeTool } = await import('../../../src/services/prompts.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {invokeTool} = await import('../../../src/services/prompts.js') + const {DvmiError} = await import('../../../src/utils/errors.js') - await expect( - invokeTool(/** @type {any} */ ('unknown-tool'), 'some prompt'), - ).rejects.toThrow(DvmiError) + await expect(invokeTool(/** @type {any} */ ('unknown-tool'), 'some prompt')).rejects.toThrow(DvmiError) }) }) diff --git a/tests/services/prompts/skills-sh.test.js b/tests/services/prompts/skills-sh.test.js index 3c7ea9c..ddd9fbb 100644 --- a/tests/services/prompts/skills-sh.test.js +++ b/tests/services/prompts/skills-sh.test.js @@ -1,6 +1,6 @@ -import { describe, it, expect } from 'vitest' -import { http, HttpResponse } from 'msw' -import { server } from '../../setup.js' +import {describe, it, expect} from 'vitest' +import {http, HttpResponse} from 'msw' +import {server} from '../../setup.js' describe('searchSkills', () => { it('returns parsed Skill[] from skills.sh API (skills key)', async () => { @@ -10,8 +10,8 @@ describe('searchSkills', () => { query: 'review', searchType: 'fuzzy', skills: [ - { id: 'code-review', name: 'Code Review', description: 'Review code changes', installs: 1200 }, - { id: 'sql-gen', name: 'SQL Generator', description: 'Generate SQL queries', installs: 800 }, + {id: 'code-review', name: 'Code Review', description: 'Review code changes', installs: 1200}, + {id: 
'sql-gen', name: 'SQL Generator', description: 'Generate SQL queries', installs: 800}, ], count: 2, duration_ms: 42, @@ -19,7 +19,7 @@ describe('searchSkills', () => { ), ) - const { searchSkills } = await import('../../../src/services/skills-sh.js') + const {searchSkills} = await import('../../../src/services/skills-sh.js') const skills = await searchSkills('review') expect(skills).toHaveLength(2) @@ -35,13 +35,13 @@ describe('searchSkills', () => { let capturedUrl server.use( - http.get('https://skills.sh/api/search', ({ request }) => { + http.get('https://skills.sh/api/search', ({request}) => { capturedUrl = new URL(request.url) - return HttpResponse.json({ skills: [] }) + return HttpResponse.json({skills: []}) }), ) - const { searchSkills } = await import('../../../src/services/skills-sh.js') + const {searchSkills} = await import('../../../src/services/skills-sh.js') await searchSkills('refactor', 10) expect(capturedUrl?.searchParams.get('q')).toBe('refactor') @@ -49,11 +49,9 @@ describe('searchSkills', () => { }) it('returns empty array when API returns empty skills array', async () => { - server.use( - http.get('https://skills.sh/api/search', () => HttpResponse.json({ skills: [], count: 0 })), - ) + server.use(http.get('https://skills.sh/api/search', () => HttpResponse.json({skills: [], count: 0}))) - const { searchSkills } = await import('../../../src/services/skills-sh.js') + const {searchSkills} = await import('../../../src/services/skills-sh.js') const skills = await searchSkills('coding') expect(Array.isArray(skills)).toBe(true) expect(skills).toHaveLength(0) @@ -62,19 +60,19 @@ describe('searchSkills', () => { it('also handles plain array response format', async () => { server.use( http.get('https://skills.sh/api/search', () => - HttpResponse.json([{ id: 'plain', name: 'Plain Skill', installs: 5 }]), + HttpResponse.json([{id: 'plain', name: 'Plain Skill', installs: 5}]), ), ) - const { searchSkills } = await import('../../../src/services/skills-sh.js') 
+ const {searchSkills} = await import('../../../src/services/skills-sh.js') const skills = await searchSkills('plain') expect(skills).toHaveLength(1) expect(skills[0].id).toBe('plain') }) it('throws DvmiError when query is missing or too short', async () => { - const { searchSkills } = await import('../../../src/services/skills-sh.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {searchSkills} = await import('../../../src/services/skills-sh.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(searchSkills('')).rejects.toThrow(DvmiError) await expect(searchSkills('a')).rejects.toThrow(DvmiError) @@ -83,13 +81,11 @@ describe('searchSkills', () => { it('throws DvmiError when API returns non-OK status', async () => { server.use( - http.get('https://skills.sh/api/search', () => - HttpResponse.json({ error: 'server error' }, { status: 500 }), - ), + http.get('https://skills.sh/api/search', () => HttpResponse.json({error: 'server error'}, {status: 500})), ) - const { searchSkills } = await import('../../../src/services/skills-sh.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {searchSkills} = await import('../../../src/services/skills-sh.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(searchSkills('test-query')).rejects.toThrow(DvmiError) }) diff --git a/tests/services/prompts/speckit.test.js b/tests/services/prompts/speckit.test.js index c8d36a7..66cb78e 100644 --- a/tests/services/prompts/speckit.test.js +++ b/tests/services/prompts/speckit.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' +import {describe, it, expect, vi, beforeEach} from 'vitest' // ── Mock shell.js ──────────────────────────────────────────────────────────── vi.mock('../../../src/services/shell.js', () => ({ @@ -20,19 +20,19 @@ describe('isUvInstalled', () => { beforeEach(() => vi.resetAllMocks()) it('returns true when uv is 
found in PATH', async () => { - const { which } = await import('../../../src/services/shell.js') + const {which} = await import('../../../src/services/shell.js') vi.mocked(which).mockResolvedValue('/usr/local/bin/uv') - const { isUvInstalled } = await import('../../../src/services/speckit.js') + const {isUvInstalled} = await import('../../../src/services/speckit.js') expect(await isUvInstalled()).toBe(true) expect(which).toHaveBeenCalledWith('uv') }) it('returns false when uv is not found in PATH', async () => { - const { which } = await import('../../../src/services/shell.js') + const {which} = await import('../../../src/services/shell.js') vi.mocked(which).mockResolvedValue(null) - const { isUvInstalled } = await import('../../../src/services/speckit.js') + const {isUvInstalled} = await import('../../../src/services/speckit.js') expect(await isUvInstalled()).toBe(false) }) }) @@ -41,19 +41,19 @@ describe('isSpecifyInstalled', () => { beforeEach(() => vi.resetAllMocks()) it('returns true when specify is found in PATH', async () => { - const { which } = await import('../../../src/services/shell.js') + const {which} = await import('../../../src/services/shell.js') vi.mocked(which).mockResolvedValue('/usr/local/bin/specify') - const { isSpecifyInstalled } = await import('../../../src/services/speckit.js') + const {isSpecifyInstalled} = await import('../../../src/services/speckit.js') expect(await isSpecifyInstalled()).toBe(true) expect(which).toHaveBeenCalledWith('specify') }) it('returns false when specify is not found', async () => { - const { which } = await import('../../../src/services/shell.js') + const {which} = await import('../../../src/services/shell.js') vi.mocked(which).mockResolvedValue(null) - const { isSpecifyInstalled } = await import('../../../src/services/speckit.js') + const {isSpecifyInstalled} = await import('../../../src/services/speckit.js') expect(await isSpecifyInstalled()).toBe(false) }) }) @@ -62,36 +62,33 @@ describe('installSpecifyCli', 
() => { beforeEach(() => vi.resetAllMocks()) it('runs uv tool install with the correct arguments', async () => { - const { exec } = await import('../../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: 'Installed', stderr: '', exitCode: 0 }) + const {exec} = await import('../../../src/services/shell.js') + vi.mocked(exec).mockResolvedValue({stdout: 'Installed', stderr: '', exitCode: 0}) - const { installSpecifyCli } = await import('../../../src/services/speckit.js') + const {installSpecifyCli} = await import('../../../src/services/speckit.js') const result = await installSpecifyCli() - expect(exec).toHaveBeenCalledWith( - 'uv', - expect.arrayContaining(['tool', 'install', 'specify-cli', '--from']), - ) + expect(exec).toHaveBeenCalledWith('uv', expect.arrayContaining(['tool', 'install', 'specify-cli', '--from'])) expect(result.exitCode).toBe(0) }) it('passes --force when opts.force is true', async () => { - const { exec } = await import('../../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: '', stderr: '', exitCode: 0 }) + const {exec} = await import('../../../src/services/shell.js') + vi.mocked(exec).mockResolvedValue({stdout: '', stderr: '', exitCode: 0}) - const { installSpecifyCli } = await import('../../../src/services/speckit.js') - await installSpecifyCli({ force: true }) + const {installSpecifyCli} = await import('../../../src/services/speckit.js') + await installSpecifyCli({force: true}) const args = vi.mocked(exec).mock.calls[0][1] expect(args).toContain('--force') }) it('throws DvmiError when uv exits non-zero', async () => { - const { exec } = await import('../../../src/services/shell.js') - vi.mocked(exec).mockResolvedValue({ stdout: '', stderr: 'error: package not found', exitCode: 1 }) + const {exec} = await import('../../../src/services/shell.js') + vi.mocked(exec).mockResolvedValue({stdout: '', stderr: 'error: package not found', exitCode: 1}) - const { installSpecifyCli } = await 
import('../../../src/services/speckit.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {installSpecifyCli} = await import('../../../src/services/speckit.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(installSpecifyCli()).rejects.toBeInstanceOf(DvmiError) }) @@ -101,25 +98,25 @@ describe('runSpecifyInit', () => { beforeEach(() => vi.resetAllMocks()) it('runs specify init --here in the given directory', async () => { - const { execa } = await import('execa') - vi.mocked(execa).mockResolvedValue({ exitCode: 0 }) + const {execa} = await import('execa') + vi.mocked(execa).mockResolvedValue({exitCode: 0}) - const { runSpecifyInit } = await import('../../../src/services/speckit.js') + const {runSpecifyInit} = await import('../../../src/services/speckit.js') await runSpecifyInit('/my/project') expect(execa).toHaveBeenCalledWith( 'specify', expect.arrayContaining(['init', '--here']), - expect.objectContaining({ cwd: '/my/project', stdio: 'inherit' }), + expect.objectContaining({cwd: '/my/project', stdio: 'inherit'}), ) }) it('passes --ai flag when provided', async () => { - const { execa } = await import('execa') - vi.mocked(execa).mockResolvedValue({ exitCode: 0 }) + const {execa} = await import('execa') + vi.mocked(execa).mockResolvedValue({exitCode: 0}) - const { runSpecifyInit } = await import('../../../src/services/speckit.js') - await runSpecifyInit('/my/project', { ai: 'opencode' }) + const {runSpecifyInit} = await import('../../../src/services/speckit.js') + await runSpecifyInit('/my/project', {ai: 'opencode'}) const args = vi.mocked(execa).mock.calls[0][1] expect(args).toContain('--ai') @@ -127,22 +124,22 @@ describe('runSpecifyInit', () => { }) it('passes --force flag when provided', async () => { - const { execa } = await import('execa') - vi.mocked(execa).mockResolvedValue({ exitCode: 0 }) + const {execa} = await import('execa') + vi.mocked(execa).mockResolvedValue({exitCode: 0}) - const { 
runSpecifyInit } = await import('../../../src/services/speckit.js') - await runSpecifyInit('/my/project', { force: true }) + const {runSpecifyInit} = await import('../../../src/services/speckit.js') + await runSpecifyInit('/my/project', {force: true}) const args = vi.mocked(execa).mock.calls[0][1] expect(args).toContain('--force') }) it('throws DvmiError when specify init exits non-zero', async () => { - const { execa } = await import('execa') - vi.mocked(execa).mockResolvedValue({ exitCode: 1 }) + const {execa} = await import('execa') + vi.mocked(execa).mockResolvedValue({exitCode: 1}) - const { runSpecifyInit } = await import('../../../src/services/speckit.js') - const { DvmiError } = await import('../../../src/utils/errors.js') + const {runSpecifyInit} = await import('../../../src/services/speckit.js') + const {DvmiError} = await import('../../../src/utils/errors.js') await expect(runSpecifyInit('/my/project')).rejects.toBeInstanceOf(DvmiError) }) diff --git a/tests/services/security/security.test.js b/tests/services/security/security.test.js index 361bc80..2ef7891 100644 --- a/tests/services/security/security.test.js +++ b/tests/services/security/security.test.js @@ -1,7 +1,7 @@ -import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest' -import { writeFile, unlink, mkdtemp } from 'node:fs/promises' -import { join } from 'node:path' -import { tmpdir } from 'node:os' +import {describe, it, expect, vi, beforeEach, afterEach} from 'vitest' +import {writeFile, unlink, mkdtemp} from 'node:fs/promises' +import {join} from 'node:path' +import {tmpdir} from 'node:os' // We mock shell.js before importing security.js vi.mock('../../../src/services/shell.js', () => ({ @@ -10,8 +10,8 @@ vi.mock('../../../src/services/shell.js', () => ({ execOrThrow: vi.fn(), })) -import { checkToolStatus, appendToShellProfile, deriveOverallStatus } from '../../../src/services/security.js' -import { which, exec } from '../../../src/services/shell.js' +import {checkToolStatus, 
appendToShellProfile, deriveOverallStatus} from '../../../src/services/security.js' +import {which, exec} from '../../../src/services/shell.js' beforeEach(() => { vi.resetAllMocks() @@ -23,7 +23,7 @@ beforeEach(() => { describe('checkToolStatus() — macOS', () => { it('returns n/a for gpg, pass, gcm on macOS', async () => { which.mockResolvedValue(null) - exec.mockResolvedValue({ stdout: '', stderr: '', exitCode: 1 }) + exec.mockResolvedValue({stdout: '', stderr: '', exitCode: 1}) const tools = await checkToolStatus('macos') const ids = tools.map((t) => t.id) @@ -41,9 +41,9 @@ describe('checkToolStatus() — macOS', () => { it('aws-vault installed on macOS returns installed', async () => { which.mockImplementation((cmd) => (cmd === 'aws-vault' ? '/usr/local/bin/aws-vault' : null)) exec.mockImplementation((cmd, _args) => { - if (cmd === 'aws-vault') return Promise.resolve({ stdout: 'v6.6.2', stderr: '', exitCode: 0 }) - if (cmd === 'git') return Promise.resolve({ stdout: 'osxkeychain', stderr: '', exitCode: 0 }) - return Promise.resolve({ stdout: '', stderr: '', exitCode: 0 }) + if (cmd === 'aws-vault') return Promise.resolve({stdout: 'v6.6.2', stderr: '', exitCode: 0}) + if (cmd === 'git') return Promise.resolve({stdout: 'osxkeychain', stderr: '', exitCode: 0}) + return Promise.resolve({stdout: '', stderr: '', exitCode: 0}) }) const tools = await checkToolStatus('macos') @@ -54,7 +54,7 @@ describe('checkToolStatus() — macOS', () => { it('aws-vault not installed returns not-installed', async () => { which.mockResolvedValue(null) - exec.mockResolvedValue({ stdout: '', stderr: '', exitCode: 1 }) + exec.mockResolvedValue({stdout: '', stderr: '', exitCode: 1}) const tools = await checkToolStatus('macos') const awsVault = tools.find((t) => t.id === 'aws-vault') @@ -65,9 +65,9 @@ describe('checkToolStatus() — macOS', () => { which.mockResolvedValue(null) exec.mockImplementation((cmd, args) => { if (cmd === 'git' && args?.includes('credential.helper')) { - return 
Promise.resolve({ stdout: 'osxkeychain', stderr: '', exitCode: 0 }) + return Promise.resolve({stdout: 'osxkeychain', stderr: '', exitCode: 0}) } - return Promise.resolve({ stdout: '', stderr: '', exitCode: 1 }) + return Promise.resolve({stdout: '', stderr: '', exitCode: 1}) }) const tools = await checkToolStatus('macos') @@ -79,9 +79,9 @@ describe('checkToolStatus() — macOS', () => { which.mockResolvedValue(null) exec.mockImplementation((cmd, args) => { if (cmd === 'git' && args?.includes('credential.helper')) { - return Promise.resolve({ stdout: 'store', stderr: '', exitCode: 0 }) + return Promise.resolve({stdout: 'store', stderr: '', exitCode: 0}) } - return Promise.resolve({ stdout: '', stderr: '', exitCode: 1 }) + return Promise.resolve({stdout: '', stderr: '', exitCode: 1}) }) const tools = await checkToolStatus('macos') @@ -96,7 +96,7 @@ describe('checkToolStatus() — macOS', () => { describe('checkToolStatus() — Linux', () => { it('returns n/a for osxkeychain on Linux', async () => { which.mockResolvedValue(null) - exec.mockResolvedValue({ stdout: '', stderr: '', exitCode: 1 }) + exec.mockResolvedValue({stdout: '', stderr: '', exitCode: 1}) const tools = await checkToolStatus('linux') const ks = tools.find((t) => t.id === 'osxkeychain') @@ -109,8 +109,8 @@ describe('checkToolStatus() — Linux', () => { which.mockImplementation((cmd) => (cmd === 'aws-vault' ? '/usr/local/bin/aws-vault' : null)) exec.mockImplementation((cmd) => { - if (cmd === 'aws-vault') return Promise.resolve({ stdout: 'v6.6.2', stderr: '', exitCode: 0 }) - return Promise.resolve({ stdout: '', stderr: '', exitCode: 1 }) + if (cmd === 'aws-vault') return Promise.resolve({stdout: 'v6.6.2', stderr: '', exitCode: 0}) + return Promise.resolve({stdout: '', stderr: '', exitCode: 1}) }) const tools = await checkToolStatus('linux') @@ -125,8 +125,8 @@ describe('checkToolStatus() — Linux', () => { which.mockImplementation((cmd) => (cmd === 'aws-vault' ? 
'/usr/local/bin/aws-vault' : null)) exec.mockImplementation((cmd) => { - if (cmd === 'aws-vault') return Promise.resolve({ stdout: 'v6.6.2', stderr: '', exitCode: 0 }) - return Promise.resolve({ stdout: '', stderr: '', exitCode: 0 }) + if (cmd === 'aws-vault') return Promise.resolve({stdout: 'v6.6.2', stderr: '', exitCode: 0}) + return Promise.resolve({stdout: '', stderr: '', exitCode: 0}) }) const tools = await checkToolStatus('linux') @@ -139,11 +139,11 @@ describe('checkToolStatus() — Linux', () => { it('gcm misconfigured when credential.credentialStore != gpg', async () => { which.mockImplementation((cmd) => (cmd === 'git-credential-manager' ? '/usr/bin/git-credential-manager' : null)) exec.mockImplementation((cmd, args) => { - if (cmd === 'git-credential-manager') return Promise.resolve({ stdout: '2.4.1', stderr: '', exitCode: 0 }) + if (cmd === 'git-credential-manager') return Promise.resolve({stdout: '2.4.1', stderr: '', exitCode: 0}) if (cmd === 'git' && args?.includes('credential.credentialStore')) { - return Promise.resolve({ stdout: 'plaintext', stderr: '', exitCode: 0 }) + return Promise.resolve({stdout: 'plaintext', stderr: '', exitCode: 0}) } - return Promise.resolve({ stdout: '', stderr: '', exitCode: 1 }) + return Promise.resolve({stdout: '', stderr: '', exitCode: 1}) }) const tools = await checkToolStatus('linux') @@ -179,7 +179,7 @@ describe('appendToShellProfile()', () => { await writeFile(profilePath, '# existing\n') await appendToShellProfile('export AWS_VAULT_BACKEND=pass') - const { readFile } = await import('node:fs/promises') + const {readFile} = await import('node:fs/promises') const contents = await readFile(profilePath, 'utf8') expect(contents).toContain('export AWS_VAULT_BACKEND=pass') }) @@ -189,7 +189,7 @@ describe('appendToShellProfile()', () => { await appendToShellProfile('export AWS_VAULT_BACKEND=pass') await appendToShellProfile('export AWS_VAULT_BACKEND=pass') - const { readFile } = await import('node:fs/promises') + const 
{readFile} = await import('node:fs/promises') const contents = await readFile(profilePath, 'utf8') const count = (contents.match(/export AWS_VAULT_BACKEND=pass/g) ?? []).length expect(count).toBe(1) @@ -202,40 +202,38 @@ describe('appendToShellProfile()', () => { describe('deriveOverallStatus()', () => { it('returns success when all applicable tools are installed', () => { const tools = [ - { id: 'aws-vault', displayName: 'aws-vault', status: 'installed', version: '6.6.2', hint: null }, - { id: 'gcm', displayName: 'GCM', status: 'n/a', version: null, hint: null }, - { id: 'osxkeychain', displayName: 'Keychain', status: 'installed', version: null, hint: null }, + {id: 'aws-vault', displayName: 'aws-vault', status: 'installed', version: '6.6.2', hint: null}, + {id: 'gcm', displayName: 'GCM', status: 'n/a', version: null, hint: null}, + {id: 'osxkeychain', displayName: 'Keychain', status: 'installed', version: null, hint: null}, ] expect(deriveOverallStatus(tools)).toBe('success') }) it('returns partial when some tools are installed and some are not', () => { const tools = [ - { id: 'aws-vault', displayName: 'aws-vault', status: 'installed', version: '6.6.2', hint: null }, - { id: 'osxkeychain', displayName: 'Keychain', status: 'not-installed', version: null, hint: null }, + {id: 'aws-vault', displayName: 'aws-vault', status: 'installed', version: '6.6.2', hint: null}, + {id: 'osxkeychain', displayName: 'Keychain', status: 'not-installed', version: null, hint: null}, ] expect(deriveOverallStatus(tools)).toBe('partial') }) it('returns not-configured when no applicable tools are installed', () => { const tools = [ - { id: 'aws-vault', displayName: 'aws-vault', status: 'not-installed', version: null, hint: null }, - { id: 'osxkeychain', displayName: 'Keychain', status: 'not-installed', version: null, hint: null }, + {id: 'aws-vault', displayName: 'aws-vault', status: 'not-installed', version: null, hint: null}, + {id: 'osxkeychain', displayName: 'Keychain', status: 
'not-installed', version: null, hint: null}, ] expect(deriveOverallStatus(tools)).toBe('not-configured') }) it('returns not-configured when all tools are n/a', () => { - const tools = [ - { id: 'gcm', displayName: 'GCM', status: 'n/a', version: null, hint: null }, - ] + const tools = [{id: 'gcm', displayName: 'GCM', status: 'n/a', version: null, hint: null}] expect(deriveOverallStatus(tools)).toBe('not-configured') }) it('treats misconfigured as not-installed for partial calculation', () => { const tools = [ - { id: 'aws-vault', displayName: 'aws-vault', status: 'misconfigured', version: null, hint: null }, - { id: 'osxkeychain', displayName: 'Keychain', status: 'installed', version: null, hint: null }, + {id: 'aws-vault', displayName: 'aws-vault', status: 'misconfigured', version: null, hint: null}, + {id: 'osxkeychain', displayName: 'Keychain', status: 'installed', version: null, hint: null}, ] expect(deriveOverallStatus(tools)).toBe('partial') }) diff --git a/tests/services/version-check.test.js b/tests/services/version-check.test.js index 4b2a910..7681ee4 100644 --- a/tests/services/version-check.test.js +++ b/tests/services/version-check.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, beforeEach } from 'vitest' +import {describe, it, expect, vi, beforeEach} from 'vitest' vi.mock('../../src/services/config.js', () => ({ loadConfig: vi.fn(), @@ -17,50 +17,56 @@ describe('checkForUpdate', () => { beforeEach(() => vi.clearAllMocks()) it('returns hasUpdate true when newer version available', async () => { - const { loadConfig } = await import('../../src/services/config.js') - const { exec } = await import('../../src/services/shell.js') + const {loadConfig} = await import('../../src/services/config.js') + const {exec} = await import('../../src/services/shell.js') vi.mocked(loadConfig).mockResolvedValue({ - org: 'acme', awsProfile: 'dev', awsRegion: 'eu-west-1', + org: 'acme', + awsProfile: 'dev', + awsRegion: 'eu-west-1', lastVersionCheck: new 
Date(Date.now() - 25 * 60 * 60 * 1000).toISOString(), }) - vi.mocked(exec).mockResolvedValue({ exitCode: 0, stdout: 'v9.9.9', stderr: '' }) + vi.mocked(exec).mockResolvedValue({exitCode: 0, stdout: 'v9.9.9', stderr: ''}) - const { checkForUpdate } = await import('../../src/services/version-check.js') - const { hasUpdate, latest } = await checkForUpdate({ force: true }) + const {checkForUpdate} = await import('../../src/services/version-check.js') + const {hasUpdate, latest} = await checkForUpdate({force: true}) expect(latest).toBe('9.9.9') // il prefisso "v" viene rimosso expect(hasUpdate).toBe(true) }) it('uses cached version when check is within 24h', async () => { - const { loadConfig } = await import('../../src/services/config.js') - const { exec } = await import('../../src/services/shell.js') - const { getCurrentVersion } = await import('../../src/services/version-check.js') + const {loadConfig} = await import('../../src/services/config.js') + const {exec} = await import('../../src/services/shell.js') + const {getCurrentVersion} = await import('../../src/services/version-check.js') const currentVersion = await getCurrentVersion() vi.mocked(loadConfig).mockResolvedValue({ - org: 'acme', awsProfile: 'dev', awsRegion: 'eu-west-1', + org: 'acme', + awsProfile: 'dev', + awsRegion: 'eu-west-1', lastVersionCheck: new Date().toISOString(), latestVersion: currentVersion, // stessa versione installata → nessun aggiornamento }) - const { checkForUpdate } = await import('../../src/services/version-check.js') - const { hasUpdate } = await checkForUpdate() + const {checkForUpdate} = await import('../../src/services/version-check.js') + const {hasUpdate} = await checkForUpdate() expect(vi.mocked(exec)).not.toHaveBeenCalled() expect(hasUpdate).toBe(false) }) it('returns hasUpdate false when registry unreachable', async () => { - const { loadConfig } = await import('../../src/services/config.js') - const { exec } = await import('../../src/services/shell.js') + const 
{loadConfig} = await import('../../src/services/config.js') + const {exec} = await import('../../src/services/shell.js') vi.mocked(loadConfig).mockResolvedValue({ - org: 'acme', awsProfile: 'dev', awsRegion: 'eu-west-1', + org: 'acme', + awsProfile: 'dev', + awsRegion: 'eu-west-1', }) vi.mocked(exec).mockRejectedValue(new Error('network error')) - const { checkForUpdate } = await import('../../src/services/version-check.js') - const { hasUpdate } = await checkForUpdate({ force: true }) + const {checkForUpdate} = await import('../../src/services/version-check.js') + const {hasUpdate} = await checkForUpdate({force: true}) expect(hasUpdate).toBe(false) }) }) diff --git a/tests/setup.js b/tests/setup.js index 9a0ac8b..6b7b0a8 100644 --- a/tests/setup.js +++ b/tests/setup.js @@ -1,9 +1,9 @@ -import { setupServer } from 'msw/node' -import { handlers } from './fixtures/msw-handlers.js' -import { beforeAll, afterAll, afterEach } from 'vitest' +import {setupServer} from 'msw/node' +import {handlers} from './fixtures/msw-handlers.js' +import {beforeAll, afterAll, afterEach} from 'vitest' export const server = setupServer(...handlers) -beforeAll(() => server.listen({ onUnhandledRequest: 'warn' })) +beforeAll(() => server.listen({onUnhandledRequest: 'warn'})) afterEach(() => server.resetHandlers()) afterAll(() => server.close()) diff --git a/tests/snapshots/__snapshots__/sync-config-ai.test.js.snap b/tests/snapshots/__snapshots__/sync-config-ai.test.js.snap new file mode 100644 index 0000000..cb42b1c --- /dev/null +++ b/tests/snapshots/__snapshots__/sync-config-ai.test.js.snap @@ -0,0 +1,23 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`dvmi sync-config-ai snapshots > --help output matches snapshot 1`] = ` +"Manage AI coding tool configurations across environments via TUI + +USAGE + $ dvmi sync-config-ai [--json] [-h] + +FLAGS + -h, --help Show CLI help. + +GLOBAL FLAGS + --json Format output as json. 
+ +DESCRIPTION + Manage AI coding tool configurations across environments via TUI + +EXAMPLES + $ dvmi sync-config-ai + + $ dvmi sync-config-ai --json +" +`; diff --git a/tests/snapshots/sync-config-ai.test.js b/tests/snapshots/sync-config-ai.test.js new file mode 100644 index 0000000..29ef362 --- /dev/null +++ b/tests/snapshots/sync-config-ai.test.js @@ -0,0 +1,10 @@ +import {describe, it, expect} from 'vitest' +import {runCli} from '../integration/helpers.js' + +describe('dvmi sync-config-ai snapshots', () => { + it('--help output matches snapshot', async () => { + const {stdout, exitCode} = await runCli(['sync-config-ai', '--help']) + expect(exitCode).toBe(0) + expect(stdout).toMatchSnapshot() + }) +}) diff --git a/tests/unit/formatters/charts.test.js b/tests/unit/formatters/charts.test.js index afe343e..e6cd1e2 100644 --- a/tests/unit/formatters/charts.test.js +++ b/tests/unit/formatters/charts.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { barChart, lineChart } from '../../../src/formatters/charts.js' +import {describe, it, expect} from 'vitest' +import {barChart, lineChart} from '../../../src/formatters/charts.js' /** @type {import('../../../src/formatters/charts.js').ChartSeries} */ const singleSeries = { @@ -23,7 +23,7 @@ describe('barChart', () => { }) it('includes the title when provided', () => { - const result = barChart([singleSeries], { title: 'My Cost Chart' }) + const result = barChart([singleSeries], {title: 'My Cost Chart'}) expect(result).toContain('My Cost Chart') }) @@ -44,13 +44,13 @@ describe('barChart', () => { }) it('handles a series with all-zero values without errors', () => { - const zeroSeries = { name: 'Empty', values: [0, 0, 0], labels: ['2026-01-01', '2026-01-02', '2026-01-03'] } + const zeroSeries = {name: 'Empty', values: [0, 0, 0], labels: ['2026-01-01', '2026-01-02', '2026-01-03']} const result = barChart([zeroSeries]) expect(typeof result).toBe('string') }) it('respects the width option', () => 
{ - const result = barChart([singleSeries], { width: 60 }) + const result = barChart([singleSeries], {width: 60}) const lines = result.split('\n') // No line should far exceed the requested width (accounting for ANSI escape codes stripped) expect(lines.length).toBeGreaterThan(0) @@ -65,7 +65,7 @@ describe('lineChart', () => { }) it('includes the title when provided', () => { - const result = lineChart([singleSeries], { title: 'My Trend' }) + const result = lineChart([singleSeries], {title: 'My Trend'}) expect(result).toContain('My Trend') }) @@ -81,7 +81,7 @@ describe('lineChart', () => { }) it('handles single data point without errors', () => { - const onePt = { name: 'EC2', values: [42], labels: ['2026-01-01'] } + const onePt = {name: 'EC2', values: [42], labels: ['2026-01-01']} const result = lineChart([onePt]) expect(typeof result).toBe('string') }) diff --git a/tests/unit/formatters/cost.test.js b/tests/unit/formatters/cost.test.js index d52eae4..e94c350 100644 --- a/tests/unit/formatters/cost.test.js +++ b/tests/unit/formatters/cost.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { formatCurrency, calculateTotal, formatTrend, formatCostTable } from '../../../src/formatters/cost.js' +import {describe, it, expect} from 'vitest' +import {formatCurrency, calculateTotal, formatTrend, formatCostTable} from '../../../src/formatters/cost.js' describe('formatCurrency', () => { it('formats positive amount', () => { @@ -18,8 +18,8 @@ describe('formatCurrency', () => { describe('calculateTotal', () => { it('sums amounts', () => { const entries = [ - { serviceName: 'Lambda', amount: 10.5, unit: 'USD', period: { start: '', end: '' } }, - { serviceName: 'API Gateway', amount: 5.25, unit: 'USD', period: { start: '', end: '' } }, + {serviceName: 'Lambda', amount: 10.5, unit: 'USD', period: {start: '', end: ''}}, + {serviceName: 'API Gateway', amount: 5.25, unit: 'USD', period: {start: '', end: ''}}, ] 
expect(calculateTotal(entries)).toBeCloseTo(15.75) }) @@ -46,7 +46,7 @@ describe('formatTrend', () => { describe('formatCostTable', () => { it('includes service name and total', () => { const entries = [ - { serviceName: 'AWS Lambda', amount: 12.34, unit: 'USD', period: { start: '2026-02-01', end: '2026-03-01' } }, + {serviceName: 'AWS Lambda', amount: 12.34, unit: 'USD', period: {start: '2026-02-01', end: '2026-03-01'}}, ] const output = formatCostTable(entries, 'my-service') expect(output).toContain('my-service') diff --git a/tests/unit/formatters/dotfiles.test.js b/tests/unit/formatters/dotfiles.test.js index 0a8d02c..970543a 100644 --- a/tests/unit/formatters/dotfiles.test.js +++ b/tests/unit/formatters/dotfiles.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect } from 'vitest' +import {describe, it, expect} from 'vitest' import { formatDotfilesSetup, formatDotfilesSummary, @@ -12,17 +12,17 @@ import { // --------------------------------------------------------------------------- describe('formatDotfilesSummary()', () => { it('formats counts correctly', () => { - const result = formatDotfilesSummary({ total: 12, encrypted: 3, plaintext: 9 }) + const result = formatDotfilesSummary({total: 12, encrypted: 3, plaintext: 9}) expect(result).toBe('12 total: 9 plaintext, 3 encrypted') }) it('handles zero counts', () => { - const result = formatDotfilesSummary({ total: 0, encrypted: 0, plaintext: 0 }) + const result = formatDotfilesSummary({total: 0, encrypted: 0, plaintext: 0}) expect(result).toBe('0 total: 0 plaintext, 0 encrypted') }) it('handles all encrypted', () => { - const result = formatDotfilesSummary({ total: 5, encrypted: 5, plaintext: 0 }) + const result = formatDotfilesSummary({total: 5, encrypted: 5, plaintext: 0}) expect(result).toBe('5 total: 0 plaintext, 5 encrypted') }) }) @@ -112,10 +112,20 @@ describe('formatDotfilesStatus()', () => { repo: 'git@github.com:user/dotfiles.git', sourceDir: '/Users/dev/.local/share/chezmoi', files: [ - { path: 
'/Users/dev/.zshrc', sourcePath: '/Users/dev/.local/share/chezmoi/dot_zshrc', encrypted: false, type: 'file' }, - { path: '/Users/dev/.ssh/id_ed25519', sourcePath: '/Users/dev/.local/share/chezmoi/encrypted_id_ed25519.age', encrypted: true, type: 'file' }, + { + path: '/Users/dev/.zshrc', + sourcePath: '/Users/dev/.local/share/chezmoi/dot_zshrc', + encrypted: false, + type: 'file', + }, + { + path: '/Users/dev/.ssh/id_ed25519', + sourcePath: '/Users/dev/.local/share/chezmoi/encrypted_id_ed25519.age', + encrypted: true, + type: 'file', + }, ], - summary: { total: 2, encrypted: 1, plaintext: 1 }, + summary: {total: 2, encrypted: 1, plaintext: 1}, }) expect(result).toContain('macos') expect(result).toContain('/Users/dev/.local/share/chezmoi') @@ -133,7 +143,7 @@ describe('formatDotfilesStatus()', () => { repo: null, sourceDir: null, files: [], - summary: { total: 0, encrypted: 0, plaintext: 0 }, + summary: {total: 0, encrypted: 0, plaintext: 0}, }) expect(result).toContain('dvmi dotfiles setup') }) @@ -147,7 +157,7 @@ describe('formatDotfilesStatus()', () => { repo: null, sourceDir: '/Users/dev/.local/share/chezmoi', files: [], - summary: { total: 0, encrypted: 0, plaintext: 0 }, + summary: {total: 0, encrypted: 0, plaintext: 0}, }) expect(result).toContain('dvmi dotfiles add') }) @@ -160,10 +170,8 @@ describe('formatDotfilesStatus()', () => { encryptionConfigured: true, repo: null, sourceDir: null, - files: [ - { path: '/Users/dev/.ssh/id_ed25519', sourcePath: '', encrypted: true, type: 'file' }, - ], - summary: { total: 1, encrypted: 1, plaintext: 0 }, + files: [{path: '/Users/dev/.ssh/id_ed25519', sourcePath: '', encrypted: true, type: 'file'}], + summary: {total: 1, encrypted: 1, plaintext: 0}, }) expect(result).toContain('encrypted') }) @@ -177,7 +185,7 @@ describe('formatDotfilesStatus()', () => { repo: null, sourceDir: null, files: [], - summary: { total: 0, encrypted: 0, plaintext: 0 }, + summary: {total: 0, encrypted: 0, plaintext: 0}, }) 
expect(result).toContain('not configured') }) @@ -191,7 +199,7 @@ describe('formatDotfilesStatus()', () => { repo: null, sourceDir: null, files: [], - summary: { total: 0, encrypted: 0, plaintext: 0 }, + summary: {total: 0, encrypted: 0, plaintext: 0}, }) expect(result).toContain('not configured') }) @@ -204,8 +212,8 @@ describe('formatDotfilesAdd()', () => { it('shows added files with encryption status', () => { const result = formatDotfilesAdd({ added: [ - { path: '~/.zshrc', encrypted: false }, - { path: '~/.ssh/id_ed25519', encrypted: true }, + {path: '~/.zshrc', encrypted: false}, + {path: '~/.ssh/id_ed25519', encrypted: true}, ], skipped: [], rejected: [], @@ -219,7 +227,7 @@ describe('formatDotfilesAdd()', () => { it('shows skipped files with reason', () => { const result = formatDotfilesAdd({ added: [], - skipped: [{ path: '~/.bashrc', reason: 'File not found' }], + skipped: [{path: '~/.bashrc', reason: 'File not found'}], rejected: [], }) expect(result).toContain('~/.bashrc') @@ -231,7 +239,7 @@ describe('formatDotfilesAdd()', () => { const result = formatDotfilesAdd({ added: [], skipped: [], - rejected: [{ path: '/mnt/c/Users/dev/.gitconfig', reason: 'Windows filesystem paths not supported on WSL2' }], + rejected: [{path: '/mnt/c/Users/dev/.gitconfig', reason: 'Windows filesystem paths not supported on WSL2'}], }) expect(result).toContain('/mnt/c/Users/dev/.gitconfig') expect(result).toContain('Windows filesystem paths not supported on WSL2') @@ -239,7 +247,7 @@ describe('formatDotfilesAdd()', () => { }) it('shows no files processed message when all empty', () => { - const result = formatDotfilesAdd({ added: [], skipped: [], rejected: [] }) + const result = formatDotfilesAdd({added: [], skipped: [], rejected: []}) expect(result).toContain('No files processed') }) }) diff --git a/tests/unit/formatters/markdown.test.js b/tests/unit/formatters/markdown.test.js index ed49661..30223c6 100644 --- a/tests/unit/formatters/markdown.test.js +++ 
b/tests/unit/formatters/markdown.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { renderMarkdown, extractMermaidBlocks, toMermaidLiveUrl } from '../../../src/formatters/markdown.js' +import {describe, it, expect} from 'vitest' +import {renderMarkdown, extractMermaidBlocks, toMermaidLiveUrl} from '../../../src/formatters/markdown.js' describe('renderMarkdown', () => { it('returns a non-empty string for markdown input', () => { diff --git a/tests/unit/formatters/openapi.test.js b/tests/unit/formatters/openapi.test.js index 851fe9e..71b369b 100644 --- a/tests/unit/formatters/openapi.test.js +++ b/tests/unit/formatters/openapi.test.js @@ -1,10 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { - parseOpenApi, - parseAsyncApi, - isOpenApi, - isAsyncApi, -} from '../../../src/formatters/openapi.js' +import {describe, it, expect} from 'vitest' +import {parseOpenApi, parseAsyncApi, isOpenApi, isAsyncApi} from '../../../src/formatters/openapi.js' const OPENAPI_3_YAML = ` openapi: '3.0.0' @@ -66,34 +61,34 @@ channels: describe('isOpenApi', () => { it('returns true for OpenAPI 3.x doc', () => { - expect(isOpenApi({ openapi: '3.0.0' })).toBe(true) + expect(isOpenApi({openapi: '3.0.0'})).toBe(true) }) it('returns true for Swagger 2.0 doc', () => { - expect(isOpenApi({ swagger: '2.0' })).toBe(true) + expect(isOpenApi({swagger: '2.0'})).toBe(true) }) it('returns false for non-OpenAPI doc', () => { - expect(isOpenApi({ asyncapi: '2.0.0' })).toBe(false) + expect(isOpenApi({asyncapi: '2.0.0'})).toBe(false) expect(isOpenApi({})).toBe(false) }) }) describe('isAsyncApi', () => { it('returns true for AsyncAPI doc', () => { - expect(isAsyncApi({ asyncapi: '2.0.0' })).toBe(true) - expect(isAsyncApi({ asyncapi: '3.0.0' })).toBe(true) + expect(isAsyncApi({asyncapi: '2.0.0'})).toBe(true) + expect(isAsyncApi({asyncapi: '3.0.0'})).toBe(true) }) it('returns false for non-AsyncAPI doc', () => { - expect(isAsyncApi({ openapi: '3.0.0' })).toBe(false) + 
expect(isAsyncApi({openapi: '3.0.0'})).toBe(false) expect(isAsyncApi({})).toBe(false) }) }) describe('parseOpenApi', () => { it('parses a valid OpenAPI 3.x YAML', () => { - const { endpoints, error } = parseOpenApi(OPENAPI_3_YAML) + const {endpoints, error} = parseOpenApi(OPENAPI_3_YAML) expect(error).toBeNull() expect(endpoints.length).toBeGreaterThan(0) @@ -107,19 +102,19 @@ describe('parseOpenApi', () => { }) it('parses a valid Swagger 2.0 YAML', () => { - const { endpoints, error } = parseOpenApi(SWAGGER_2_YAML) + const {endpoints, error} = parseOpenApi(SWAGGER_2_YAML) expect(error).toBeNull() expect(endpoints.some((e) => e.path === '/items' && e.method === 'GET')).toBe(true) }) it('returns error for non-OpenAPI content', () => { - const { endpoints, error } = parseOpenApi('# Just a markdown document') + const {endpoints, error} = parseOpenApi('# Just a markdown document') expect(endpoints).toHaveLength(0) expect(error).toBeTruthy() }) it('returns error for invalid YAML', () => { - const { endpoints, error } = parseOpenApi(': invalid: yaml: :::') + const {endpoints, error} = parseOpenApi(': invalid: yaml: :::') expect(endpoints).toHaveLength(0) expect(error).toBeTruthy() }) @@ -127,12 +122,12 @@ describe('parseOpenApi', () => { it('parses valid JSON input', () => { const json = JSON.stringify({ openapi: '3.0.0', - info: { title: 'T', version: '1' }, + info: {title: 'T', version: '1'}, paths: { - '/ping': { get: { summary: 'Ping' } }, + '/ping': {get: {summary: 'Ping'}}, }, }) - const { endpoints, error } = parseOpenApi(json) + const {endpoints, error} = parseOpenApi(json) expect(error).toBeNull() expect(endpoints[0].path).toBe('/ping') }) @@ -140,7 +135,7 @@ describe('parseOpenApi', () => { describe('parseAsyncApi', () => { it('parses AsyncAPI 2.x YAML with publish/subscribe', () => { - const { channels, error } = parseAsyncApi(ASYNCAPI_2_YAML) + const {channels, error} = parseAsyncApi(ASYNCAPI_2_YAML) expect(error).toBeNull() 
expect(channels.length).toBeGreaterThanOrEqual(2) @@ -155,13 +150,13 @@ describe('parseAsyncApi', () => { }) it('returns error for non-AsyncAPI content', () => { - const { channels, error } = parseAsyncApi('# Not asyncapi') + const {channels, error} = parseAsyncApi('# Not asyncapi') expect(channels).toHaveLength(0) expect(error).toBeTruthy() }) it('returns error for invalid YAML', () => { - const { channels, error } = parseAsyncApi('::: bad yaml') + const {channels, error} = parseAsyncApi('::: bad yaml') expect(channels).toHaveLength(0) expect(error).toBeTruthy() }) diff --git a/tests/unit/formatters/vuln.test.js b/tests/unit/formatters/vuln.test.js index 71822b3..971a665 100644 --- a/tests/unit/formatters/vuln.test.js +++ b/tests/unit/formatters/vuln.test.js @@ -1,5 +1,14 @@ -import { describe, it, expect } from 'vitest' -import { colorSeverity, formatScore, formatDate, formatCveSearchTable, formatCveDetail, formatFindingsTable, formatScanSummary, formatMarkdownReport } from '../../../src/formatters/vuln.js' +import {describe, it, expect} from 'vitest' +import { + colorSeverity, + formatScore, + formatDate, + formatCveSearchTable, + formatCveDetail, + formatFindingsTable, + formatScanSummary, + formatMarkdownReport, +} from '../../../src/formatters/vuln.js' describe('colorSeverity', () => { it('returns a non-empty string for each severity level', () => { @@ -83,7 +92,7 @@ describe('formatCveSearchTable', () => { }) it('shows — when firstReference is null', () => { - const noRef = [{ ...mockResults[0], firstReference: null }] + const noRef = [{...mockResults[0], firstReference: null}] const out = formatCveSearchTable(noRef, 'openssl', 14, 1) expect(out).toContain('—') }) @@ -99,9 +108,9 @@ describe('formatCveDetail', () => { publishedDate: '2021-12-10T04:15:07.917', lastModified: '2023-11-07T03:39:36.747', status: 'Analyzed', - weaknesses: [{ id: 'CWE-502', description: 'CWE-502' }], - affectedProducts: [{ vendor: 'apache', product: 'log4j', versions: '2.0-beta9 to 
2.15.0' }], - references: [{ url: 'https://example.com', source: 'test', tags: ['Vendor Advisory'] }], + weaknesses: [{id: 'CWE-502', description: 'CWE-502'}], + affectedProducts: [{vendor: 'apache', product: 'log4j', versions: '2.0-beta9 to 2.15.0'}], + references: [{url: 'https://example.com', source: 'test', tags: ['Vendor Advisory']}], } it('includes CVE ID in output', () => { @@ -170,7 +179,7 @@ describe('formatFindingsTable', () => { describe('formatScanSummary', () => { it('includes counts for all severity levels', () => { - const summary = { critical: 2, high: 1, medium: 3, low: 0, unknown: 0, total: 6 } + const summary = {critical: 2, high: 1, medium: 3, low: 0, unknown: 0, total: 6} const out = formatScanSummary(summary) expect(out).toContain('2') expect(out).toContain('Critical') @@ -182,11 +191,29 @@ describe('formatMarkdownReport', () => { const mockResult = { projectPath: '/path/to/project', scanDate: '2026-03-28T10:30:00.000Z', - ecosystems: [{ name: 'pnpm', lockFile: 'pnpm-lock.yaml', lockFilePath: '/path/pnpm-lock.yaml', auditCommand: 'pnpm audit --json', builtIn: true }], + ecosystems: [ + { + name: 'pnpm', + lockFile: 'pnpm-lock.yaml', + lockFilePath: '/path/pnpm-lock.yaml', + auditCommand: 'pnpm audit --json', + builtIn: true, + }, + ], findings: [ - { package: 'lodash', installedVersion: '4.17.20', severity: 'Critical', cveId: 'CVE-2021-23337', advisoryUrl: null, title: 'Prototype Pollution', patchedVersions: null, ecosystem: 'pnpm', isDirect: false }, + { + package: 'lodash', + installedVersion: '4.17.20', + severity: 'Critical', + cveId: 'CVE-2021-23337', + advisoryUrl: null, + title: 'Prototype Pollution', + patchedVersions: null, + ecosystem: 'pnpm', + isDirect: false, + }, ], - summary: { critical: 1, high: 0, medium: 0, low: 0, unknown: 0, total: 1 }, + summary: {critical: 1, high: 0, medium: 0, low: 0, unknown: 0, total: 1}, errors: [], } diff --git a/tests/unit/gradient.test.js b/tests/unit/gradient.test.js index 073abb2..011f0f8 100644 
--- a/tests/unit/gradient.test.js +++ b/tests/unit/gradient.test.js @@ -1,4 +1,4 @@ -import { describe, it, expect, vi, afterEach } from 'vitest' +import {describe, it, expect, vi, afterEach} from 'vitest' // Mock chalk to control color level in tests vi.mock('chalk', async () => { @@ -15,15 +15,18 @@ describe('gradientText', () => { it('returns plain text when NO_COLOR is set', async () => { vi.stubEnv('NO_COLOR', '1') // Re-import to get fresh module with new env - const { gradientText } = await import('../../src/utils/gradient.js?nocache=' + Math.random()) - const result = gradientText('hello', [[255, 0, 0], [0, 0, 255]]) + const {gradientText} = await import('../../src/utils/gradient.js?nocache=' + Math.random()) + const result = gradientText('hello', [ + [255, 0, 0], + [0, 0, 255], + ]) expect(result).toBe('hello') }) it('throws when less than 2 stops provided', async () => { // Force color enabled for this test vi.unstubAllEnvs() - const { gradientText, isColorEnabled } = await import('../../src/utils/gradient.js') + const {gradientText, isColorEnabled} = await import('../../src/utils/gradient.js') if (!isColorEnabled) { // In CI without color, skip this check (gradientText returns early before throw) expect(gradientText('hi', [[255, 0, 0]])).toBe('hi') @@ -33,14 +36,22 @@ describe('gradientText', () => { }) it('returns empty string for empty input', async () => { - const { gradientText } = await import('../../src/utils/gradient.js') - expect(gradientText('', [[255, 0, 0], [0, 0, 255]])).toBe('') + const {gradientText} = await import('../../src/utils/gradient.js') + expect( + gradientText('', [ + [255, 0, 0], + [0, 0, 255], + ]), + ).toBe('') }) it('does not color spaces', async () => { vi.unstubAllEnvs() - const { gradientText, isColorEnabled } = await import('../../src/utils/gradient.js') - const result = gradientText('a b', [[255, 0, 0], [0, 0, 255]]) + const {gradientText, isColorEnabled} = await import('../../src/utils/gradient.js') + const result = 
gradientText('a b', [ + [255, 0, 0], + [0, 0, 255], + ]) if (isColorEnabled) { // Spaces should be preserved as plain spaces expect(result).toContain(' ') @@ -52,7 +63,7 @@ describe('gradientText', () => { }) it('BRAND_GRADIENT has 3 stops on blue palette', async () => { - const { BRAND_GRADIENT } = await import('../../src/utils/gradient.js') + const {BRAND_GRADIENT} = await import('../../src/utils/gradient.js') expect(BRAND_GRADIENT).toHaveLength(3) expect(BRAND_GRADIENT[0]).toEqual([0, 212, 255]) expect(BRAND_GRADIENT[2]).toEqual([100, 0, 220]) @@ -60,10 +71,24 @@ describe('gradientText', () => { it('phase shifts gradient offset', async () => { vi.unstubAllEnvs() - const { gradientText, isColorEnabled } = await import('../../src/utils/gradient.js') + const {gradientText, isColorEnabled} = await import('../../src/utils/gradient.js') if (!isColorEnabled) return // skip in no-color env - const result0 = gradientText('abc', [[255, 0, 0], [0, 0, 255]], 0) - const result1 = gradientText('abc', [[255, 0, 0], [0, 0, 255]], 0.5) + const result0 = gradientText( + 'abc', + [ + [255, 0, 0], + [0, 0, 255], + ], + 0, + ) + const result1 = gradientText( + 'abc', + [ + [255, 0, 0], + [0, 0, 255], + ], + 0.5, + ) // With different phases, colored output should differ expect(result0).not.toBe(result1) }) @@ -71,7 +96,7 @@ describe('gradientText', () => { describe('isColorEnabled / isAnimationEnabled', () => { it('isAnimationEnabled is false when not TTY', async () => { - const { isAnimationEnabled } = await import('../../src/utils/gradient.js') + const {isAnimationEnabled} = await import('../../src/utils/gradient.js') // In test environment, stdout is never a real TTY expect(isAnimationEnabled).toBe(false) }) diff --git a/tests/unit/platform.test.js b/tests/unit/platform.test.js index 93eb215..ed040d2 100644 --- a/tests/unit/platform.test.js +++ b/tests/unit/platform.test.js @@ -1,9 +1,9 @@ -import { describe, it, expect, vi } from 'vitest' +import {describe, it, expect, vi} from 
'vitest' describe('detectPlatform', () => { it('detects macOS', async () => { - vi.stubGlobal('process', { ...process, platform: 'darwin' }) - const { detectPlatform } = await import('../../src/services/platform.js') + vi.stubGlobal('process', {...process, platform: 'darwin'}) + const {detectPlatform} = await import('../../src/services/platform.js') const result = await detectPlatform() expect(result.platform).toBe('macos') expect(result.openCommand).toBe('open') diff --git a/tests/unit/prompts/frontmatter.test.js b/tests/unit/prompts/frontmatter.test.js index 3deedc6..643107d 100644 --- a/tests/unit/prompts/frontmatter.test.js +++ b/tests/unit/prompts/frontmatter.test.js @@ -1,5 +1,5 @@ -import { describe, it, expect } from 'vitest' -import { parseFrontmatter, serializeFrontmatter } from '../../../src/utils/frontmatter.js' +import {describe, it, expect} from 'vitest' +import {parseFrontmatter, serializeFrontmatter} from '../../../src/utils/frontmatter.js' describe('parseFrontmatter', () => { it('parses valid YAML frontmatter and separates body', () => { @@ -12,7 +12,7 @@ tags: --- This is the prompt body.` - const { frontmatter, body } = parseFrontmatter(content) + const {frontmatter, body} = parseFrontmatter(content) expect(frontmatter.title).toBe('Refactor Prompt') expect(frontmatter.description).toBe('A prompt for code refactoring') expect(frontmatter.tags).toEqual(['refactor', 'coding']) @@ -21,7 +21,7 @@ This is the prompt body.` it('returns empty frontmatter when no frontmatter block is present', () => { const content = 'Just a plain markdown body without any frontmatter.' 
- const { frontmatter, body } = parseFrontmatter(content) + const {frontmatter, body} = parseFrontmatter(content) expect(frontmatter).toEqual({}) expect(body).toBe(content) }) @@ -30,7 +30,7 @@ This is the prompt body.` const content = `--- --- Body after empty frontmatter.` - const { frontmatter, body } = parseFrontmatter(content) + const {frontmatter, body} = parseFrontmatter(content) expect(frontmatter).toEqual({}) expect(body.trim()).toBe('Body after empty frontmatter.') }) @@ -41,7 +41,7 @@ title: No Tags tags: [] --- Body here.` - const { frontmatter } = parseFrontmatter(content) + const {frontmatter} = parseFrontmatter(content) expect(frontmatter.title).toBe('No Tags') expect(frontmatter.tags).toEqual([]) }) @@ -55,7 +55,7 @@ Line one. Line two. Line three.` - const { body } = parseFrontmatter(content) + const {body} = parseFrontmatter(content) expect(body).toContain('Line one.') expect(body).toContain('Line two.') expect(body).toContain('Line three.') @@ -67,7 +67,7 @@ Line three.` title: "broken yaml --- Body.` - const { frontmatter, body } = parseFrontmatter(content) + const {frontmatter, body} = parseFrontmatter(content) expect(frontmatter).toEqual({}) // Falls back to raw content expect(body).toBe(content) @@ -75,7 +75,7 @@ Body.` it('handles Windows line endings (CRLF)', () => { const content = '---\r\ntitle: Windows\r\n---\r\nBody.' - const { frontmatter, body } = parseFrontmatter(content) + const {frontmatter, body} = parseFrontmatter(content) expect(frontmatter.title).toBe('Windows') expect(body).toBe('Body.') }) @@ -83,7 +83,7 @@ Body.` describe('serializeFrontmatter', () => { it('produces valid ---\\n...\\n---\\n output', () => { - const fm = { title: 'My Prompt', tags: ['a', 'b'] } + const fm = {title: 'My Prompt', tags: ['a', 'b']} const body = 'Prompt body content.' 
const result = serializeFrontmatter(fm, body) expect(result).toMatch(/^---\n/) @@ -105,9 +105,9 @@ tags: - test --- Body content.` - const { frontmatter, body } = parseFrontmatter(original) + const {frontmatter, body} = parseFrontmatter(original) const serialized = serializeFrontmatter(frontmatter, body) - const { frontmatter: fm2, body: body2 } = parseFrontmatter(serialized) + const {frontmatter: fm2, body: body2} = parseFrontmatter(serialized) expect(fm2.title).toBe('Round Trip') expect(fm2.description).toBe('Testing round trip') expect(fm2.tags).toEqual(['test']) diff --git a/tests/unit/security/steps.test.js b/tests/unit/security/steps.test.js index 455e98f..143f5d0 100644 --- a/tests/unit/security/steps.test.js +++ b/tests/unit/security/steps.test.js @@ -1,12 +1,12 @@ -import { describe, it, expect } from 'vitest' -import { buildSteps } from '../../../src/services/security.js' +import {describe, it, expect} from 'vitest' +import {buildSteps} from '../../../src/services/security.js' /** @type {import('../../../src/types.js').PlatformInfo} */ -const MACOS = { platform: 'macos', openCommand: 'open', credentialHelper: 'osxkeychain' } +const MACOS = {platform: 'macos', openCommand: 'open', credentialHelper: 'osxkeychain'} /** @type {import('../../../src/types.js').PlatformInfo} */ -const LINUX = { platform: 'linux', openCommand: 'xdg-open', credentialHelper: 'store' } +const LINUX = {platform: 'linux', openCommand: 'xdg-open', credentialHelper: 'store'} /** @type {import('../../../src/types.js').PlatformInfo} */ -const WSL2 = { platform: 'wsl2', openCommand: 'wslview', credentialHelper: 'manager' } +const WSL2 = {platform: 'wsl2', openCommand: 'wslview', credentialHelper: 'manager'} describe('buildSteps()', () => { // --------------------------------------------------------------------------- @@ -196,7 +196,7 @@ describe('buildSteps()', () => { describe('context.gpgId', () => { it('init-pass step run returns skipped when gpgId is provided and pass ls succeeds', 
async () => { // We pass gpgId but can't easily mock pass ls, so just verify step id exists - const steps = buildSteps(LINUX, 'aws', { gpgId: 'ABCDEF1234567890' }) + const steps = buildSteps(LINUX, 'aws', {gpgId: 'ABCDEF1234567890'}) const initPass = steps.find((s) => s.id === 'init-pass') expect(initPass).toBeDefined() }) diff --git a/tests/unit/services/ai-config-store.test.js b/tests/unit/services/ai-config-store.test.js new file mode 100644 index 0000000..bccbca3 --- /dev/null +++ b/tests/unit/services/ai-config-store.test.js @@ -0,0 +1,335 @@ +import {describe, it, expect, beforeEach, afterEach, vi} from 'vitest' +import {join} from 'node:path' +import {tmpdir} from 'node:os' +import {writeFile, mkdir, rm} from 'node:fs/promises' +import {existsSync} from 'node:fs' + +import { + loadAIConfig, + addEntry, + updateEntry, + deactivateEntry, + activateEntry, + deleteEntry, + getEntriesByEnvironment, + getEntriesByType, +} from '../../../src/services/ai-config-store.js' +import {DvmiError} from '../../../src/utils/errors.js' + +// ────────────────────────────────────────────────────────────────────────────── +// Helpers +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Build a unique temp file path per test run. + * @returns {string} + */ +function makeTmpPath() { + const dir = join(tmpdir(), `dvmi-ai-config-test-${Date.now()}-${Math.random().toString(36).slice(2)}`) + return join(dir, 'ai-config.json') +} + +/** Minimal valid entry data for an MCP entry targeting compatible environments. 
*/ +const MCP_ENTRY = { + name: 'test-mcp', + type: /** @type {import('../../../src/types.js').CategoryType} */ ('mcp'), + environments: /** @type {import('../../../src/types.js').EnvironmentId[]} */ (['claude-code']), + params: {transport: 'stdio', command: 'npx', args: [], env: {}}, +} + +// ────────────────────────────────────────────────────────────────────────────── +// Test setup / teardown +// ────────────────────────────────────────────────────────────────────────────── + +let tmpPath + +beforeEach(() => { + tmpPath = makeTmpPath() + process.env.DVMI_AI_CONFIG_PATH = tmpPath +}) + +afterEach(async () => { + delete process.env.DVMI_AI_CONFIG_PATH + const dir = join(tmpPath, '..') + if (existsSync(dir)) { + await rm(dir, {recursive: true, force: true}) + } +}) + +// ────────────────────────────────────────────────────────────────────────────── +// loadAIConfig +// ────────────────────────────────────────────────────────────────────────────── + +describe('loadAIConfig', () => { + it('returns defaults when file does not exist', async () => { + const store = await loadAIConfig(tmpPath) + expect(store).toEqual({version: 1, entries: []}) + }) + + it('returns parsed content from an existing valid file', async () => { + const dir = join(tmpPath, '..') + await mkdir(dir, {recursive: true}) + const data = { + version: 1, + entries: [ + { + id: '00000000-0000-0000-0000-000000000001', + name: 'existing-mcp', + type: 'mcp', + active: true, + environments: ['claude-code'], + params: {transport: 'stdio', command: 'npx', args: [], env: {}}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + ], + } + await writeFile(tmpPath, JSON.stringify(data), 'utf8') + + const store = await loadAIConfig(tmpPath) + expect(store.version).toBe(1) + expect(store.entries).toHaveLength(1) + expect(store.entries[0].name).toBe('existing-mcp') + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// addEntry +// 
────────────────────────────────────────────────────────────────────────────── + +describe('addEntry', () => { + it('creates an entry with UUID, active: true, and timestamps', async () => { + const before = new Date() + const entry = await addEntry(MCP_ENTRY, tmpPath) + const after = new Date() + + expect(entry.id).toMatch(/^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i) + expect(entry.active).toBe(true) + expect(entry.name).toBe(MCP_ENTRY.name) + expect(entry.type).toBe(MCP_ENTRY.type) + expect(entry.environments).toEqual(MCP_ENTRY.environments) + + const createdAt = new Date(entry.createdAt) + const updatedAt = new Date(entry.updatedAt) + expect(createdAt.getTime()).toBeGreaterThanOrEqual(before.getTime()) + expect(createdAt.getTime()).toBeLessThanOrEqual(after.getTime()) + expect(updatedAt.getTime()).toEqual(createdAt.getTime()) + }) + + it('throws DvmiError for a duplicate name within the same type', async () => { + await addEntry(MCP_ENTRY, tmpPath) + await expect(addEntry(MCP_ENTRY, tmpPath)).rejects.toThrow(DvmiError) + await expect(addEntry(MCP_ENTRY, tmpPath)).rejects.toThrow(/already exists/) + }) + + it('throws DvmiError when the name contains invalid filename characters', async () => { + const bad = {...MCP_ENTRY, name: 'bad/name'} + await expect(addEntry(bad, tmpPath)).rejects.toThrow(DvmiError) + await expect(addEntry(bad, tmpPath)).rejects.toThrow(/invalid characters/) + }) + + it('throws DvmiError when an environment is incompatible with the entry type', async () => { + const incompatible = { + name: 'agent-for-gemini', + type: /** @type {import('../../../src/types.js').CategoryType} */ ('agent'), + environments: /** @type {import('../../../src/types.js').EnvironmentId[]} */ (['gemini-cli']), + params: {instructions: 'do stuff'}, + } + await expect(addEntry(incompatible, tmpPath)).rejects.toThrow(DvmiError) + await expect(addEntry(incompatible, tmpPath)).rejects.toThrow(/does not support type/) + }) + + it('succeeds for 
compatible environment and type combinations', async () => { + const compatible = { + name: 'mcp-for-gemini', + type: /** @type {import('../../../src/types.js').CategoryType} */ ('mcp'), + environments: /** @type {import('../../../src/types.js').EnvironmentId[]} */ (['gemini-cli']), + params: {transport: 'stdio', command: 'npx', args: [], env: {}}, + } + const entry = await addEntry(compatible, tmpPath) + expect(entry.id).toBeTruthy() + expect(entry.environments).toContain('gemini-cli') + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// updateEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('updateEntry', () => { + it('merges changes and updates updatedAt', async () => { + const original = await addEntry(MCP_ENTRY, tmpPath) + + // Small delay to ensure updatedAt differs from createdAt + await new Promise((r) => setTimeout(r, 5)) + + const updated = await updateEntry(original.id, {name: 'renamed-mcp', environments: ['opencode']}, tmpPath) + + expect(updated.id).toBe(original.id) + expect(updated.name).toBe('renamed-mcp') + expect(updated.environments).toEqual(['opencode']) + expect(updated.type).toBe(original.type) + expect(new Date(updated.updatedAt).getTime()).toBeGreaterThan(new Date(original.updatedAt).getTime()) + }) + + it('throws DvmiError when the entry id is not found', async () => { + await expect(updateEntry('non-existent-id', {name: 'x'}, tmpPath)).rejects.toThrow(DvmiError) + await expect(updateEntry('non-existent-id', {name: 'x'}, tmpPath)).rejects.toThrow(/not found/) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// deactivateEntry / activateEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('deactivateEntry', () => { + it('sets active to false', async () => { + const entry = await addEntry(MCP_ENTRY, tmpPath) + const deactivated = await 
deactivateEntry(entry.id, tmpPath) + expect(deactivated.active).toBe(false) + + const store = await loadAIConfig(tmpPath) + expect(store.entries[0].active).toBe(false) + }) +}) + +describe('activateEntry', () => { + it('sets active to true after deactivation', async () => { + const entry = await addEntry(MCP_ENTRY, tmpPath) + await deactivateEntry(entry.id, tmpPath) + const activated = await activateEntry(entry.id, tmpPath) + expect(activated.active).toBe(true) + + const store = await loadAIConfig(tmpPath) + expect(store.entries[0].active).toBe(true) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// deleteEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('deleteEntry', () => { + it('removes the entry from the store', async () => { + const entry = await addEntry(MCP_ENTRY, tmpPath) + await deleteEntry(entry.id, tmpPath) + + const store = await loadAIConfig(tmpPath) + expect(store.entries).toHaveLength(0) + }) + + it('throws DvmiError when the entry id is not found', async () => { + await expect(deleteEntry('non-existent-id', tmpPath)).rejects.toThrow(DvmiError) + await expect(deleteEntry('non-existent-id', tmpPath)).rejects.toThrow(/not found/) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// getEntriesByEnvironment +// ────────────────────────────────────────────────────────────────────────────── + +describe('getEntriesByEnvironment', () => { + it('returns only active entries that include the given environment', async () => { + const active = await addEntry({...MCP_ENTRY, name: 'active-mcp', environments: ['claude-code']}, tmpPath) + const alsoActive = await addEntry( + { + name: 'active-opencode', + type: 'mcp', + environments: ['opencode'], + params: {transport: 'stdio', command: 'npx', args: [], env: {}}, + }, + tmpPath, + ) + await deactivateEntry(active.id, tmpPath) + + const results = await 
getEntriesByEnvironment('claude-code', tmpPath) + expect(results.every((e) => e.active)).toBe(true) + expect(results.every((e) => e.environments.includes('claude-code'))).toBe(true) + expect(results.find((e) => e.id === active.id)).toBeUndefined() + expect(results.find((e) => e.id === alsoActive.id)).toBeUndefined() + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// getEntriesByType +// ────────────────────────────────────────────────────────────────────────────── + +describe('getEntriesByType', () => { + it('returns all entries of the given type regardless of active flag', async () => { + const mcp1 = await addEntry({...MCP_ENTRY, name: 'mcp-one'}, tmpPath) + const mcp2 = await addEntry({...MCP_ENTRY, name: 'mcp-two'}, tmpPath) + await addEntry( + { + name: 'a-command', + type: 'command', + environments: ['claude-code'], + params: {content: 'do something'}, + }, + tmpPath, + ) + await deactivateEntry(mcp2.id, tmpPath) + + const results = await getEntriesByType('mcp', tmpPath) + expect(results).toHaveLength(2) + expect(results.map((e) => e.id).sort()).toEqual([mcp1.id, mcp2.id].sort()) + // Both active and inactive are returned + expect(results.find((e) => e.id === mcp2.id)?.active).toBe(false) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// syncAIConfigToChezmoi +// ────────────────────────────────────────────────────────────────────────────── + +vi.mock('../../../src/services/shell.js', () => ({exec: vi.fn()})) +vi.mock('../../../src/services/config.js', () => ({loadConfig: vi.fn()})) + +describe('syncAIConfigToChezmoi', () => { + let execMock + let loadConfigMock + + beforeEach(async () => { + const shellModule = await import('../../../src/services/shell.js') + const configModule = await import('../../../src/services/config.js') + execMock = shellModule.exec + loadConfigMock = configModule.loadConfig + vi.clearAllMocks() + }) + + it('calls chezmoi add when 
dotfiles.enabled is true', async () => { + loadConfigMock.mockResolvedValue({dotfiles: {enabled: true}}) + execMock.mockResolvedValue({stdout: '', stderr: '', exitCode: 0}) + + const {syncAIConfigToChezmoi} = await import('../../../src/services/ai-config-store.js') + await syncAIConfigToChezmoi() + + expect(execMock).toHaveBeenCalledOnce() + expect(execMock).toHaveBeenCalledWith('chezmoi', ['add', expect.any(String)]) + }) + + it('skips when dotfiles.enabled is false', async () => { + loadConfigMock.mockResolvedValue({dotfiles: {enabled: false}}) + + const {syncAIConfigToChezmoi} = await import('../../../src/services/ai-config-store.js') + await syncAIConfigToChezmoi() + + expect(execMock).not.toHaveBeenCalled() + }) + + it('skips when dotfiles is not configured', async () => { + loadConfigMock.mockResolvedValue({}) + + const {syncAIConfigToChezmoi} = await import('../../../src/services/ai-config-store.js') + await syncAIConfigToChezmoi() + + expect(execMock).not.toHaveBeenCalled() + }) + + it('does not throw when chezmoi fails', async () => { + loadConfigMock.mockResolvedValue({dotfiles: {enabled: true}}) + execMock.mockRejectedValue(new Error('chezmoi not found')) + + const {syncAIConfigToChezmoi} = await import('../../../src/services/ai-config-store.js') + await expect(syncAIConfigToChezmoi()).resolves.toBeUndefined() + }) +}) diff --git a/tests/unit/services/ai-env-deployer.test.js b/tests/unit/services/ai-env-deployer.test.js new file mode 100644 index 0000000..5d2c90a --- /dev/null +++ b/tests/unit/services/ai-env-deployer.test.js @@ -0,0 +1,615 @@ +import {describe, it, expect, beforeEach, afterEach} from 'vitest' +import {join} from 'node:path' +import {tmpdir} from 'node:os' +import {readFile, mkdir, writeFile, rm} from 'node:fs/promises' +import {existsSync} from 'node:fs' +import {randomUUID} from 'node:crypto' + +import { + deployMCPEntry, + undeployMCPEntry, + deployFileEntry, + undeployFileEntry, + deployEntry, + undeployEntry, + reconcileOnScan, +} 
from '../../../src/services/ai-env-deployer.js' + +// ────────────────────────────────────────────────────────────────────────────── +// Helpers +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Create a unique temporary directory per test. + * @returns {string} + */ +function makeTmpDir() { + return join(tmpdir(), `dvmi-deployer-test-${Date.now()}-${randomUUID().slice(0, 8)}`) +} + +/** + * Read and parse a JSON file from disk. + * @param {string} filePath + * @returns {Promise<Record<string, unknown>>} + */ +async function readJson(filePath) { + const raw = await readFile(filePath, 'utf8') + return JSON.parse(raw) +} + +/** + * Build a minimal CategoryEntry for MCP type. + * @param {Partial<import('../../../src/types.js').CategoryEntry>} [overrides] + * @returns {import('../../../src/types.js').CategoryEntry} + */ +function makeMCPEntry(overrides = {}) { + return { + id: randomUUID(), + name: 'test-server', + type: 'mcp', + active: true, + environments: ['claude-code'], + params: {transport: 'stdio', command: 'npx', args: ['-y', 'test-pkg'], env: {}}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + ...overrides, + } +} + +/** + * Build a minimal CategoryEntry for command type. + * @param {Partial<import('../../../src/types.js').CategoryEntry>} [overrides] + * @returns {import('../../../src/types.js').CategoryEntry} + */ +function makeCommandEntry(overrides = {}) { + return { + id: randomUUID(), + name: 'my-command', + type: 'command', + active: true, + environments: ['claude-code'], + params: {content: '# My Command\nDo something useful.', description: 'A test command'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + ...overrides, + } +} + +/** + * Build a minimal DetectedEnvironment stub. 
+ * @param {import('../../../src/types.js').EnvironmentId} id + * @param {string[]} [unreadable] + * @returns {import('../../../src/types.js').DetectedEnvironment} + */ +function makeDetected(id, unreadable = []) { + return { + id, + name: id, + detected: true, + projectPaths: [], + globalPaths: [], + unreadable, + supportedCategories: ['mcp', 'command', 'skill', 'agent'], + counts: {mcp: 0, command: 0, skill: 0, agent: 0}, + scope: 'project', + } +} + +// ────────────────────────────────────────────────────────────────────────────── +// Test lifecycle +// ────────────────────────────────────────────────────────────────────────────── + +let cwd + +beforeEach(async () => { + cwd = makeTmpDir() + await mkdir(cwd, {recursive: true}) +}) + +afterEach(async () => { + if (existsSync(cwd)) { + await rm(cwd, {recursive: true, force: true}) + } +}) + +// ────────────────────────────────────────────────────────────────────────────── +// deployMCPEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('deployMCPEntry', () => { + it('creates a new JSON file with the mcpServers entry when file does not exist', async () => { + const entry = makeMCPEntry({name: 'my-mcp', environments: ['claude-code']}) + + await deployMCPEntry(entry, 'claude-code', cwd) + + const filePath = join(cwd, '.mcp.json') + expect(existsSync(filePath)).toBe(true) + + const json = await readJson(filePath) + expect(json).toHaveProperty('mcpServers') + expect(json.mcpServers).toHaveProperty('my-mcp') + expect(json.mcpServers['my-mcp']).toMatchObject({command: 'npx'}) + }) + + it('merges into an existing JSON file, preserving other entries', async () => { + const filePath = join(cwd, '.mcp.json') + const existing = { + mcpServers: { + 'existing-server': {command: 'node', args: ['server.js'], env: {}}, + }, + } + await mkdir(join(cwd), {recursive: true}) + await writeFile(filePath, JSON.stringify(existing), 'utf8') + + const entry = makeMCPEntry({name: 'new-server', 
environments: ['claude-code']}) + await deployMCPEntry(entry, 'claude-code', cwd) + + const json = await readJson(filePath) + expect(json.mcpServers).toHaveProperty('existing-server') + expect(json.mcpServers).toHaveProperty('new-server') + }) + + it('handles vscode-copilot: writes to .vscode/mcp.json with "servers" key', async () => { + const entry = makeMCPEntry({name: 'vscode-mcp', environments: ['vscode-copilot']}) + + await deployMCPEntry(entry, 'vscode-copilot', cwd) + + const filePath = join(cwd, '.vscode', 'mcp.json') + expect(existsSync(filePath)).toBe(true) + + const json = await readJson(filePath) + expect(json).toHaveProperty('servers') + expect(json).not.toHaveProperty('mcpServers') + expect(json.servers).toHaveProperty('vscode-mcp') + }) + + it('handles claude-code: writes to .mcp.json with "mcpServers" key', async () => { + const entry = makeMCPEntry({name: 'claude-mcp', environments: ['claude-code']}) + + await deployMCPEntry(entry, 'claude-code', cwd) + + const json = await readJson(join(cwd, '.mcp.json')) + expect(json).toHaveProperty('mcpServers') + expect(json.mcpServers).toHaveProperty('claude-mcp') + }) + + it('handles opencode: writes to opencode.json with "mcpServers" key', async () => { + const entry = makeMCPEntry({name: 'oc-mcp', environments: ['opencode']}) + + await deployMCPEntry(entry, 'opencode', cwd) + + const json = await readJson(join(cwd, 'opencode.json')) + expect(json).toHaveProperty('mcpServers') + expect(json.mcpServers).toHaveProperty('oc-mcp') + }) + + it('handles gemini-cli: writes to ~/.gemini/settings.json with "mcpServers" key', async () => { + // We cannot write to real homedir in tests; we verify the path structure by + // pre-creating the directory under a unique path then checking the written file + const {homedir} = await import('node:os') + const geminiSettingsPath = join(homedir(), '.gemini', 'settings.json') + + // Read current state (may not exist) so we can restore it + const hadExistingFile = 
existsSync(geminiSettingsPath) + let originalContent = null + if (hadExistingFile) { + originalContent = await readFile(geminiSettingsPath, 'utf8') + } + + try { + const entry = makeMCPEntry({name: 'gemini-mcp', environments: ['gemini-cli']}) + await deployMCPEntry(entry, 'gemini-cli', cwd) + + expect(existsSync(geminiSettingsPath)).toBe(true) + const json = await readJson(geminiSettingsPath) + expect(json).toHaveProperty('mcpServers') + expect(json.mcpServers).toHaveProperty('gemini-mcp') + } finally { + // Restore previous state + if (hadExistingFile && originalContent !== null) { + await writeFile(geminiSettingsPath, originalContent, 'utf8') + } else if (existsSync(geminiSettingsPath)) { + await rm(geminiSettingsPath, {force: true}) + } + } + }) + + it('handles copilot-cli: writes to ~/.copilot/mcp-config.json with "mcpServers" key', async () => { + const {homedir} = await import('node:os') + const copilotMcpPath = join(homedir(), '.copilot', 'mcp-config.json') + + const hadExistingFile = existsSync(copilotMcpPath) + let originalContent = null + if (hadExistingFile) { + originalContent = await readFile(copilotMcpPath, 'utf8') + } + + try { + const entry = makeMCPEntry({name: 'copilot-mcp', environments: ['copilot-cli']}) + await deployMCPEntry(entry, 'copilot-cli', cwd) + + expect(existsSync(copilotMcpPath)).toBe(true) + const json = await readJson(copilotMcpPath) + expect(json).toHaveProperty('mcpServers') + expect(json.mcpServers).toHaveProperty('copilot-mcp') + } finally { + if (hadExistingFile && originalContent !== null) { + await writeFile(copilotMcpPath, originalContent, 'utf8') + } else if (existsSync(copilotMcpPath)) { + await rm(copilotMcpPath, {force: true}) + } + } + }) + + it('is a no-op when entry type is not mcp', async () => { + const entry = makeCommandEntry() + + // Should not throw and should not create any file + await expect(deployMCPEntry(entry, 'claude-code', cwd)).resolves.toBeUndefined() + expect(existsSync(join(cwd, 
'.mcp.json'))).toBe(false) + }) + + it('is a no-op when entry is null', async () => { + await expect(deployMCPEntry(null, 'claude-code', cwd)).resolves.toBeUndefined() + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// undeployMCPEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('undeployMCPEntry', () => { + it('removes an entry by name while preserving other entries', async () => { + const filePath = join(cwd, '.mcp.json') + const initial = { + mcpServers: { + 'server-a': {command: 'node', args: [], env: {}}, + 'server-b': {command: 'npx', args: ['-y', 'pkg'], env: {}}, + }, + } + await writeFile(filePath, JSON.stringify(initial), 'utf8') + + await undeployMCPEntry('server-a', 'claude-code', cwd) + + const json = await readJson(filePath) + expect(json.mcpServers).not.toHaveProperty('server-a') + expect(json.mcpServers).toHaveProperty('server-b') + }) + + it('leaves an empty mcpServers object when the last entry is removed', async () => { + const filePath = join(cwd, '.mcp.json') + const initial = { + mcpServers: { + 'only-server': {command: 'node', args: [], env: {}}, + }, + } + await writeFile(filePath, JSON.stringify(initial), 'utf8') + + await undeployMCPEntry('only-server', 'claude-code', cwd) + + const json = await readJson(filePath) + expect(json).toHaveProperty('mcpServers') + expect(Object.keys(json.mcpServers)).toHaveLength(0) + }) + + it('is a no-op when the target file does not exist', async () => { + // Should not throw + await expect(undeployMCPEntry('nonexistent', 'claude-code', cwd)).resolves.toBeUndefined() + expect(existsSync(join(cwd, '.mcp.json'))).toBe(false) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// deployFileEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('deployFileEntry', () => { + it('creates a markdown file at the correct path for a 
claude-code command', async () => { + const entry = makeCommandEntry({ + name: 'refactor', + environments: ['claude-code'], + params: {content: '# Refactor\nRefactor the selected code.', description: 'Refactor'}, + }) + + await deployFileEntry(entry, 'claude-code', cwd) + + const filePath = join(cwd, '.claude', 'commands', 'refactor.md') + expect(existsSync(filePath)).toBe(true) + const content = await readFile(filePath, 'utf8') + expect(content).toBe('# Refactor\nRefactor the selected code.') + }) + + it('creates a TOML file for a gemini-cli command', async () => { + const entry = makeCommandEntry({ + name: 'summarise', + environments: ['gemini-cli'], + params: {content: 'Summarise the current file.', description: 'Summarise'}, + }) + + // Use a real temp dir for the gemini path; we capture the expected path and + // clean it up afterwards. + const {homedir} = await import('node:os') + const tomlPath = join(homedir(), '.gemini', 'commands', 'summarise.toml') + + const hadExistingFile = existsSync(tomlPath) + let originalContent = null + if (hadExistingFile) { + originalContent = await readFile(tomlPath, 'utf8') + } + + try { + await deployFileEntry(entry, 'gemini-cli', cwd) + + expect(existsSync(tomlPath)).toBe(true) + const raw = await readFile(tomlPath, 'utf8') + expect(raw).toContain('description = "Summarise"') + expect(raw).toContain('[prompt]') + expect(raw).toContain('Summarise the current file.') + } finally { + if (hadExistingFile && originalContent !== null) { + await writeFile(tomlPath, originalContent, 'utf8') + } else if (existsSync(tomlPath)) { + await rm(tomlPath, {force: true}) + } + } + }) + + it('creates nested directory structure {name}/SKILL.md for vscode-copilot skills', async () => { + const entry = { + id: randomUUID(), + name: 'my-skill', + type: /** @type {import('../../../src/types.js').CategoryType} */ ('skill'), + active: true, + environments: /** @type {import('../../../src/types.js').EnvironmentId[]} */ (['vscode-copilot']), + params: 
{content: '# My Skill\nThis is a skill definition.'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + } + + await deployFileEntry(entry, 'vscode-copilot', cwd) + + const skillDir = join(cwd, '.github', 'skills', 'my-skill') + const skillFile = join(skillDir, 'SKILL.md') + + expect(existsSync(skillDir)).toBe(true) + expect(existsSync(skillFile)).toBe(true) + + const content = await readFile(skillFile, 'utf8') + expect(content).toBe('# My Skill\nThis is a skill definition.') + }) + + it('creates a markdown file for an opencode command', async () => { + const entry = makeCommandEntry({ + name: 'generate-tests', + environments: ['opencode'], + }) + + await deployFileEntry(entry, 'opencode', cwd) + + const filePath = join(cwd, '.opencode', 'commands', 'generate-tests.md') + expect(existsSync(filePath)).toBe(true) + }) + + it('creates a markdown file for a vscode-copilot command (prompt.md)', async () => { + const entry = makeCommandEntry({ + name: 'fix-types', + environments: ['vscode-copilot'], + }) + + await deployFileEntry(entry, 'vscode-copilot', cwd) + + const filePath = join(cwd, '.github', 'prompts', 'fix-types.prompt.md') + expect(existsSync(filePath)).toBe(true) + }) + + it('creates a markdown file for a claude-code agent using instructions field', async () => { + const entry = { + id: randomUUID(), + name: 'code-reviewer', + type: /** @type {import('../../../src/types.js').CategoryType} */ ('agent'), + active: true, + environments: /** @type {import('../../../src/types.js').EnvironmentId[]} */ (['claude-code']), + params: {instructions: 'Review code for quality and security.'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + } + + await deployFileEntry(entry, 'claude-code', cwd) + + const filePath = join(cwd, '.claude', 'agents', 'code-reviewer.md') + expect(existsSync(filePath)).toBe(true) + const content = await readFile(filePath, 'utf8') + expect(content).toBe('Review code for quality and security.') + 
}) + + it('is a no-op when entry is null', async () => { + await expect(deployFileEntry(null, 'claude-code', cwd)).resolves.toBeUndefined() + }) + + it('is a no-op when entry type is mcp', async () => { + const entry = makeMCPEntry() + await expect(deployFileEntry(entry, 'claude-code', cwd)).resolves.toBeUndefined() + expect(existsSync(join(cwd, '.claude', 'commands'))).toBe(false) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// undeployFileEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('undeployFileEntry', () => { + it('removes the file at the target path', async () => { + // First deploy so the file exists + const entry = makeCommandEntry({name: 'to-remove', environments: ['claude-code']}) + await deployFileEntry(entry, 'claude-code', cwd) + + const filePath = join(cwd, '.claude', 'commands', 'to-remove.md') + expect(existsSync(filePath)).toBe(true) + + await undeployFileEntry('to-remove', 'command', 'claude-code', cwd) + + expect(existsSync(filePath)).toBe(false) + }) + + it('is a no-op when the file does not exist', async () => { + await expect(undeployFileEntry('nonexistent', 'command', 'claude-code', cwd)).resolves.toBeUndefined() + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// deployEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('deployEntry', () => { + it('only deploys to environments that are in detectedEnvs', async () => { + const entry = makeMCPEntry({ + name: 'multi-env-mcp', + environments: ['claude-code', 'vscode-copilot'], + }) + + // Only claude-code is detected + const detectedEnvs = [makeDetected('claude-code')] + + await deployEntry(entry, detectedEnvs, cwd) + + // claude-code file should exist + expect(existsSync(join(cwd, '.mcp.json'))).toBe(true) + // vscode-copilot file should NOT exist (not detected) + expect(existsSync(join(cwd, 
'.vscode', 'mcp.json'))).toBe(false) + }) + + it('deploys to all detected environments listed in entry.environments', async () => { + const entry = makeMCPEntry({ + name: 'both-env-mcp', + environments: ['claude-code', 'vscode-copilot'], + }) + + const detectedEnvs = [makeDetected('claude-code'), makeDetected('vscode-copilot')] + + await deployEntry(entry, detectedEnvs, cwd) + + expect(existsSync(join(cwd, '.mcp.json'))).toBe(true) + expect(existsSync(join(cwd, '.vscode', 'mcp.json'))).toBe(true) + }) + + it('skips environments whose target MCP JSON file is marked as unreadable', async () => { + const mcpPath = join(cwd, '.mcp.json') + // Write a corrupt JSON file so it is "unreadable" + await writeFile(mcpPath, 'NOT VALID JSON }{', 'utf8') + + const entry = makeMCPEntry({name: 'skip-unreadable', environments: ['claude-code']}) + + // The detected env has the target file in its unreadable list + const detectedEnvs = [makeDetected('claude-code', [mcpPath])] + + const originalStat = await readFile(mcpPath, 'utf8') + await deployEntry(entry, detectedEnvs, cwd) + const afterStat = await readFile(mcpPath, 'utf8') + + // The corrupt file must NOT have been overwritten + expect(afterStat).toBe(originalStat) + }) + + it('is a no-op for an empty detectedEnvs array', async () => { + const entry = makeMCPEntry({environments: ['claude-code']}) + + await deployEntry(entry, [], cwd) + + expect(existsSync(join(cwd, '.mcp.json'))).toBe(false) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// undeployEntry +// ────────────────────────────────────────────────────────────────────────────── + +describe('undeployEntry', () => { + it('removes deployed files for all detected environments', async () => { + const entry = makeCommandEntry({ + name: 'cleanup-cmd', + environments: ['claude-code', 'vscode-copilot'], + }) + + const detectedEnvs = [makeDetected('claude-code'), makeDetected('vscode-copilot')] + + // Deploy first + await 
deployEntry(entry, detectedEnvs, cwd) + expect(existsSync(join(cwd, '.claude', 'commands', 'cleanup-cmd.md'))).toBe(true) + expect(existsSync(join(cwd, '.github', 'prompts', 'cleanup-cmd.prompt.md'))).toBe(true) + + await undeployEntry(entry, detectedEnvs, cwd) + + expect(existsSync(join(cwd, '.claude', 'commands', 'cleanup-cmd.md'))).toBe(false) + expect(existsSync(join(cwd, '.github', 'prompts', 'cleanup-cmd.prompt.md'))).toBe(false) + }) + + it('is a no-op when entry is null', async () => { + await expect(undeployEntry(null, [makeDetected('claude-code')], cwd)).resolves.toBeUndefined() + }) + + it('is a no-op when entry is undefined', async () => { + await expect(undeployEntry(undefined, [makeDetected('claude-code')], cwd)).resolves.toBeUndefined() + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// reconcileOnScan +// ────────────────────────────────────────────────────────────────────────────── + +describe('reconcileOnScan', () => { + it('deploys active entries to detected environments', async () => { + const entries = [ + makeMCPEntry({name: 'active-mcp', environments: ['claude-code'], active: true}), + makeCommandEntry({name: 'active-cmd', environments: ['claude-code'], active: true}), + ] + + const detectedEnvs = [makeDetected('claude-code')] + + await reconcileOnScan(entries, detectedEnvs, cwd) + + expect(existsSync(join(cwd, '.mcp.json'))).toBe(true) + const json = await readJson(join(cwd, '.mcp.json')) + expect(json.mcpServers).toHaveProperty('active-mcp') + + expect(existsSync(join(cwd, '.claude', 'commands', 'active-cmd.md'))).toBe(true) + }) + + it('does not deploy inactive entries', async () => { + const entries = [makeMCPEntry({name: 'inactive-mcp', environments: ['claude-code'], active: false})] + + const detectedEnvs = [makeDetected('claude-code')] + + await reconcileOnScan(entries, detectedEnvs, cwd) + + expect(existsSync(join(cwd, '.mcp.json'))).toBe(false) + }) + + it('does not deploy to 
environments that are not detected', async () => { + const entries = [makeMCPEntry({name: 'no-env-mcp', environments: ['vscode-copilot'], active: true})] + + // Only claude-code is detected, not vscode-copilot + const detectedEnvs = [makeDetected('claude-code')] + + await reconcileOnScan(entries, detectedEnvs, cwd) + + expect(existsSync(join(cwd, '.vscode', 'mcp.json'))).toBe(false) + }) + + it('is idempotent — calling twice produces the same result', async () => { + const entries = [makeMCPEntry({name: 'idempotent-mcp', environments: ['claude-code'], active: true})] + + const detectedEnvs = [makeDetected('claude-code')] + + await reconcileOnScan(entries, detectedEnvs, cwd) + await reconcileOnScan(entries, detectedEnvs, cwd) + + const json = await readJson(join(cwd, '.mcp.json')) + // Entry should appear exactly once (not duplicated) + const keys = Object.keys(json.mcpServers) + expect(keys.filter((k) => k === 'idempotent-mcp')).toHaveLength(1) + }) + + it('is a no-op when entries array is empty', async () => { + await expect(reconcileOnScan([], [makeDetected('claude-code')], cwd)).resolves.toBeUndefined() + }) +}) diff --git a/tests/unit/services/ai-env-scanner.test.js b/tests/unit/services/ai-env-scanner.test.js new file mode 100644 index 0000000..24b4a20 --- /dev/null +++ b/tests/unit/services/ai-env-scanner.test.js @@ -0,0 +1,387 @@ +import {describe, it, expect, vi, beforeEach, afterEach} from 'vitest' + +vi.mock('node:fs') + +import {existsSync, readFileSync} from 'node:fs' +import { + scanEnvironments, + getCompatibleEnvironments, + computeCategoryCounts, +} from '../../../src/services/ai-env-scanner.js' + +const CWD = '/fake/project' + +beforeEach(() => { + // Default: nothing exists + vi.mocked(existsSync).mockReturnValue(false) + vi.mocked(readFileSync).mockReturnValue('{}') +}) + +afterEach(() => { + vi.resetAllMocks() +}) + +// ────────────────────────────────────────────────────────────────────────────── +// scanEnvironments +// 
────────────────────────────────────────────────────────────────────────────── + +describe('scanEnvironments', () => { + it('returns only detected environments when some paths exist', () => { + vi.mocked(existsSync).mockImplementation((p) => String(p).endsWith('CLAUDE.md')) + + const result = scanEnvironments(CWD) + + expect(result).toHaveLength(1) + expect(result[0].id).toBe('claude-code') + expect(result[0].detected).toBe(true) + }) + + it('returns empty array when no paths exist', () => { + vi.mocked(existsSync).mockReturnValue(false) + + const result = scanEnvironments(CWD) + + expect(result).toHaveLength(0) + }) + + it('marks JSON file as unreadable when it exists but cannot be parsed', () => { + // .mcp.json exists but contains invalid JSON + vi.mocked(existsSync).mockImplementation((p) => String(p).endsWith('.mcp.json')) + vi.mocked(readFileSync).mockReturnValue('not valid json {{{}') + + const result = scanEnvironments(CWD) + + expect(result).toHaveLength(1) + const env = result[0] + expect(env.id).toBe('claude-code') + + const mcpJsonStatus = env.projectPaths.find((s) => s.path.endsWith('.mcp.json')) + expect(mcpJsonStatus).toBeDefined() + expect(mcpJsonStatus.exists).toBe(true) + expect(mcpJsonStatus.readable).toBe(false) + expect(env.unreadable).toHaveLength(1) + expect(env.unreadable[0]).toMatch(/.mcp.json$/) + }) + + it('marks JSON file as readable when it exists and parses successfully', () => { + vi.mocked(existsSync).mockImplementation((p) => String(p).endsWith('.mcp.json')) + vi.mocked(readFileSync).mockReturnValue('{"mcpServers":{}}') + + const result = scanEnvironments(CWD) + + expect(result).toHaveLength(1) + const mcpJsonStatus = result[0].projectPaths.find((s) => s.path.endsWith('.mcp.json')) + expect(mcpJsonStatus.exists).toBe(true) + expect(mcpJsonStatus.readable).toBe(true) + expect(result[0].unreadable).toHaveLength(0) + }) + + it('computes scope as "project" when only project paths exist', () => { + 
vi.mocked(existsSync).mockImplementation((p) => String(p).endsWith('CLAUDE.md')) + + const result = scanEnvironments(CWD) + + expect(result[0].scope).toBe('project') + }) + + it('computes scope as "global" when only global paths exist', () => { + // gemini-cli has both project (GEMINI.md) and global paths; trigger only global + vi.mocked(existsSync).mockImplementation((p) => String(p).includes('.gemini/settings.json')) + + const result = scanEnvironments(CWD) + + const gemini = result.find((e) => e.id === 'gemini-cli') + expect(gemini).toBeDefined() + expect(gemini.scope).toBe('global') + }) + + it('computes scope as "both" when project and global paths both exist', () => { + // GEMINI.md (project) + ~/.gemini/settings.json (global) + vi.mocked(existsSync).mockImplementation( + (p) => String(p).endsWith('GEMINI.md') || String(p).includes('.gemini/settings.json'), + ) + + const result = scanEnvironments(CWD) + + const gemini = result.find((e) => e.id === 'gemini-cli') + expect(gemini).toBeDefined() + expect(gemini.scope).toBe('both') + }) + + it('each detected environment has the correct supportedCategories', () => { + // Make every first project/global path of every env exist + const firstProjectPaths = [ + 'copilot-instructions.md', // vscode-copilot: .github/copilot-instructions.md + 'CLAUDE.md', // claude-code + 'AGENTS.md', // opencode + 'GEMINI.md', // gemini-cli + ] + + vi.mocked(existsSync).mockImplementation((p) => { + const str = String(p) + return firstProjectPaths.some((fp) => str.endsWith(fp)) || str.includes('.copilot/config.json') + }) + + const result = scanEnvironments(CWD) + + const byId = Object.fromEntries(result.map((e) => [e.id, e])) + + expect(byId['vscode-copilot']?.supportedCategories).toEqual(['mcp', 'command', 'skill', 'agent']) + expect(byId['claude-code']?.supportedCategories).toEqual(['mcp', 'command', 'skill', 'agent']) + expect(byId['opencode']?.supportedCategories).toEqual(['mcp', 'command', 'skill', 'agent']) + 
expect(byId['gemini-cli']?.supportedCategories).toEqual(['mcp', 'command']) + expect(byId['copilot-cli']?.supportedCategories).toEqual(['mcp', 'command', 'skill', 'agent']) + }) + + it('non-JSON paths are always readable when they exist', () => { + vi.mocked(existsSync).mockImplementation((p) => String(p).endsWith('CLAUDE.md')) + + const result = scanEnvironments(CWD) + + const claudeMdStatus = result[0].projectPaths.find((s) => s.path.endsWith('CLAUDE.md')) + expect(claudeMdStatus.exists).toBe(true) + expect(claudeMdStatus.readable).toBe(true) + expect(result[0].unreadable).toHaveLength(0) + }) + + it('initialises counts to all zeros', () => { + vi.mocked(existsSync).mockImplementation((p) => String(p).endsWith('CLAUDE.md')) + + const result = scanEnvironments(CWD) + + expect(result[0].counts).toEqual({mcp: 0, command: 0, skill: 0, agent: 0}) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// getCompatibleEnvironments +// ────────────────────────────────────────────────────────────────────────────── + +describe('getCompatibleEnvironments', () => { + /** @type {import('../../../src/services/ai-env-scanner.js').DetectedEnvironment[]} */ + const allDetected = [ + { + id: 'vscode-copilot', + name: 'VS Code Copilot', + detected: true, + projectPaths: [], + globalPaths: [], + unreadable: [], + supportedCategories: ['mcp', 'command', 'skill', 'agent'], + counts: {mcp: 0, command: 0, skill: 0, agent: 0}, + scope: 'project', + }, + { + id: 'claude-code', + name: 'Claude Code', + detected: true, + projectPaths: [], + globalPaths: [], + unreadable: [], + supportedCategories: ['mcp', 'command', 'skill', 'agent'], + counts: {mcp: 0, command: 0, skill: 0, agent: 0}, + scope: 'project', + }, + { + id: 'opencode', + name: 'OpenCode', + detected: true, + projectPaths: [], + globalPaths: [], + unreadable: [], + supportedCategories: ['mcp', 'command', 'skill', 'agent'], + counts: {mcp: 0, command: 0, skill: 0, agent: 0}, + scope: 
'project', + }, + { + id: 'gemini-cli', + name: 'Gemini CLI', + detected: true, + projectPaths: [], + globalPaths: [], + unreadable: [], + supportedCategories: ['mcp', 'command'], + counts: {mcp: 0, command: 0, skill: 0, agent: 0}, + scope: 'global', + }, + { + id: 'copilot-cli', + name: 'GitHub Copilot CLI', + detected: true, + projectPaths: [], + globalPaths: [], + unreadable: [], + supportedCategories: ['mcp', 'command', 'skill', 'agent'], + counts: {mcp: 0, command: 0, skill: 0, agent: 0}, + scope: 'global', + }, + ] + + it('filters by type "agent" — excludes gemini-cli', () => { + const result = getCompatibleEnvironments('agent', allDetected) + + expect(result).not.toContain('gemini-cli') + expect(result).toContain('vscode-copilot') + expect(result).toContain('claude-code') + expect(result).toContain('opencode') + expect(result).toContain('copilot-cli') + }) + + it('filters by type "skill" — excludes gemini-cli', () => { + const result = getCompatibleEnvironments('skill', allDetected) + + expect(result).not.toContain('gemini-cli') + expect(result).toHaveLength(4) + }) + + it('returns all env ids when type is "mcp" (every env supports mcp)', () => { + const result = getCompatibleEnvironments('mcp', allDetected) + + expect(result).toHaveLength(5) + expect(result).toContain('gemini-cli') + }) + + it('returns all env ids when type is "command" (every env supports command)', () => { + const result = getCompatibleEnvironments('command', allDetected) + + expect(result).toHaveLength(5) + }) + + it('returns empty array when detectedEnvs is empty', () => { + expect(getCompatibleEnvironments('mcp', [])).toHaveLength(0) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// computeCategoryCounts +// ────────────────────────────────────────────────────────────────────────────── + +describe('computeCategoryCounts', () => { + it('counts active entries for the given environment', () => { + /** @type 
{import('../../../src/types.js').CategoryEntry[]} */ + const entries = [ + { + id: '1', + name: 'my-mcp', + type: 'mcp', + active: true, + environments: ['claude-code'], + params: {transport: 'stdio', command: 'node', args: ['server.js']}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + { + id: '2', + name: 'my-command', + type: 'command', + active: true, + environments: ['claude-code', 'vscode-copilot'], + params: {content: 'do something'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + { + id: '3', + name: 'my-agent', + type: 'agent', + active: true, + environments: ['claude-code'], + params: {instructions: 'be helpful'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + ] + + const counts = computeCategoryCounts('claude-code', entries) + + expect(counts).toEqual({mcp: 1, command: 1, skill: 0, agent: 1}) + }) + + it('excludes inactive entries', () => { + /** @type {import('../../../src/types.js').CategoryEntry[]} */ + const entries = [ + { + id: '1', + name: 'disabled-mcp', + type: 'mcp', + active: false, + environments: ['claude-code'], + params: {transport: 'stdio', command: 'node', args: []}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + { + id: '2', + name: 'active-command', + type: 'command', + active: true, + environments: ['claude-code'], + params: {content: 'do something'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + ] + + const counts = computeCategoryCounts('claude-code', entries) + + expect(counts.mcp).toBe(0) + expect(counts.command).toBe(1) + }) + + it('returns all zeros when no entries match the environment', () => { + /** @type {import('../../../src/types.js').CategoryEntry[]} */ + const entries = [ + { + id: '1', + name: 'vscode-mcp', + type: 'mcp', + active: true, + environments: ['vscode-copilot'], + params: {transport: 'stdio', command: 'node', args: []}, + createdAt: 
'2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + ] + + const counts = computeCategoryCounts('claude-code', entries) + + expect(counts).toEqual({mcp: 0, command: 0, skill: 0, agent: 0}) + }) + + it('returns all zeros when entries array is empty', () => { + const counts = computeCategoryCounts('claude-code', []) + + expect(counts).toEqual({mcp: 0, command: 0, skill: 0, agent: 0}) + }) + + it('counts entries correctly when env appears in a multi-env list', () => { + /** @type {import('../../../src/types.js').CategoryEntry[]} */ + const entries = [ + { + id: '1', + name: 'shared-skill', + type: 'skill', + active: true, + environments: ['claude-code', 'opencode', 'vscode-copilot'], + params: {content: 'skill content'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + { + id: '2', + name: 'claude-only-skill', + type: 'skill', + active: true, + environments: ['claude-code'], + params: {content: 'another skill'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + }, + ] + + const counts = computeCategoryCounts('claude-code', entries) + + expect(counts.skill).toBe(2) + expect(counts.mcp).toBe(0) + }) +}) diff --git a/tests/unit/services/audit-detector.test.js b/tests/unit/services/audit-detector.test.js index 159dbc8..ee7d2cc 100644 --- a/tests/unit/services/audit-detector.test.js +++ b/tests/unit/services/audit-detector.test.js @@ -1,21 +1,21 @@ -import { describe, it, expect, afterEach } from 'vitest' -import { resolve, join } from 'node:path' -import { tmpdir } from 'node:os' -import { mkdirSync, writeFileSync, rmSync } from 'node:fs' +import {describe, it, expect, afterEach} from 'vitest' +import {resolve, join} from 'node:path' +import {tmpdir} from 'node:os' +import {mkdirSync, writeFileSync, rmSync} from 'node:fs' describe('detectEcosystems', () => { let tmpDir afterEach(() => { if (tmpDir) { - rmSync(tmpDir, { recursive: true, force: true }) + rmSync(tmpDir, {recursive: true, force: true}) 
tmpDir = undefined } }) function makeTmpDir(...lockFiles) { tmpDir = join(tmpdir(), `dvmi-test-${Date.now()}-${Math.random().toString(36).slice(2)}`) - mkdirSync(tmpDir, { recursive: true }) + mkdirSync(tmpDir, {recursive: true}) for (const f of lockFiles) { writeFileSync(join(tmpDir, f), '{}') } @@ -24,13 +24,13 @@ describe('detectEcosystems', () => { it('returns empty array when no lock files are present', async () => { const dir = makeTmpDir() - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') expect(detectEcosystems(dir)).toHaveLength(0) }) it('detects pnpm when pnpm-lock.yaml present', async () => { const dir = makeTmpDir('pnpm-lock.yaml') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems).toHaveLength(1) expect(ecosystems[0].name).toBe('pnpm') @@ -39,7 +39,7 @@ describe('detectEcosystems', () => { it('detects npm when package-lock.json present', async () => { const dir = makeTmpDir('package-lock.json') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems).toHaveLength(1) expect(ecosystems[0].name).toBe('npm') @@ -47,7 +47,7 @@ describe('detectEcosystems', () => { it('detects yarn when yarn.lock present', async () => { const dir = makeTmpDir('yarn.lock') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems).toHaveLength(1) expect(ecosystems[0].name).toBe('yarn') @@ -55,7 +55,7 @@ describe('detectEcosystems', () => 
{ it('pnpm takes priority over npm and yarn when all three present', async () => { const dir = makeTmpDir('pnpm-lock.yaml', 'package-lock.json', 'yarn.lock') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) // Only one Node.js ecosystem should be detected const nodeEcos = ecosystems.filter((e) => ['pnpm', 'npm', 'yarn'].includes(e.name)) @@ -65,7 +65,7 @@ describe('detectEcosystems', () => { it('npm takes priority over yarn when both present without pnpm', async () => { const dir = makeTmpDir('package-lock.json', 'yarn.lock') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) const nodeEcos = ecosystems.filter((e) => ['npm', 'yarn'].includes(e.name)) expect(nodeEcos).toHaveLength(1) @@ -74,14 +74,14 @@ describe('detectEcosystems', () => { it('detects cargo when Cargo.lock present', async () => { const dir = makeTmpDir('Cargo.lock') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems.some((e) => e.name === 'cargo')).toBe(true) }) it('detects multiple ecosystems simultaneously', async () => { const dir = makeTmpDir('pnpm-lock.yaml', 'Cargo.lock', 'Gemfile.lock') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems).toHaveLength(3) const names = ecosystems.map((e) => e.name) @@ -92,21 +92,21 @@ describe('detectEcosystems', () => { it('sets lockFilePath to an absolute path', async () => { const dir 
= makeTmpDir('pnpm-lock.yaml') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems[0].lockFilePath).toBe(resolve(dir, 'pnpm-lock.yaml')) }) it('detects pip with Pipfile.lock', async () => { const dir = makeTmpDir('Pipfile.lock') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems.some((e) => e.name === 'pip')).toBe(true) }) it('detects composer with composer.lock', async () => { const dir = makeTmpDir('composer.lock') - const { detectEcosystems } = await import('../../../src/services/audit-detector.js') + const {detectEcosystems} = await import('../../../src/services/audit-detector.js') const ecosystems = detectEcosystems(dir) expect(ecosystems.some((e) => e.name === 'composer')).toBe(true) }) diff --git a/tests/unit/typewriter.test.js b/tests/unit/typewriter.test.js index 8a1c8f7..3769025 100644 --- a/tests/unit/typewriter.test.js +++ b/tests/unit/typewriter.test.js @@ -1,8 +1,12 @@ -import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest' +import {describe, it, expect, vi, beforeEach, afterEach} from 'vitest' vi.mock('../../src/utils/gradient.js', () => ({ isAnimationEnabled: false, // always no-animation in tests - BRAND_GRADIENT: [[255, 107, 43], [204, 34, 68], [136, 34, 170]], + BRAND_GRADIENT: [ + [255, 107, 43], + [204, 34, 68], + [136, 34, 170], + ], gradientText: vi.fn((text) => text), // pass-through in tests })) @@ -21,28 +25,33 @@ describe('typewriter', () => { }) it('prints text immediately when not animated (no-TTY)', async () => { - const { typewriter } = await import('../../src/utils/typewriter.js') + const {typewriter} = await import('../../src/utils/typewriter.js') await 
typewriter('hello world') expect(writtenOutput).toContain('hello world') expect(writtenOutput).toContain('\n') }) it('prints text with gradient when gradient option provided', async () => { - const { gradientText } = await import('../../src/utils/gradient.js') - const { typewriter } = await import('../../src/utils/typewriter.js') - await typewriter('test', { gradient: [[255, 0, 0], [0, 0, 255]] }) + const {gradientText} = await import('../../src/utils/gradient.js') + const {typewriter} = await import('../../src/utils/typewriter.js') + await typewriter('test', { + gradient: [ + [255, 0, 0], + [0, 0, 255], + ], + }) expect(gradientText).toHaveBeenCalled() }) it('typewriterLine uses BRAND_GRADIENT', async () => { - const { gradientText, BRAND_GRADIENT } = await import('../../src/utils/gradient.js') - const { typewriterLine } = await import('../../src/utils/typewriter.js') + const {gradientText, BRAND_GRADIENT} = await import('../../src/utils/gradient.js') + const {typewriterLine} = await import('../../src/utils/typewriter.js') await typewriterLine('done!') expect(gradientText).toHaveBeenCalledWith(expect.any(String), BRAND_GRADIENT) }) it('typewriter completes and writes newline', async () => { - const { typewriter } = await import('../../src/utils/typewriter.js') + const {typewriter} = await import('../../src/utils/typewriter.js') await typewriter('abc') expect(writtenOutput.endsWith('\n')).toBe(true) }) diff --git a/tests/unit/utils/modal.test.js b/tests/unit/utils/modal.test.js index 67270a0..31133b9 100644 --- a/tests/unit/utils/modal.test.js +++ b/tests/unit/utils/modal.test.js @@ -1,5 +1,10 @@ -import { describe, it, expect } from 'vitest' -import { buildModalScreen, buildLoadingScreen, buildErrorScreen, handleModalKeypress } from '../../../src/utils/tui/modal.js' +import {describe, it, expect} from 'vitest' +import { + buildModalScreen, + buildLoadingScreen, + buildErrorScreen, + handleModalKeypress, +} from '../../../src/utils/tui/modal.js' // 
────────────────────────────────────────────────────────────────────────────── // Helpers @@ -11,7 +16,10 @@ import { buildModalScreen, buildLoadingScreen, buildErrorScreen, handleModalKeyp * @returns {string} */ function stripAnsi(str) { - return str.replace(/\x1b\[[0-9;]*m/g, '').replace(/\x1b\[2J/g, '').replace(/\x1b\[H/g, '') + return str + .replace(/\x1b\[[0-9;]*m/g, '') + .replace(/\x1b\[2J/g, '') + .replace(/\x1b\[H/g, '') } /** @@ -21,7 +29,7 @@ function stripAnsi(str) { */ function makeState(overrides = {}) { return { - rows: [{ id: 'CVE-2024-0001' }], + rows: [{id: 'CVE-2024-0001'}], columns: [], heading: 'Test', totalResults: 1, @@ -55,7 +63,7 @@ describe('buildModalScreen', () => { }) it('contains modal content lines', () => { - const state = makeState({ modalContent: ['Hello World', 'Second line'] }) + const state = makeState({modalContent: ['Hello World', 'Second line']}) const output = stripAnsi(buildModalScreen(state)) expect(output).toContain('Hello World') expect(output).toContain('Second line') @@ -72,21 +80,21 @@ describe('buildModalScreen', () => { }) it('contains "o open ref" hint when firstRefUrl is set', () => { - const state = makeState({ firstRefUrl: 'https://example.com/cve' }) + const state = makeState({firstRefUrl: 'https://example.com/cve'}) const output = stripAnsi(buildModalScreen(state)) expect(output).toContain('o open ref') }) it('does NOT contain "o open ref" hint when firstRefUrl is null', () => { - const state = makeState({ firstRefUrl: null }) + const state = makeState({firstRefUrl: null}) const output = stripAnsi(buildModalScreen(state)) expect(output).not.toContain('o open ref') }) it('respects modalScrollOffset to show different content lines', () => { - const content = Array.from({ length: 30 }, (_, i) => `Line ${i + 1}`) - const stateTop = makeState({ modalContent: content, modalScrollOffset: 0 }) - const stateScrolled = makeState({ modalContent: content, modalScrollOffset: 10 }) + const content = Array.from({length: 30}, 
(_, i) => `Line ${i + 1}`) + const stateTop = makeState({modalContent: content, modalScrollOffset: 0}) + const stateScrolled = makeState({modalContent: content, modalScrollOffset: 10}) const outTop = stripAnsi(buildModalScreen(stateTop)) const outScrolled = stripAnsi(buildModalScreen(stateScrolled)) expect(outTop).toContain('Line 1') @@ -95,7 +103,7 @@ describe('buildModalScreen', () => { }) it('handles empty modalContent gracefully', () => { - const state = makeState({ modalContent: [] }) + const state = makeState({modalContent: []}) expect(() => buildModalScreen(state)).not.toThrow() }) }) @@ -151,77 +159,77 @@ describe('buildErrorScreen', () => { describe('handleModalKeypress', () => { it('returns { backToTable: true } on Esc', () => { - const result = handleModalKeypress(makeState(), { name: 'escape' }) - expect(result).toEqual({ backToTable: true }) + const result = handleModalKeypress(makeState(), {name: 'escape'}) + expect(result).toEqual({backToTable: true}) }) it('returns { exit: true } on q', () => { - const result = handleModalKeypress(makeState(), { name: 'q' }) - expect(result).toEqual({ exit: true }) + const result = handleModalKeypress(makeState(), {name: 'q'}) + expect(result).toEqual({exit: true}) }) it('returns { exit: true } on Ctrl+C', () => { - const result = handleModalKeypress(makeState(), { name: 'c', ctrl: true }) - expect(result).toEqual({ exit: true }) + const result = handleModalKeypress(makeState(), {name: 'c', ctrl: true}) + expect(result).toEqual({exit: true}) }) it('returns { openUrl } when o is pressed and firstRefUrl is set', () => { - const state = makeState({ firstRefUrl: 'https://nvd.nist.gov/cve/123' }) - const result = handleModalKeypress(state, { name: 'o' }) - expect(result).toEqual({ openUrl: 'https://nvd.nist.gov/cve/123' }) + const state = makeState({firstRefUrl: 'https://nvd.nist.gov/cve/123'}) + const result = handleModalKeypress(state, {name: 'o'}) + expect(result).toEqual({openUrl: 'https://nvd.nist.gov/cve/123'}) }) 
it('does NOT open URL when o is pressed but firstRefUrl is null', () => { - const state = makeState({ firstRefUrl: null }) - const result = handleModalKeypress(state, { name: 'o' }) + const state = makeState({firstRefUrl: null}) + const result = handleModalKeypress(state, {name: 'o'}) // Should return unchanged state (no openUrl control object) expect(result).not.toHaveProperty('openUrl') }) it('decrements modalScrollOffset on up arrow (not below 0)', () => { - const state = makeState({ modalScrollOffset: 5, modalContent: Array(30).fill('x') }) - const result = handleModalKeypress(state, { name: 'up' }) - expect(result).toMatchObject({ modalScrollOffset: 4 }) + const state = makeState({modalScrollOffset: 5, modalContent: Array(30).fill('x')}) + const result = handleModalKeypress(state, {name: 'up'}) + expect(result).toMatchObject({modalScrollOffset: 4}) }) it('does not go below 0 on up at the top', () => { - const state = makeState({ modalScrollOffset: 0, modalContent: Array(30).fill('x') }) - const result = handleModalKeypress(state, { name: 'up' }) - expect(result).toMatchObject({ modalScrollOffset: 0 }) + const state = makeState({modalScrollOffset: 0, modalContent: Array(30).fill('x')}) + const result = handleModalKeypress(state, {name: 'up'}) + expect(result).toMatchObject({modalScrollOffset: 0}) }) it('increments modalScrollOffset on down arrow', () => { - const state = makeState({ modalScrollOffset: 0, modalContent: Array(30).fill('x') }) - const result = handleModalKeypress(state, { name: 'down' }) - expect(result).toMatchObject({ modalScrollOffset: 1 }) + const state = makeState({modalScrollOffset: 0, modalContent: Array(30).fill('x')}) + const result = handleModalKeypress(state, {name: 'down'}) + expect(result).toMatchObject({modalScrollOffset: 1}) }) it('clamps modalScrollOffset at max on down arrow', () => { // 30 content lines, viewportHeight = 24 - 3 - 4 = 17 → maxOffset = 30 - 17 = 13 const content = Array(30).fill('x') - const state = makeState({ 
modalScrollOffset: 13, modalContent: content, termRows: 24 }) - const result = handleModalKeypress(state, { name: 'down' }) - expect(result).toMatchObject({ modalScrollOffset: 13 }) // already at max + const state = makeState({modalScrollOffset: 13, modalContent: content, termRows: 24}) + const result = handleModalKeypress(state, {name: 'down'}) + expect(result).toMatchObject({modalScrollOffset: 13}) // already at max }) it('moves by contentViewport on pagedown', () => { const content = Array(50).fill('x') - const state = makeState({ modalScrollOffset: 0, modalContent: content, termRows: 24 }) - const result = handleModalKeypress(state, { name: 'pagedown' }) + const state = makeState({modalScrollOffset: 0, modalContent: content, termRows: 24}) + const result = handleModalKeypress(state, {name: 'pagedown'}) // contentViewport = 24 - 3 - 4 = 17 - expect(result).toMatchObject({ modalScrollOffset: 17 }) + expect(result).toMatchObject({modalScrollOffset: 17}) }) it('moves by contentViewport on pageup', () => { const content = Array(50).fill('x') - const state = makeState({ modalScrollOffset: 20, modalContent: content, termRows: 24 }) - const result = handleModalKeypress(state, { name: 'pageup' }) - expect(result).toMatchObject({ modalScrollOffset: 3 }) // 20 - 17 + const state = makeState({modalScrollOffset: 20, modalContent: content, termRows: 24}) + const result = handleModalKeypress(state, {name: 'pageup'}) + expect(result).toMatchObject({modalScrollOffset: 3}) // 20 - 17 }) it('returns unchanged state for unrecognized key', () => { const state = makeState() - const result = handleModalKeypress(state, { name: 'f2' }) + const result = handleModalKeypress(state, {name: 'f2'}) expect(result).toBe(state) }) }) diff --git a/tests/unit/utils/navigable-table.test.js b/tests/unit/utils/navigable-table.test.js index 007c092..4ac6a2b 100644 --- a/tests/unit/utils/navigable-table.test.js +++ b/tests/unit/utils/navigable-table.test.js @@ -1,4 +1,4 @@ -import { describe, it, 
expect } from 'vitest' +import {describe, it, expect} from 'vitest' import { computeViewport, formatRow, @@ -13,33 +13,33 @@ import { describe('computeViewport', () => { it('starts at 0 when selectedIndex is at top', () => { - const { startIndex, endIndex } = computeViewport(0, 100, 10) + const {startIndex, endIndex} = computeViewport(0, 100, 10) expect(startIndex).toBe(0) expect(endIndex).toBe(10) }) it('centers the selected row in the viewport', () => { // Selected = 20, viewportHeight = 10 → center bias: 20 - 5 = 15 - const { startIndex, endIndex } = computeViewport(20, 100, 10) + const {startIndex, endIndex} = computeViewport(20, 100, 10) expect(startIndex).toBe(15) expect(endIndex).toBe(25) }) it('clamps startIndex so last page is always full', () => { // selectedIndex near the end - const { startIndex, endIndex } = computeViewport(98, 100, 10) + const {startIndex, endIndex} = computeViewport(98, 100, 10) expect(startIndex).toBe(90) expect(endIndex).toBe(100) }) it('handles totalRows < viewportHeight', () => { - const { startIndex, endIndex } = computeViewport(2, 5, 10) + const {startIndex, endIndex} = computeViewport(2, 5, 10) expect(startIndex).toBe(0) expect(endIndex).toBe(5) }) it('returns endIndex = totalRows when remaining rows < viewportHeight', () => { - const { startIndex, endIndex } = computeViewport(0, 3, 10) + const {startIndex, endIndex} = computeViewport(0, 3, 10) expect(startIndex).toBe(0) expect(endIndex).toBe(3) }) @@ -51,12 +51,12 @@ describe('computeViewport', () => { describe('formatRow', () => { const columns = [ - { header: 'ID', key: 'id', width: 10 }, - { header: 'Name', key: 'name', width: 15 }, + {header: 'ID', key: 'id', width: 10}, + {header: 'Name', key: 'name', width: 15}, ] it('pads cells to column width', () => { - const row = { id: 'abc', name: 'hello' } + const row = {id: 'abc', name: 'hello'} const result = formatRow(row, columns, 80, false) // Strip ANSI const plain = result.replace(/\x1b\[[0-9;]*m/g, '') @@ -65,33 +65,33 @@ 
describe('formatRow', () => { }) it('truncates with … when value exceeds column width', () => { - const row = { id: 'A'.repeat(20), name: 'B' } + const row = {id: 'A'.repeat(20), name: 'B'} const result = formatRow(row, columns, 80, false) const plain = result.replace(/\x1b\[[0-9;]*m/g, '') expect(plain).toContain('…') }) it('wraps the line in ANSI inverse video when isSelected = true', () => { - const row = { id: 'x', name: 'y' } + const row = {id: 'x', name: 'y'} const result = formatRow(row, columns, 80, true) - expect(result).toContain('\x1b[7m') // ANSI inverse on + expect(result).toContain('\x1b[7m') // ANSI inverse on expect(result).toContain('\x1b[27m') // ANSI inverse off }) it('does NOT wrap in inverse when isSelected = false', () => { - const row = { id: 'x', name: 'y' } + const row = {id: 'x', name: 'y'} const result = formatRow(row, columns, 80, false) expect(result).not.toContain('\x1b[7m') }) it('applies colorize function to the padded cell', () => { - const colorCols = [{ header: 'ID', key: 'id', width: 5, colorize: (v) => `[${v}]` }] - const result = formatRow({ id: 'abc' }, colorCols, 80, false) + const colorCols = [{header: 'ID', key: 'id', width: 5, colorize: (v) => `[${v}]`}] + const result = formatRow({id: 'abc'}, colorCols, 80, false) expect(result).toContain('[abc ]') }) it('handles missing key gracefully', () => { - const row = { id: 'only' } + const row = {id: 'only'} expect(() => formatRow(row, columns, 80, false)).not.toThrow() }) }) @@ -130,7 +130,7 @@ describe('handleTableKeypress', () => { */ function makeState(overrides = {}) { return { - rows: [{ id: 'A' }, { id: 'B' }, { id: 'C' }], + rows: [{id: 'A'}, {id: 'B'}, {id: 'C'}], columns: [], heading: 'Test', totalResults: 3, @@ -149,58 +149,58 @@ describe('handleTableKeypress', () => { } it('returns { exit: true } on Esc key', () => { - const result = handleTableKeypress(makeState(), { name: 'escape' }) - expect(result).toEqual({ exit: true }) + const result = 
handleTableKeypress(makeState(), {name: 'escape'}) + expect(result).toEqual({exit: true}) }) it('returns { exit: true } on q key', () => { - const result = handleTableKeypress(makeState(), { name: 'q' }) - expect(result).toEqual({ exit: true }) + const result = handleTableKeypress(makeState(), {name: 'q'}) + expect(result).toEqual({exit: true}) }) it('returns { exit: true } on Ctrl+C', () => { - const result = handleTableKeypress(makeState(), { name: 'c', ctrl: true }) - expect(result).toEqual({ exit: true }) + const result = handleTableKeypress(makeState(), {name: 'c', ctrl: true}) + expect(result).toEqual({exit: true}) }) it('switches to modal view on Enter', () => { - const result = handleTableKeypress(makeState(), { name: 'return' }) - expect(result).toMatchObject({ currentView: 'modal' }) + const result = handleTableKeypress(makeState(), {name: 'return'}) + expect(result).toMatchObject({currentView: 'modal'}) }) it('decrements selectedIndex on up arrow (not below 0)', () => { - const result = handleTableKeypress(makeState({ selectedIndex: 1 }), { name: 'up' }) - expect(result).toMatchObject({ selectedIndex: 0 }) + const result = handleTableKeypress(makeState({selectedIndex: 1}), {name: 'up'}) + expect(result).toMatchObject({selectedIndex: 0}) }) it('does not go below 0 on up arrow at first row', () => { - const result = handleTableKeypress(makeState({ selectedIndex: 0 }), { name: 'up' }) - expect(result).toMatchObject({ selectedIndex: 0 }) + const result = handleTableKeypress(makeState({selectedIndex: 0}), {name: 'up'}) + expect(result).toMatchObject({selectedIndex: 0}) }) it('increments selectedIndex on down arrow', () => { - const result = handleTableKeypress(makeState({ selectedIndex: 1 }), { name: 'down' }) - expect(result).toMatchObject({ selectedIndex: 2 }) + const result = handleTableKeypress(makeState({selectedIndex: 1}), {name: 'down'}) + expect(result).toMatchObject({selectedIndex: 2}) }) it('does not exceed last row on down arrow', () => { - const 
result = handleTableKeypress(makeState({ selectedIndex: 2 }), { name: 'down' }) - expect(result).toMatchObject({ selectedIndex: 2 }) + const result = handleTableKeypress(makeState({selectedIndex: 2}), {name: 'down'}) + expect(result).toMatchObject({selectedIndex: 2}) }) it('moves by viewportHeight on pagedown', () => { - const result = handleTableKeypress(makeState({ selectedIndex: 0, viewportHeight: 2 }), { name: 'pagedown' }) - expect(result).toMatchObject({ selectedIndex: 2 }) + const result = handleTableKeypress(makeState({selectedIndex: 0, viewportHeight: 2}), {name: 'pagedown'}) + expect(result).toMatchObject({selectedIndex: 2}) }) it('moves by viewportHeight on pageup', () => { - const result = handleTableKeypress(makeState({ selectedIndex: 2, viewportHeight: 2 }), { name: 'pageup' }) - expect(result).toMatchObject({ selectedIndex: 0 }) + const result = handleTableKeypress(makeState({selectedIndex: 2, viewportHeight: 2}), {name: 'pageup'}) + expect(result).toMatchObject({selectedIndex: 0}) }) it('returns unchanged state for unrecognized key', () => { const state = makeState() - const result = handleTableKeypress(state, { name: 'f1' }) + const result = handleTableKeypress(state, {name: 'f1'}) expect(result).toBe(state) // same reference }) }) @@ -212,10 +212,19 @@ describe('handleTableKeypress', () => { describe('buildTableScreen', () => { it('renders without throwing and contains heading and footer attribution', () => { const state = createInteractiveTableState( - [{ id: 'CVE-2024-0001', severity: 'High', score: '8.0', published: '2024-01-01', description: 'Test CVE', reference: 'https://example.com' }], [ - { header: 'CVE ID', key: 'id', width: 20 }, - { header: 'Severity', key: 'severity', width: 10 }, + { + id: 'CVE-2024-0001', + severity: 'High', + score: '8.0', + published: '2024-01-01', + description: 'Test CVE', + reference: 'https://example.com', + }, + ], + [ + {header: 'CVE ID', key: 'id', width: 20}, + {header: 'Severity', key: 'severity', width: 
10}, ], 'CVE Search: "test"', 1, @@ -223,7 +232,10 @@ describe('buildTableScreen', () => { 80, ) const output = buildTableScreen(state) - const plain = output.replace(/\x1b\[[0-9;]*m/g, '').replace(/\x1b\[2J/g, '').replace(/\x1b\[H/g, '') + const plain = output + .replace(/\x1b\[[0-9;]*m/g, '') + .replace(/\x1b\[2J/g, '') + .replace(/\x1b\[H/g, '') expect(plain).toContain('CVE Search: "test"') expect(plain).toContain('CVE ID') expect(plain).toContain('NVD') diff --git a/tests/unit/utils/tui/form.test.js b/tests/unit/utils/tui/form.test.js new file mode 100644 index 0000000..77e1558 --- /dev/null +++ b/tests/unit/utils/tui/form.test.js @@ -0,0 +1,872 @@ +import {describe, it, expect} from 'vitest' +import { + buildFieldLine, + buildMultiSelectLines, + buildMiniEditorLines, + buildFormScreen, + handleFormKeypress, + extractValues, + getMCPFormFields, + getCommandFormFields, + getSkillFormFields, + getAgentFormFields, +} from '../../../../src/utils/tui/form.js' + +// ────────────────────────────────────────────────────────────────────────────── +// Test helpers +// ────────────────────────────────────────────────────────────────────────────── + +/** + * Strip ANSI escape codes from a string. + * @param {string} str + * @returns {string} + */ +function stripAnsi(str) { + return str.replace(/\x1b\[[0-9;]*[mGKHJ]/g, '') +} + +/** + * Build a minimal FormState for tests. 
+ * @param {object} [overrides] + * @returns {import('../../../../src/utils/tui/form.js').FormState} + */ +function makeFormState(overrides = {}) { + return { + title: 'Test Form', + focusedFieldIndex: 0, + status: 'editing', + errorMessage: null, + fields: [ + { + type: 'text', + label: 'Name', + key: 'name', + value: 'hello', + cursor: 5, + required: true, + placeholder: '', + }, + { + type: 'selector', + label: 'Transport', + key: 'transport', + options: ['stdio', 'sse', 'streamable-http'], + selectedIndex: 0, + required: true, + }, + ], + ...overrides, + } +} + +/** + * Build a minimal FormState with all required fields filled in. + * @param {object} [overrides] + * @returns {import('../../../../src/utils/tui/form.js').FormState} + */ +function makeValidFormState(overrides = {}) { + return makeFormState({ + fields: [ + { + type: 'text', + label: 'Name', + key: 'name', + value: 'my-entry', + cursor: 8, + required: true, + placeholder: '', + }, + { + type: 'selector', + label: 'Transport', + key: 'transport', + options: ['stdio', 'sse'], + selectedIndex: 0, + required: true, + }, + ], + ...overrides, + }) +} + +/** + * Simulate a printable key event. + * @param {string} ch - Single character to type + * @returns {{ name: string, sequence: string, ctrl: boolean }} + */ +function charKey(ch) { + return {name: ch, sequence: ch, ctrl: false} +} + +/** + * Simulate a named key event (e.g. tab, backspace, return). 
+ * @param {string} name + * @param {object} [extra] + * @returns {{ name: string, sequence?: string, ctrl?: boolean, shift?: boolean }} + */ +function namedKey(name, extra = {}) { + return {name, ...extra} +} + +// ────────────────────────────────────────────────────────────────────────────── +// buildFieldLine +// ────────────────────────────────────────────────────────────────────────────── + +describe('buildFieldLine', () => { + it('renders a TextField with cursor indicator when focused', () => { + /** @type {import('../../../../src/utils/tui/form.js').TextField} */ + const field = { + type: 'text', + label: 'Name', + value: 'hello', + cursor: 5, + required: true, + placeholder: '', + } + const line = buildFieldLine(field, true) + expect(stripAnsi(line)).toContain('Name') + expect(stripAnsi(line)).toContain('hello') + expect(stripAnsi(line)).toContain('|') + expect(line.startsWith('\x1b') || line.includes('> ')).toBe(true) + }) + + it('renders a TextField without cursor when not focused', () => { + /** @type {import('../../../../src/utils/tui/form.js').TextField} */ + const field = { + type: 'text', + label: 'Name', + value: 'hello', + cursor: 5, + required: true, + placeholder: '', + } + const line = buildFieldLine(field, false) + expect(stripAnsi(line)).toContain('hello') + expect(stripAnsi(line)).not.toContain('|') + }) + + it('renders a SelectorField with arrows', () => { + /** @type {import('../../../../src/utils/tui/form.js').SelectorField} */ + const field = { + type: 'selector', + label: 'Transport', + options: ['stdio', 'sse'], + selectedIndex: 0, + required: true, + } + const line = stripAnsi(buildFieldLine(field, false)) + expect(line).toContain('Transport') + expect(line).toContain('stdio') + expect(line).toContain('<') + expect(line).toContain('>') + }) + + it('renders a MultiSelectField with count summary', () => { + /** @type {import('../../../../src/utils/tui/form.js').MultiSelectField} */ + const field = { + type: 'multiselect', + label: 
'Environments', + options: [ + {id: 'claude-code', label: 'Claude Code'}, + {id: 'opencode', label: 'OpenCode'}, + ], + selected: new Set(['claude-code']), + focusedOptionIndex: 0, + required: true, + } + const line = stripAnsi(buildFieldLine(field, false)) + expect(line).toContain('Environments') + expect(line).toContain('1/2') + }) + + it('renders a MiniEditorField with line count', () => { + /** @type {import('../../../../src/utils/tui/form.js').MiniEditorField} */ + const field = { + type: 'editor', + label: 'Content', + lines: ['line one', 'line two'], + cursorLine: 0, + cursorCol: 0, + required: true, + } + const line = stripAnsi(buildFieldLine(field, false)) + expect(line).toContain('Content') + expect(line).toContain('2 lines') + }) + + it('prefixes focused field with ">"', () => { + /** @type {import('../../../../src/utils/tui/form.js').TextField} */ + const field = { + type: 'text', + label: 'Name', + value: '', + cursor: 0, + required: true, + placeholder: '', + } + const focused = stripAnsi(buildFieldLine(field, true)) + const unfocused = stripAnsi(buildFieldLine(field, false)) + expect(focused).toContain('>') + expect(unfocused).not.toContain('>') + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// buildMultiSelectLines +// ────────────────────────────────────────────────────────────────────────────── + +describe('buildMultiSelectLines', () => { + /** @type {import('../../../../src/utils/tui/form.js').MultiSelectField} */ + const field = { + type: 'multiselect', + label: 'Envs', + options: [ + {id: 'claude-code', label: 'Claude Code'}, + {id: 'opencode', label: 'OpenCode'}, + ], + selected: new Set(['claude-code']), + focusedOptionIndex: 0, + required: true, + } + + it('renders one line per option', () => { + const lines = buildMultiSelectLines(field, true, 10) + expect(lines).toHaveLength(2) + }) + + it('marks selected option with [x]', () => { + const lines = buildMultiSelectLines(field, true, 
10).map(stripAnsi) + expect(lines[0]).toContain('[x]') + expect(lines[0]).toContain('Claude Code') + }) + + it('marks unselected option with [ ]', () => { + const lines = buildMultiSelectLines(field, true, 10).map(stripAnsi) + expect(lines[1]).toContain('[ ]') + expect(lines[1]).toContain('OpenCode') + }) + + it('respects maxLines limit', () => { + const lines = buildMultiSelectLines(field, true, 1) + expect(lines).toHaveLength(1) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// buildMiniEditorLines +// ────────────────────────────────────────────────────────────────────────────── + +describe('buildMiniEditorLines', () => { + /** @type {import('../../../../src/utils/tui/form.js').MiniEditorField} */ + const field = { + type: 'editor', + label: 'Content', + lines: ['hello world', 'second line'], + cursorLine: 0, + cursorCol: 5, + required: true, + } + + it('renders one line per content line', () => { + const lines = buildMiniEditorLines(field, true, 20) + expect(lines).toHaveLength(2) + }) + + it('inserts cursor on the active line when focused', () => { + const lines = buildMiniEditorLines(field, true, 20).map(stripAnsi) + expect(lines[0]).toContain('|') + }) + + it('does not insert cursor when not focused', () => { + const lines = buildMiniEditorLines(field, false, 20).map(stripAnsi) + expect(lines[0]).not.toContain('|') + }) + + it('includes line numbers', () => { + const lines = buildMiniEditorLines(field, false, 20).map(stripAnsi) + expect(lines[0]).toContain('1') + expect(lines[1]).toContain('2') + }) + + it('respects maxLines limit', () => { + const lines = buildMiniEditorLines(field, true, 1) + expect(lines).toHaveLength(1) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// buildFormScreen +// ────────────────────────────────────────────────────────────────────────────── + +describe('buildFormScreen', () => { + it('renders without throwing', () => { + const state = 
makeFormState() + expect(() => buildFormScreen(state, 24, 80)).not.toThrow() + }) + + it('includes the form title', () => { + const state = makeFormState({title: 'My Fancy Form'}) + const lines = buildFormScreen(state, 24, 80).map(stripAnsi).join('\n') + expect(lines).toContain('My Fancy Form') + }) + + it('includes field labels', () => { + const state = makeFormState() + const lines = buildFormScreen(state, 24, 80).map(stripAnsi).join('\n') + expect(lines).toContain('Name') + expect(lines).toContain('Transport') + }) + + it('renders the error message when set', () => { + const state = makeFormState({errorMessage: 'Something went wrong'}) + const lines = buildFormScreen(state, 24, 80).map(stripAnsi).join('\n') + expect(lines).toContain('Something went wrong') + }) + + it('includes footer hint', () => { + const state = makeFormState() + const lines = buildFormScreen(state, 24, 80).map(stripAnsi).join('\n') + expect(lines).toContain('Tab') + expect(lines).toContain('Esc') + }) + + it('returns an array of strings', () => { + const state = makeFormState() + const lines = buildFormScreen(state, 24, 80) + expect(Array.isArray(lines)).toBe(true) + for (const line of lines) { + expect(typeof line).toBe('string') + } + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// handleFormKeypress — navigation +// ────────────────────────────────────────────────────────────────────────────── + +describe('handleFormKeypress — Tab moves to next field', () => { + it('Tab advances focusedFieldIndex', () => { + const state = makeFormState({focusedFieldIndex: 0}) + const result = handleFormKeypress(state, namedKey('tab')) + expect(result).not.toHaveProperty('cancelled') + expect(result).not.toHaveProperty('submitted') + expect(/** @type {any} */ (result).focusedFieldIndex).toBe(1) + }) + + it('Tab wraps from last field back to first', () => { + const state = makeFormState({focusedFieldIndex: 1}) + const result = handleFormKeypress(state, 
namedKey('tab')) + expect(/** @type {any} */ (result).focusedFieldIndex).toBe(0) + }) +}) + +describe('handleFormKeypress — Shift+Tab moves to previous field', () => { + it('Shift+Tab decrements focusedFieldIndex', () => { + const state = makeFormState({focusedFieldIndex: 1}) + const result = handleFormKeypress(state, namedKey('tab', {shift: true})) + expect(/** @type {any} */ (result).focusedFieldIndex).toBe(0) + }) + + it('Shift+Tab wraps from first field to last', () => { + const state = makeFormState({focusedFieldIndex: 0}) + const result = handleFormKeypress(state, namedKey('tab', {shift: true})) + expect(/** @type {any} */ (result).focusedFieldIndex).toBe(state.fields.length - 1) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// handleFormKeypress — cancel +// ────────────────────────────────────────────────────────────────────────────── + +describe('handleFormKeypress — Esc cancels', () => { + it('returns { cancelled: true } when Esc is pressed on a text field', () => { + const state = makeFormState({focusedFieldIndex: 0}) + const result = handleFormKeypress(state, namedKey('escape')) + expect(result).toEqual({cancelled: true}) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// handleFormKeypress — submit +// ────────────────────────────────────────────────────────────────────────────── + +describe('handleFormKeypress — Ctrl+S submits when valid', () => { + it('returns { submitted: true, values } when all required fields are filled', () => { + const state = makeValidFormState() + const result = handleFormKeypress(state, namedKey('s', {ctrl: true})) + expect(result).toHaveProperty('submitted', true) + expect(result).toHaveProperty('values') + expect(/** @type {any} */ (result).values.name).toBe('my-entry') + }) +}) + +describe('handleFormKeypress — Ctrl+S returns errorMessage when required field empty', () => { + it('sets errorMessage and returns FormState when 
required text field is empty', () => { + const state = makeFormState({ + fields: [ + { + type: 'text', + label: 'Name', + key: 'name', + value: '', + cursor: 0, + required: true, + placeholder: '', + }, + ], + }) + const result = handleFormKeypress(state, namedKey('s', {ctrl: true})) + expect(result).not.toHaveProperty('submitted') + expect(/** @type {any} */ (result).errorMessage).toBeTruthy() + expect(/** @type {any} */ (result).errorMessage).toContain('Name') + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// handleFormKeypress — TextField +// ────────────────────────────────────────────────────────────────────────────── + +describe('handleFormKeypress — printable char appended to TextField', () => { + it('appends character at cursor position', () => { + const state = makeFormState({ + focusedFieldIndex: 0, + fields: [ + { + type: 'text', + label: 'Name', + key: 'name', + value: 'helo', + cursor: 3, + required: true, + placeholder: '', + }, + ], + }) + const result = handleFormKeypress(state, charKey('l')) + const field = /** @type {any} */ (result).fields[0] + expect(field.value).toBe('hello') + expect(field.cursor).toBe(4) + }) +}) + +describe('handleFormKeypress — Backspace removes char before cursor', () => { + it('deletes the character immediately before the cursor', () => { + const state = makeFormState({ + focusedFieldIndex: 0, + fields: [ + { + type: 'text', + label: 'Name', + key: 'name', + value: 'hello', + cursor: 5, + required: true, + placeholder: '', + }, + ], + }) + const result = handleFormKeypress(state, namedKey('backspace')) + const field = /** @type {any} */ (result).fields[0] + expect(field.value).toBe('hell') + expect(field.cursor).toBe(4) + }) + + it('does nothing when cursor is at position 0', () => { + const state = makeFormState({ + focusedFieldIndex: 0, + fields: [ + { + type: 'text', + label: 'Name', + key: 'name', + value: 'hello', + cursor: 0, + required: true, + placeholder: '', + }, + 
], + }) + const result = handleFormKeypress(state, namedKey('backspace')) + const field = /** @type {any} */ (result).fields[0] + expect(field.value).toBe('hello') + expect(field.cursor).toBe(0) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// handleFormKeypress — SelectorField +// ────────────────────────────────────────────────────────────────────────────── + +describe('handleFormKeypress — ← → cycles SelectorField options', () => { + /** @type {import('../../../../src/utils/tui/form.js').FormState} */ + const selectorState = { + title: 'Test', + focusedFieldIndex: 0, + status: 'editing', + errorMessage: null, + fields: [ + { + type: 'selector', + label: 'Transport', + key: 'transport', + options: ['stdio', 'sse', 'streamable-http'], + selectedIndex: 0, + required: true, + }, + ], + } + + it('Right arrow moves to next option', () => { + const result = handleFormKeypress(selectorState, namedKey('right')) + expect(/** @type {any} */ (result).fields[0].selectedIndex).toBe(1) + }) + + it('Left arrow on first option wraps to last', () => { + const result = handleFormKeypress(selectorState, namedKey('left')) + expect(/** @type {any} */ (result).fields[0].selectedIndex).toBe(2) + }) + + it('Right arrow on last option wraps to first', () => { + const state = {...selectorState, fields: [{...selectorState.fields[0], selectedIndex: 2}]} + const result = handleFormKeypress(state, namedKey('right')) + expect(/** @type {any} */ (result).fields[0].selectedIndex).toBe(0) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// handleFormKeypress — MultiSelectField +// ────────────────────────────────────────────────────────────────────────────── + +describe('handleFormKeypress — Space toggles MultiSelectField option', () => { + /** @type {import('../../../../src/utils/tui/form.js').FormState} */ + const msState = { + title: 'Test', + focusedFieldIndex: 0, + status: 'editing', + 
errorMessage: null, + fields: [ + { + type: 'multiselect', + label: 'Environments', + key: 'environments', + options: [ + {id: 'claude-code', label: 'Claude Code'}, + {id: 'opencode', label: 'OpenCode'}, + ], + selected: new Set(['claude-code']), + focusedOptionIndex: 0, + required: true, + }, + ], + } + + it('Space deselects an already-selected option', () => { + const result = handleFormKeypress(msState, namedKey('space')) + const field = /** @type {any} */ (result).fields[0] + expect(field.selected.has('claude-code')).toBe(false) + }) + + it('Space selects an unselected option', () => { + const state = { + ...msState, + fields: [{...msState.fields[0], focusedOptionIndex: 1}], + } + const result = handleFormKeypress(state, namedKey('space')) + const field = /** @type {any} */ (result).fields[0] + expect(field.selected.has('opencode')).toBe(true) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// handleFormKeypress — MiniEditorField +// ────────────────────────────────────────────────────────────────────────────── + +describe('handleFormKeypress — Enter in MiniEditorField inserts new line', () => { + /** @type {import('../../../../src/utils/tui/form.js').FormState} */ + const editorState = { + title: 'Test', + focusedFieldIndex: 0, + status: 'editing', + errorMessage: null, + fields: [ + { + type: 'editor', + label: 'Content', + key: 'content', + lines: ['hello world'], + cursorLine: 0, + cursorCol: 5, + required: true, + }, + ], + } + + it('splits line at cursor on Enter', () => { + const result = handleFormKeypress(editorState, namedKey('return')) + const field = /** @type {any} */ (result).fields[0] + expect(field.lines).toHaveLength(2) + expect(field.lines[0]).toBe('hello') + expect(field.lines[1]).toBe(' world') + expect(field.cursorLine).toBe(1) + expect(field.cursorCol).toBe(0) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// extractValues +// 
────────────────────────────────────────────────────────────────────────────── + +describe('extractValues', () => { + it('returns correct object from mixed form state', () => { + /** @type {import('../../../../src/utils/tui/form.js').FormState} */ + const state = { + title: 'Test', + focusedFieldIndex: 0, + status: 'editing', + errorMessage: null, + fields: [ + { + type: 'text', + label: 'Name', + key: 'name', + value: 'my-server', + cursor: 9, + required: true, + placeholder: '', + }, + { + type: 'selector', + label: 'Transport', + key: 'transport', + options: ['stdio', 'sse', 'streamable-http'], + selectedIndex: 1, + required: true, + }, + { + type: 'multiselect', + label: 'Environments', + key: 'environments', + options: [ + {id: 'claude-code', label: 'Claude Code'}, + {id: 'opencode', label: 'OpenCode'}, + ], + selected: new Set(['claude-code', 'opencode']), + focusedOptionIndex: 0, + required: true, + }, + { + type: 'editor', + label: 'Content', + key: 'content', + lines: ['line one', 'line two'], + cursorLine: 0, + cursorCol: 0, + required: true, + }, + ], + } + + const values = extractValues(state) + expect(values.name).toBe('my-server') + expect(values.transport).toBe('sse') + expect(values.environments).toEqual(expect.arrayContaining(['claude-code', 'opencode'])) + expect(values.content).toBe('line one\nline two') + }) + + it('uses label as key when field.key is not set', () => { + /** @type {import('../../../../src/utils/tui/form.js').FormState} */ + const state = { + title: 'Test', + focusedFieldIndex: 0, + status: 'editing', + errorMessage: null, + fields: [ + { + type: 'text', + label: 'My Field', + value: 'val', + cursor: 3, + required: true, + placeholder: '', + }, + ], + } + const values = extractValues(state) + expect(values.my_field).toBe('val') + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// getMCPFormFields +// ────────────────────────────────────────────────────────────────────────────── + 
+describe('getMCPFormFields', () => { + it('returns fields with correct labels', () => { + const fields = getMCPFormFields() + const labels = fields.map((f) => f.label) + expect(labels).toContain('Name') + expect(labels).toContain('Transport') + expect(labels).toContain('Command') + expect(labels).toContain('Args') + expect(labels).toContain('URL') + expect(labels).toContain('Description') + }) + + it('Name field is required', () => { + const fields = getMCPFormFields() + const nameField = fields.find((f) => f.label === 'Name') + expect(nameField?.required).toBe(true) + }) + + it('Transport field is a selector with stdio/sse/streamable-http', () => { + const fields = getMCPFormFields() + const transport = fields.find((f) => f.label === 'Transport') + expect(transport?.type).toBe('selector') + expect(/** @type {any} */ (transport).options).toEqual(['stdio', 'sse', 'streamable-http']) + }) + + it('returns correct number of fields', () => { + const fields = getMCPFormFields() + expect(fields.length).toBe(7) // name, environments, transport, command, args, url, description + }) +}) + +describe('getMCPFormFields with existing entry', () => { + it('pre-fills values from entry', () => { + /** @type {import('../../../../src/types.js').CategoryEntry} */ + const entry = { + id: 'abc-123', + name: 'my-mcp', + type: 'mcp', + active: true, + environments: ['claude-code'], + params: { + transport: 'sse', + url: 'https://mcp.example.com', + command: 'npx run', + args: ['--port', '3000'], + }, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + } + + const fields = getMCPFormFields(entry) + + const nameField = fields.find((f) => f.label === 'Name') + expect(/** @type {any} */ (nameField).value).toBe('my-mcp') + + const transportField = fields.find((f) => f.label === 'Transport') + expect(/** @type {any} */ (transportField).selectedIndex).toBe(1) // sse + + const urlField = fields.find((f) => f.label === 'URL') + expect(/** @type {any} */ 
(urlField).value).toBe('https://mcp.example.com') + + const argsField = fields.find((f) => f.label === 'Args') + expect(/** @type {any} */ (argsField).value).toBe('--port 3000') + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// getCommandFormFields +// ────────────────────────────────────────────────────────────────────────────── + +describe('getCommandFormFields', () => { + it('returns fields with Name, Description, Content labels', () => { + const fields = getCommandFormFields() + const labels = fields.map((f) => f.label) + expect(labels).toContain('Name') + expect(labels).toContain('Description') + expect(labels).toContain('Content') + }) + + it('Content field is an editor', () => { + const fields = getCommandFormFields() + const content = fields.find((f) => f.label === 'Content') + expect(content?.type).toBe('editor') + }) + + it('pre-fills values when entry is provided', () => { + /** @type {import('../../../../src/types.js').CategoryEntry} */ + const entry = { + id: 'xyz', + name: 'refactor', + type: 'command', + active: true, + environments: ['claude-code'], + params: {content: 'line one\nline two', description: 'My command'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + } + const fields = getCommandFormFields(entry) + const contentField = fields.find((f) => f.label === 'Content') + expect(/** @type {any} */ (contentField).lines).toEqual(['line one', 'line two']) + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// getSkillFormFields +// ────────────────────────────────────────────────────────────────────────────── + +describe('getSkillFormFields', () => { + it('returns fields with Name, Description, Content labels', () => { + const fields = getSkillFormFields() + const labels = fields.map((f) => f.label) + expect(labels).toContain('Name') + expect(labels).toContain('Description') + expect(labels).toContain('Content') + }) + + 
it('pre-fills name from entry', () => { + /** @type {import('../../../../src/types.js').CategoryEntry} */ + const entry = { + id: 'skill-1', + name: 'my-skill', + type: 'skill', + active: true, + environments: [], + params: {content: 'skill content'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + } + const fields = getSkillFormFields(entry) + const nameField = fields.find((f) => f.label === 'Name') + expect(/** @type {any} */ (nameField).value).toBe('my-skill') + }) +}) + +// ────────────────────────────────────────────────────────────────────────────── +// getAgentFormFields +// ────────────────────────────────────────────────────────────────────────────── + +describe('getAgentFormFields', () => { + it('returns fields with Name, Description, Instructions labels', () => { + const fields = getAgentFormFields() + const labels = fields.map((f) => f.label) + expect(labels).toContain('Name') + expect(labels).toContain('Description') + expect(labels).toContain('Instructions') + }) + + it('Instructions field is an editor', () => { + const fields = getAgentFormFields() + const instructions = fields.find((f) => f.label === 'Instructions') + expect(instructions?.type).toBe('editor') + }) + + it('pre-fills instructions from entry', () => { + /** @type {import('../../../../src/types.js').CategoryEntry} */ + const entry = { + id: 'agent-1', + name: 'my-agent', + type: 'agent', + active: true, + environments: [], + params: {instructions: 'do this\ndo that'}, + createdAt: '2026-01-01T00:00:00Z', + updatedAt: '2026-01-01T00:00:00Z', + } + const fields = getAgentFormFields(entry) + const instructionsField = fields.find((f) => f.label === 'Instructions') + expect(/** @type {any} */ (instructionsField).lines).toEqual(['do this', 'do that']) + }) +}) diff --git a/tests/unit/validators/repo-name.test.js b/tests/unit/validators/repo-name.test.js index 849dade..fe32fb9 100644 --- a/tests/unit/validators/repo-name.test.js +++ 
b/tests/unit/validators/repo-name.test.js @@ -1,13 +1,13 @@ -import { describe, it, expect } from 'vitest' -import { validateRepoName } from '../../../src/validators/repo-name.js' +import {describe, it, expect} from 'vitest' +import {validateRepoName} from '../../../src/validators/repo-name.js' describe('validateRepoName', () => { it('accepts valid kebab-case name', () => { - expect(validateRepoName('my-service')).toEqual({ valid: true }) + expect(validateRepoName('my-service')).toEqual({valid: true}) }) it('accepts single word', () => { - expect(validateRepoName('myservice')).toEqual({ valid: true }) + expect(validateRepoName('myservice')).toEqual({valid: true}) }) it('rejects empty string', () => {