diff --git a/.gitignore b/.gitignore
index 89a5c9189..cd759cc71 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,3 +9,4 @@ tags
 **/tmp
 coverage/
 .claude
+work-done.md
diff --git a/AGENTS.md b/AGENTS.md
new file mode 100644
index 000000000..1be9ce2e5
--- /dev/null
+++ b/AGENTS.md
@@ -0,0 +1,125 @@
+# AGENTS.md - pos-cli Development Guide
+
+## Overview
+
+pos-cli is a Node.js CLI tool (v22+) for deploying and managing platformOS applications. It uses ES modules, Commander.js for CLI commands, and Vitest for testing.
+
+## Commands
+
+### Installation & Build
+```bash
+npm ci          # Install dependencies (clean install)
+npm run build   # Build GUI assets (admin, graphql, next, liquid)
+```
+
+### Testing
+```bash
+npm test                         # Run all tests
+npm run test:watch               # Run tests in watch mode
+npm run test:unit                # Run unit tests only
+npm run test:integration         # Run integration tests only
+npm run test:coverage            # Run tests with coverage report
+npm run test:mcp-min             # Run MCP server tests only
+npx vitest run path/to/test.js   # Run single test file
+npx vitest run -t "test name"    # Run single test by name
+```
+
+### Linting
+```bash
+npm run lint       # Check code style (fails on warnings)
+npm run lint:fix   # Auto-fix linting issues
+```
+
+### Environment
+Integration tests require the `MPKIT_URL`, `MPKIT_EMAIL`, and `MPKIT_TOKEN` environment variables.
+
+## Code Style Guidelines
+
+### Imports & Modules
+- Use ES modules (`import`/`export`) - this is an ESM package (`"type": "module"`)
+- Use path aliases: `import Something from '#lib/something.js'`
+- Use `import` for internal modules; fall back to `require()` only for Node.js built-ins when needed
+
+### Formatting
+- **Indentation**: 2 spaces (no tabs)
+- **Quotes**: Single quotes (`'string'`), except to avoid escaping
+- **Semicolons**: Required at end of statements
+- **Line length**: Maximum 140 characters
+- **Trailing commas**: Never use trailing commas
+
+### Naming Conventions
+- **Files**: kebab-case (`my-module.js`)
+- **Classes**: PascalCase (`Gateway`, `ServerError`)
+- **Variables/functions**: camelCase (`fetchSettings`, `deployStrategy`)
+- **Constants**: UPPER_SNAKE_CASE for true constants, otherwise camelCase
+- **Private members**: No underscore prefix convention; use closures or WeakMaps if true privacy is needed
+
+### Parameters
+- **NEVER use underscore-prefixed parameters** (`_param`): a custom ESLint rule warns on usage
+- Parameters starting with `_` signal "unused" - if you use one, remove the underscore
+- Exception: `__dirname`, `__filename` are Node.js built-ins
+
+### Functions
+- Use arrow functions for callbacks, method shorthand for object methods
+- Use async/await for all async operations (no callback-style code)
+- Keep functions small and focused (single responsibility)
+
+### Error Handling
+- Use `try/catch` with async/await
+- Throw `Error` objects with descriptive messages
+- Use the `ServerError` class from `lib/ServerError.js` for API errors
+- Let errors bubble up; handle them at the appropriate level (CLI commands exit on error)
+- Never swallow errors silently (at minimum, log them)
+
+### Code Patterns
+
+**Command files** (`bin/*.js`): Thin wrappers using Commander.js
+- Parse arguments
+- Fetch auth settings via `fetchSettings()`
+- Delegate to lib/ modules
+
+**Implementation** (`lib/`): Core business logic
+- Gateway class for all API communication
+- Strategy pattern for deployment (see `lib/deploy/`)
+- Template processing with Mustache for module configuration
+
+**MCP Server** (`mcp-min/`): Model Context Protocol server
+- Tool definitions in modular files under feature directories
+- Stdio and HTTP transports for MCP communication
+- SSE streaming for real-time updates
+- Partner Portal API tools for instance management
+
+**Testing**:
+- Unit tests in `test/unit/`
+- Integration tests in `test/integration/`
+- MCP server tests in `mcp-min/__tests__/`
+- Use `describe()`, `test()`, `expect()` from Vitest
+- Fixtures in `test/fixtures/`
+
+### File Structure
+```
+pos-cli/
+├── bin/          # CLI entry points (54 executables)
+├── lib/          # Core implementation
+│   ├── proxy.js        # Gateway API client
+│   ├── ServerError.js
+│   ├── settings.js     # Environment config (.pos file)
+│   └── deploy/         # Deployment strategies
+├── gui/          # Pre-built web apps (admin, graphql, liquid, next)
+├── mcp-min/      # MCP server implementation
+│   ├── tools.js        # Tool definitions
+│   ├── index.js        # Server entry point
+│   ├── stdio-server.js
+│   ├── http-server.js
+│   ├── sse.js          # Server-sent events
+│   └── portal/         # Partner Portal API tools
+├── test/         # Tests and fixtures
+└── scripts/      # Utility scripts
+```
+
+### Important Notes
+- GUI directories contain pre-built apps - don't edit source there
+- Configuration stored in `.pos` file (JSON format)
+- `.posignore` uses gitignore syntax for sync/deploy exclusions
+- Environment variables: `CI`, `DEBUG`, `NO_COLOR`, `CONCURRENCY` affect behavior
+- When fixing bugs, add or update tests to prevent regressions
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6292b8220..5e202b897 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,25 +1,68 @@
-# Contributing to platformOS Check
+# Contributing Guide
 
-## Standards
+## Development Workflow
 
-* PR should explain what the feature does, and why the change exists.
-* PR should include any carrier specific documentation explaining how it works.
-* Code _must_ be tested.
-* Be consistent. Write clean code.
-* Code should be generic and reusable.
-
-## How to contribute
+```bash
+git clone
+cd mcp
+npm ci
+npm run dev
+```
 
-1. Fork it (https://github.com/Platform-OS/pos-cli).
-2. Go into the forked repository (`cd po-cli`) and link the repo: `npm unlink .; npm uninstall -g @platformOS/pos-cli; npm link; npm install`
-2. Create your feature branch (`git checkout -b my-new-feature`).
-3. Commit your changes (`git commit -am 'Add some feature'`).
-4. Push to the branch (`git push origin my-new-feature`).
-5. Create a new Pull Request.
+## Adding a New Tool
 
-## Running Tests
+1. **Create `src/tools/new.tools.ts`**:
+```typescript
+import { z } from 'zod';
+import type { Tool } from '../types/index.js'; // shared Tool type (see src/types)
+
+export const myTool: Tool = {
+  name: 'platformos.my.tool',
+  description: 'Does X',
+  inputSchema: z.object({ foo: z.string() }),
+  handler: async (input) => ({ result: input.foo.toUpperCase() })
+};
+export const myTools = [myTool];
+```
+
+2. **Register in `src/tools/index.ts`**:
+```typescript
+export const allTools = [
+  ...environmentTools,
+  ...myTools,
+  // ...
+];
+```
+
+3. **Add tests** `src/__tests__/my.tools.test.ts`
+4. **Update** [TOOLS.md](TOOLS.md)
+
+## Pre-commit Hooks
+
+```bash
+npm run lint
+npm run test
+npm run build
+```
+
+## Release Process
+
+1. `npm version patch/minor/major`
+2. `npm publish`
+3. Update [CHANGELOG.md](CHANGELOG.md)
+
+## Code Standards
+
+- **TypeScript strict**
+- **Zod for all schemas**
+- **90%+ test coverage**
+- **No `any` types**
+- **JSDoc for complex functions**
+
+## Testing Layers
+
+| Type | Command | Purpose |
+|------|---------|---------|
+| Unit | `npm test` | Tool handlers, wrappers |
+| Integration | `npm test` | Express endpoints |
+| E2E | Manual | nock + real flows |
+
+---
+**Questions? Open an issue!**
\ No newline at end of file
diff --git a/DEPLOY.md b/DEPLOY.md
new file mode 100644
index 000000000..c230299aa
--- /dev/null
+++ b/DEPLOY.md
@@ -0,0 +1,115 @@
+# DEPLOY.md - Production Deployment
+
+## Docker
+
+### Development
+
+```bash
+docker build -t mcp-dev .
+docker run -p 3030:3030 \
+  -e ADMIN_API_KEY=supersecret \
+  -v $(pwd)/.pos:/app/.pos:ro \
+  -v $(pwd)/mcp/clients.json:/app/clients.json:ro \
+  mcp-dev npm run dev
+```
+
+### Production Dockerfile
+
+```dockerfile
+FROM node:20-alpine
+WORKDIR /app
+COPY package*.json ./
+RUN npm ci --only=production
+COPY dist ./dist
+COPY clients.json ./
+EXPOSE 3030
+CMD ["npm", "start"]
+```
+
+**Build & Run**
+```bash
+npm run build
+docker build -t mcp-prod -f Dockerfile.prod .
+docker run -p 3030:3030 \
+  -e ADMIN_API_KEY=prod-key \
+  -v /path/to/.pos:/app/.pos:ro \
+  mcp-prod
+```
+
+## Environment Variables
+
+| Var | Required | Default | Description |
+|-----|----------|---------|-------------|
+| `ADMIN_API_KEY` | Yes | - | Admin endpoint auth |
+| `NODE_ENV` | No | `development` | Environment |
+| `PORT` | No | `3030` | HTTP port |
+
+## Production Checklist
+
+- [ ] `npm run build`
+- [ ] Set `ADMIN_API_KEY`
+- [ ] Volume mount `.pos/envs/` (read-only)
+- [ ] Volume mount `clients.json` (read-only)
+- [ ] Firewall: port 3030
+- [ ] Health check: `/health`
+- [ ] PM2 / systemd supervision
+- [ ] Log aggregation (stdout JSON)
+
+## Kubernetes
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: mcp-server
+spec:
+  replicas: 2
+  template:
+    spec:
+      containers:
+      - name: mcp
+        image: mcp-prod:latest
+        env:
+        - name: ADMIN_API_KEY
+          valueFrom:
+            secretKeyRef:
+              name: mcp-secrets
+              key: admin-key
+        volumeMounts:
+        - name: pos-config
+          mountPath: /app/.pos
+          readOnly: true
+        ports:
+        - containerPort: 3030
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: mcp-secrets
+data:
+  admin-key:
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: clients-config
+data:
+  clients.json: |-
+    { "ai-clients": { "token": "..." } }
+```
+
+## Reverse Proxy (nginx)
+
+```nginx
+server {
+  listen 80;
+  location / {
+    proxy_pass http://localhost:3030;
+    proxy_set_header Authorization $http_authorization;
+    proxy_set_header X-API-Key $admin_api_key;
+  }
+}
+```
+
+---
+**Recommended: Docker + nginx + CloudWatch Logs**
\ No newline at end of file
diff --git a/TESTING.md b/TESTING.md
new file mode 100644
index 000000000..08074b7ff
--- /dev/null
+++ b/TESTING.md
@@ -0,0 +1,91 @@
+Testing strategy for MCP components
+
+Overview
+--------
+This document describes the testing approach, coverage goals, test matrix, recommended libraries, and instructions to run tests locally and in CI for the mcp-min (MCP minimal) components.
+
+Goals
+-----
+- Provide deterministic unit tests for API wrappers, auth/config resolution, tools, and transport implementations (stdio, SSE).
+- Provide integration tests for HTTP endpoints and JSON-RPC compatibility (/call, /call-stream) against a running mcp-min server.
+- Achieve coverage thresholds for the mcp-min package and global project coverage.
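+
+As a concrete sketch of the deterministic-unit-test goal, a jest + nock test for the Gateway logs wrapper could look like the snippet below. This is illustrative only: the exact logs path and response shape must be taken from lib/proxy, and the import style has to match the package's module settings.
+
+    // gateway-logs.example.test.js (hypothetical file name)
+    import nock from 'nock';
+    import Gateway from '../../lib/proxy.js';
+
+    test('logs() returns parsed log entries', async () => {
+      // Mock the instance API; lib/proxy defines the real logs path.
+      nock('https://example.platformos.dev')
+        .get(/logs/)
+        .query(true)
+        .reply(200, { logs: [{ id: '1', message: 'hello' }] });
+
+      const gateway = new Gateway({
+        url: 'https://example.platformos.dev',
+        email: 'user@example.com',
+        token: 'secret'
+      });
+
+      const response = await gateway.logs({ lastId: '0' });
+      expect(response.logs).toHaveLength(1);
+      expect(nock.isDone()).toBe(true);
+    });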
+
+Coverage targets
+----------------
+- Global: branches 70%, functions 75%, lines 80%, statements 80%
+- mcp-min package: branches 80%, functions 85%, lines 90%, statements 90%
+
+Test matrix (CI)
+----------------
+- Node versions: 18.x, 20.x
+- OS: ubuntu-latest, macos-latest, windows-latest
+
+Test types and cases
+--------------------
+Unit tests
+- API wrapper classes: mock network responses, assert request shape, retries/error handling.
+- Auth resolution: env vars, explicit params, .pos config precedence, missing auth errors.
+- Single-file helpers: normalizeLocalPath, computeRemotePath, isAssetsPath, masking tokens.
+- Proxy-wrapper behavior: mock lib/proxy to ensure calls for the sync/delete flow are invoked.
+- Tools: echo (simple), list-envs (.pos parsing), sync.singleFile (dry-run path, validation failure, auth missing).
+- stdio transport: parsing well-formed JSON lines, invalid JSON handling, unknown method errors.
+- SSE utilities: sseHandler framing, writeSSE escaping multiline data, heartbeat timing with fake timers.
+
+Integration tests
+- Start the mcp-min HTTP server in tests and exercise endpoints: /health, /tools, /call (success, 400, 404), /call-stream (JSON-RPC initialize, tools/list, tools/call).
+- SSE streaming behavior: GET / with Accept: text/event-stream handshake, POST /call-stream streaming response bodies and initial endpoint events.
+- Full tool chaining: tools that call other libs (sync.singleFile) with the proxy mocked, asserting writer events.
+- Error recovery: simulate gateway errors and ensure the server responds with appropriate error payloads.
+
+End-to-end
+- stdio + HTTP combined scenario where an external client uses JSON-RPC initialize, tools/list, and tools/call over HTTP and verifies SSE messages (using eventsource in real runs).
+
+Mock framework & fixtures
+-------------------------
+- Use nock to mock HTTP calls to platformOS endpoints and S3 presign/upload flows.
+- Use jest.mock for internal libs (lib/proxy, lib/s3UploadFile, lib/presignUrl, lib/files) to create deterministic responses.
+- Use test/utils/fixtures.js for managing temporary .pos configs.
+- Use tmp or fs-extra for temp directories and files.
+
+Libraries recommended
+---------------------
+- jest (testing framework)
+- supertest (HTTP assertions) - optional; current tests use http.request directly
+- eventsource (EventSource polyfill) for SSE client tests
+- nock (HTTP mocking)
+- tmp / fs-extra (filesystem helpers)
+- jest fake timers for heartbeat and SSE tests
+
+Jest config and coverage
+------------------------
+- collectCoverage true, targeting mcp-min and lib.
+- Set coverage thresholds (see Coverage targets section).
+- Add testPathIgnorePatterns for heavy directories (gui/next etc.).
+
+CI job
+------
+- GitHub Actions workflow at .github/workflows/ci.yml
+- Matrix: node 18, 20; OS: ubuntu, macos, windows
+- Steps: checkout, setup-node, npm ci, npm test, upload coverage artifact
+
+Files to add (initial PR)
+-------------------------
+- mcp-min/__tests__/http.test.js
+- mcp-min/__tests__/sse.test.js
+- mcp-min/__tests__/stdio.test.js
+- mcp-min/__tests__/tools.test.js
+- test/utils/fixtures.js
+- .github/workflows/ci.yml
+- TESTING.md
+- package.json jest config updated with coverage settings
+
+Running tests locally
+---------------------
+- npm ci
+- npm test
+
+Maintainer notes
+----------------
+- Expand tests to cover lib/proxy and network interactions using jest.mock + nock.
+- Add integration tests that spin up a mocked S3 service if needed.
+- Use supertest for more ergonomic HTTP assertions in future.
diff --git a/TOOLS.md b/TOOLS.md
new file mode 100644
index 000000000..fcec64fad
--- /dev/null
+++ b/TOOLS.md
@@ -0,0 +1,234 @@
+# TOOLS.md - Complete Tool Reference
+
+## Available Tools (11+)
+
+All tools use **Zod validation** and return **JSON** in MCP `content[0].text`.
+
+## Environment Management
+
+### `platformos.env.list`
+**List configured .pos environments**
+
+**Input Schema**
+```json
+{}
+```
+
+**Example**
+```bash
+curl -X POST http://localhost:3030/call \
+  -H "Authorization: Bearer client-secret" \
+  -d '{"tool":"platformos.env.list","input":{}}'
+```
+
+**Output**
+```json
+{
+  "envs": [{
+    "name": "staging",
+    "account": "example",
+    "site": "staging.example.platformos.net"
+  }]
+}
+```
+
+### `platformos.env.add`
+**Add new environment**
+
+**Input**
+```json
+{
+  "name": "production",
+  "url": "https://prod.example.platformos.net",
+  "email": "admin@company.com",
+  "account": "company",
+  "token": "abc123..."
+}
+```
+
+### `platformos.env.auth`
+**Verify environment auth**
+
+**Input**
+```json
+{ "env": "staging" }
+```
+
+## GraphQL
+
+### `platformos.graphql.execute`
+**Execute GraphQL query/mutation**
+
+**Input**
+```json
+{
+  "env": "staging",
+  "query": "query { viewer { id email } }",
+  "variables": { "id": "123" }
+}
+```
+
+**Example**
+```bash
+curl -X POST http://localhost:3030/call \
+  -H "Authorization: Bearer client-secret" \
+  -d '{"tool":"platformos.graphql.execute","input":{"env":"staging","query":"query { __schema { types { name } } }"}}'
+```
+
+## Data Export
+
+### `platformos.data.export_start`
+**Start async data export**
+
+**Input**
+```json
+{
+  "env": "staging",
+  "export_internal": true,
+  "csv_export": false
+}
+```
+
+**Returns** `{ "jobId": "job_123", "poll": true }`
+
+### `platformos.data.export_status`
+**Check export status**
+
+**Input**
+```json
+{
+  "env": "staging",
+  "jobId": "job_123",
+  "csv_export": false
+}
+```
+
+## Liquid Rendering
+
+### `platformos.liquid.render`
+**Render Liquid template**
+
+**Input**
+```json
+{
+  "env": "staging",
+  "template": "Hello {{ name }}!",
+  "locals": { "name": "World" }
+}
+```
+
+**Output** `{ "output": "Hello World!" }`
+
+## Logs ⚡ **Streaming Support**
+
+### `platformos.logs.fetch`
+**Fetch recent logs (batch)**
+
+**Input**
+```json
+{
+  "env": "staging",
+  "lastId": "optional-last-log-id",
+  "limit": 100
+}
+```
+
+**Output**
+```json
+{
+  "logs": [{
+    "id": "log_123",
+    "message": "Error message",
+    "error_type": "error",
+    "created_at": "2024-01-01T12:00:00Z"
+  }],
+  "lastId": "log_123"
+}
+```
+
+### `platformos.logs.stream` ⚡ **NEW**
+**Real-time log streaming with Server-Sent Events**
+
+**Input**
+```json
+{
+  "env": "staging",
+  "interval": 3000,
+  "filter": "error"
+}
+```
+
+**Streaming Output** (SSE events)
+```
+event: data
+data: {"type":"text","text":"{\"id\":\"123\",\"timestamp\":\"2024-01-01T12:00:00Z\",\"type\":\"error\",\"message\":\"Error occurred\",\"env\":\"staging\"}"}
+
+
+event: done
+data: [DONE]
+
+
+```
+
+**Use with** `POST /call-stream` endpoint for SSE streaming
+
+### `platformos.logs.live` ⚡ **NEW**
+**Enhanced live log monitoring with duplicate detection**
+
+**Input**
+```json
+{
+  "env": "staging",
+  "interval": 3000,
+  "filter": "error",
+  "maxDuration": 300000
+}
+``` + +**Features** +- Duplicate detection using log IDs +- Heartbeat events for connection monitoring +- Configurable streaming duration +- Automatic cleanup on completion + +**Streaming Output** (SSE events) +``` +event: data +data: {\"type\":\"text\",\"text\":\"{\\\"id\\\":\\\"123\\\",\\\"timestamp\\\":\\\"2024-01-01T12:00:00Z\\\",\\\"type\\\":\\\"error\\\",\\\"message\\\":\\\"Error occurred\\\",\\\"env\\\":\\\"staging\\\"}\"} + + +event: heartbeat +data: {\"type\":\"heartbeat\",\"timestamp\":\"2024-01-01T12:00:00Z\",\"env\":\"staging\"} + + +event: done +data: [DONE] + + +``` + +**Use with** `POST /call-stream` endpoint for SSE streaming + +## Other Tools + +- `platformos.modules.list` - List modules +- `platformos.migrations.status` - Migration status +- `platformos.constants.list` - List constants + +## Full Tool List + +| Tool | Category | Async | Streaming | +|------|----------|-------|-----------| +| `platformos.env.*` (3 tools) | Environment | No | No | +| `platformos.graphql.execute` | GraphQL | No | No | +| `platformos.data.export_*` (2) | Data | Yes | No | +| `platformos.liquid.render` | Liquid | No | No | +| `platformos.logs.fetch` | Logs | No | No | +| `platformos.logs.stream` ⚡ | Logs | No | Yes | +| `platformos.logs.live` ⚡ | Logs | No | Yes | +| `platformos.modules.*` | Modules | No | No | +| `platformos.migrations.*` | Migrations | No | No | +| `platformos.constants.*` | Constants | No | No | + +--- +**See `src/tools/*.tools.ts` for exact Zod schemas & handlers** \ No newline at end of file diff --git a/bin/pos-cli-fetch-logs.js b/bin/pos-cli-fetch-logs.js new file mode 100755 index 000000000..a0e33dd07 --- /dev/null +++ b/bin/pos-cli-fetch-logs.js @@ -0,0 +1,74 @@ +#!/usr/bin/env node + +const { program } = require('commander'); +const Gateway = require('../lib/proxy'); +const fetchSettings = require('../lib/settings').fetchSettings; +const logger = require('../lib/logger'); + +program + .name('pos-cli fetch-logs') + .argument('[environment]', 'name of environment. Example: staging') + .option('--last-log-id ', 'return logs after provided id') + .option('--endpoint ', 'override API base url') + .option('-q, --quiet', 'suppress non-log output') + .action(async (environment, options) => { + try { + const authData = fetchSettings(environment); + if (!authData) { + console.error('No auth data available. Set environment or MPKIT_* env vars.'); + process.exit(2); + } + + if (options.endpoint) { + authData.url = options.endpoint; + } + + const gateway = new Gateway(authData); + + let lastId = options.lastLogId || options.lastLogId === 0 ? options.lastLogId : (options.lastLogId || options.lastLogId === 0 ? options.lastLogId : (options.lastLogId || options.lastLogId === 0 ? options.lastLogId : (options.lastLogId))); + // commander converts option name to camelCase: lastLogId + lastId = options.lastLogId || options.lastLogId === 0 ? 
options.lastLogId : (options.lastLogId); + // fallback to the provided --last-log-id + if (!lastId && options['last-log-id']) lastId = options['last-log-id']; + + // ensure lastId is either undefined or a number/string + if (lastId !== undefined && lastId !== null) { + lastId = String(lastId); + } + + // fetch loop - call gateway.logs until no new logs are returned + let seen = new Set(); + let latestId = lastId || '0'; + while (true) { + const params = { lastId: latestId }; + const response = await gateway.logs(params).catch(err => { throw err; }); + const logs = response && response.logs; + if (!logs || logs.length === 0) { + break; + } + + let maxId = latestId; + for (let i = 0; i < logs.length; i++) { + const row = logs[i]; + if (seen.has(row.id)) continue; + seen.add(row.id); + // Print each log as a JSON line + process.stdout.write(JSON.stringify(row) + '\n'); + if (!isNaN(Number(row.id)) && Number(row.id) > Number(maxId)) { + maxId = row.id; + } + } + + if (maxId === latestId) break; + latestId = maxId; + } + + process.exit(0); + } + catch (err) { + if (!program.quiet) console.error('Error fetching logs:', err.message || err); + process.exit(2); + } + }); + +program.parse(process.argv); diff --git a/bin/pos-cli-generate.js b/bin/pos-cli-generate.js index af1041d5f..f6d88b5bf 100755 --- a/bin/pos-cli-generate.js +++ b/bin/pos-cli-generate.js @@ -4,6 +4,6 @@ import { program } from 'commander'; program .name('pos-cli generate') - .command('list', 'list available generators') - .command('run', 'run specific generator') + .command('list', 'list available generators with required params') + .command('run', 'run specific generator (validates required params)') .parse(process.argv); diff --git a/bin/pos-cli-gui-serve.js b/bin/pos-cli-gui-serve.js index 8f1df3e65..6ada70390 100755 --- a/bin/pos-cli-gui-serve.js +++ b/bin/pos-cli-gui-serve.js @@ -15,6 +15,7 @@ program .name('pos-cli gui serve') .arguments('[environment]', 'name of environment. 
   .option('-p, --port <port>', 'use PORT', '3333')
+  .option('-b, --host <host>', 'use HOST', 'localhost')
   .option('-o, --open', 'when ready, open default browser with graphiql')
   .option('-s, --sync', 'Sync files')
   .action(async (environment, params) => {
@@ -24,6 +25,7 @@ program
       MARKETPLACE_EMAIL: authData.email,
       MARKETPLACE_TOKEN: authData.token,
       MARKETPLACE_URL: authData.url,
+      HOST: params.host,
       PORT: params.port,
       CONCURRENCY: process.env.CONCURRENCY || DEFAULT_CONCURRENCY
     });
@@ -34,7 +36,7 @@ program
     if (params.open) {
       try {
         const open = (await import('open')).default;
-        await open(`http://localhost:${params.port}`);
+        await open(`http://${params.host}:${params.port}`);
       } catch (error) {
         if (error instanceof AggregateError) {
           logger.Error(`Failed to open browser (${error.errors.length} attempts): ${error.message}`);
diff --git a/bin/pos-cli-mcp-config.js b/bin/pos-cli-mcp-config.js
new file mode 100755
index 000000000..d0191b7d3
--- /dev/null
+++ b/bin/pos-cli-mcp-config.js
@@ -0,0 +1,59 @@
+#!/usr/bin/env node
+
+import { program } from 'commander';
+import { readFileSync } from 'fs';
+import { fileURLToPath } from 'url';
+import { dirname, join } from 'path';
+
+const __dirname = dirname(fileURLToPath(import.meta.url));
+const defaultConfigPath = join(__dirname, '..', 'mcp-min', 'tools.config.json');
+const configPath = process.env.MCP_TOOLS_CONFIG || defaultConfigPath;
+
+program
+  .name('pos-cli-mcp-config')
+  .description('Display MCP server tool configuration')
+  .option('--json', 'Output raw JSON')
+  .action((opts) => {
+    let config;
+    try {
+      config = JSON.parse(readFileSync(configPath, 'utf-8'));
+    } catch (err) {
+      console.error(`Error reading config: ${configPath}\n${err.message}`);
+      process.exit(1);
+    }
+
+    const source = process.env.MCP_TOOLS_CONFIG ? 'MCP_TOOLS_CONFIG' : 'default (bundled)';
+
+    if (opts.json) {
+      console.log(JSON.stringify(config, null, 2));
+      return;
+    }
+
+    console.log(`Config: ${configPath} (${source})\n`);
+
+    const tools = config.tools || {};
+    const enabled = [];
+    const disabled = [];
+
+    for (const [name, cfg] of Object.entries(tools)) {
+      if (cfg.enabled === false) {
+        disabled.push({ name, description: cfg.description || '' });
+      } else {
+        enabled.push({ name, description: cfg.description || '' });
+      }
+    }
+
+    console.log(`Enabled (${enabled.length}):`);
+    for (const t of enabled) {
+      console.log(`  ${t.name.padEnd(26)} ${t.description}`);
+    }
+
+    if (disabled.length) {
+      console.log(`\nDisabled (${disabled.length}):`);
+      for (const t of disabled) {
+        console.log(`  ${t.name.padEnd(26)} ${t.description}`);
+      }
+    }
+  });
+
+program.parse();
diff --git a/bin/pos-cli-mcp.js b/bin/pos-cli-mcp.js
new file mode 100755
index 000000000..2cba2566e
--- /dev/null
+++ b/bin/pos-cli-mcp.js
@@ -0,0 +1,3 @@
+#!/usr/bin/env node
+
+import '../mcp-min/index.js';
diff --git a/bin/pos-cli-uploads-push.js b/bin/pos-cli-uploads-push.js
index 8e9b21056..4bcd2d159 100755
--- a/bin/pos-cli-uploads-push.js
+++ b/bin/pos-cli-uploads-push.js
@@ -9,17 +9,17 @@ import { presignUrl } from '../lib/presignUrl.js';
 import logger from '../lib/logger.js';
 import ora from 'ora';
 
-const uploadZip = async (directory, gateway) => {
+const uploadZip = async (filepath, gateway) => {
   const spinner = ora({ text: 'Sending file', stream: process.stdout });
   spinner.start();
 
   try {
     const instanceId = (await gateway.getInstance()).id;
-    const propertyUploadsDirectory = `instances/${instanceId}/property_uploads/data.property_upload_import.zip`;
+    const propertyUploadsDirectory = `instances/${instanceId}/property_uploads/data.public_property_upload_import.zip`;
     logger.Debug(propertyUploadsDirectory);
 
-    const { uploadUrl } = await presignUrl(propertyUploadsDirectory, directory);
-    await uploadFile(directory, uploadUrl);
+    const { uploadUrl } = await presignUrl(propertyUploadsDirectory, filepath);
+    await uploadFile(filepath, uploadUrl);
 
     spinner.stopAndPersist().succeed('Upload done.');
   } catch (error) {
diff --git a/bin/pos-cli.js b/bin/pos-cli.js
index e760965a2..920a94480 100755
--- a/bin/pos-cli.js
+++ b/bin/pos-cli.js
@@ -38,4 +38,5 @@ program
   .command('sync <environment>', 'update environment on file change').alias('s')
   .command('test', 'run tests on instance')
   .command('uploads', 'manage uploads files')
+  .command('fetch-logs', 'fetch logs')
   .parse(process.argv);
diff --git a/docs/API.md b/docs/API.md
new file mode 100644
index 000000000..12e73a269
--- /dev/null
+++ b/docs/API.md
@@ -0,0 +1,162 @@
+# MCP Server API Reference
+
+## Base URL
+```
+http://localhost:3030
+```
+
+## Authentication
+
+### Admin (Health endpoint)
+```
+x-api-key: $ADMIN_API_KEY
+```
+
+### MCP Clients
+```
+Authorization: Bearer $CLIENT_SECRET
+```
+Clients configured in `clients.json`:
+```json
+{
+  "default": {
+    "token": "client-secret",
+    "name": "Default Client"
+  }
+}
+```
+
+## Endpoints
+
+### `GET /health` (Admin)
+**List server status and available tools**
+
+```bash
+curl http://localhost:3030/health -H "x-api-key: $ADMIN_API_KEY"
+```
+
+**Response**
+```json
+{
+  "status": "ok",
+  "tools": ["platformos.env.list", "platformos.graphql.execute", ...],
+  "toolCount": 9
+}
+```
+
+### `GET /tools` (Client)
+**List available tools with schemas**
+
+```bash
+curl http://localhost:3030/tools -H "Authorization: Bearer client-secret"
+```
+
+**Response**
+```json
+{
+  "tools": [{
+    "name": "platformos.env.list",
+    "description": "List environments...",
+    "inputSchema": { "type": "object", "properties": {} }
+  }]
+}
+```
+
+### `POST /call` (Client)
+**Execute MCP tool**
+
+```bash
+curl -X POST http://localhost:3030/call \
+  -H "Authorization: Bearer client-secret" \
+  -H "Content-Type: application/json" \
+  -d '{"tool":"platformos.graphql.execute","input":{"env":"staging","query":"query { __schema { types { name } } }"}}'
+```
+
+**Request Body**
+```json
+{
+  "tool": "platformos.graphql.execute",
+  "input": { ... }  // Zod-validated per tool
+}
+```
+
+**Response** (MCP format)
+```json
+{
+  "content": [{
+    "type": "text",
+    "text": "{\"data\": {\"__schema\": {\"types\": [...]}}}"
+  }]
+}
+```
+
+### `POST /call-stream` (Client - SSE Streaming) ⚡ **NEW** ⚡
+**Execute MCP tool with Server-Sent Events streaming**
+
+```bash
+curl -X POST http://localhost:3030/call-stream \
+  -H "Authorization: Bearer client-secret" \
+  -H "Content-Type: application/json" \
+  -d '{"tool":"platformos.logs.stream","input":{"env":"staging"}}'
+```
+
+**Request Body**
+```json
+{
+  "tool": "platformos.logs.stream",
+  "input": {
+    "env": "staging",
+    "interval": 3000,
+    "filter": "error"
+  }
+}
+```
+
+**SSE Response Format**
+```
+: heartbeat
+
+event: data
+data: {"type":"text","text":"{\"id\":\"123\",\"timestamp\":\"2024-01-01T12:00:00Z\",\"type\":\"info\",\"message\":\"Log message\",\"env\":\"staging\"}"}
+
+
+event: done
+data: [DONE]
+
+```
+
+**Supported Streaming Tools**
+- `platformos.logs.stream` - Real-time log streaming with automatic polling
+- `platformos.logs.live` - Live log monitoring with duplicate detection and heartbeats
+
+**Streaming Events**
+- `data` - Log entry or tool result chunk
+- `error` - Error occurred during streaming
+- `done` - Stream completed successfully
+- Heartbeat events (`:` prefix) - Keep connection alive
+
+**Connection Management**
+- Server maintains active connection count
+- Automatic cleanup on connection close/error
+- Graceful shutdown handling for all active streams
+
+## Error Responses
+
+```json
+{ "error": "Unauthorized" }               // 401
+{ "error": "Tool 'foo' not found" }       // 404
+{ "error": "[ZodError]: Invalid input" }  // 400
+```
+
+## MCP Protocol Compliance
+
+- **Tools List**: `/tools` returns JSON schema approximations
+- **Tool Calls**: `/call` validates with Zod, returns MCP `content[]` format
+- **Streaming**: Now supported via Server-Sent Events on `/call-stream`
+
+## Schemas
+
+All tools use Zod schemas internally. Full TypeScript types are in `src/types/index.ts`.
+
+---
+*See [TOOLS.md](TOOLS.md) for complete tool specifications.*
\ No newline at end of file
diff --git a/docs/MCP_TOOLS.md b/docs/MCP_TOOLS.md
new file mode 100644
index 000000000..55132f2de
--- /dev/null
+++ b/docs/MCP_TOOLS.md
@@ -0,0 +1,2214 @@
+# MCP Tools Documentation
+
+Complete reference guide for all platformOS Model Context Protocol (MCP) tools available in the pos-cli MCP server.
+
+**Total Tools**: 26 active tools
+
+---
+
+## Table of Contents
+
+1. [Authentication](#authentication)
+2. [Environment Management](#environment-management)
+3. [Logging & Monitoring](#logging--monitoring)
+4. [GraphQL & Liquid](#graphql--liquid)
+5. [Generators](#generators)
+6. [Migrations](#migrations)
+7. [Deployment](#deployment)
+8. [Data Operations](#data-operations)
+9. [Testing](#testing)
+10. [Linting](#linting)
+11. [File Sync](#file-sync)
+12. [Property Uploads](#property-uploads)
+13. [Constants](#constants)
+14. [Response Patterns](#response-patterns)
+
+---
+
+## Authentication
+
+All tools (except `envs-list` and generator tools) support multiple authentication methods with the following precedence:
+
+### Authentication Precedence
+
+1. **Explicit Parameters** (highest priority)
+   ```json
+   { "url": "https://instance.com", "email": "user@example.com", "token": "auth-token" }
+   ```
+
+2. **Environment Variables**
+   ```bash
+   MPKIT_URL=https://instance.com
+   MPKIT_EMAIL=user@example.com
+   MPKIT_TOKEN=auth-token
+   ```
+
+3. **Named Environment from `.pos` File**
+   ```json
+   { "env": "staging" }
+   ```
+
+4. **First Environment in `.pos` File** (lowest priority)
+
+### Error When No Auth Available
+
+```json
+{
+  "ok": false,
+  "error": {
+    "code": "AUTH_MISSING",
+    "message": "Provide url,email,token or configure .pos / MPKIT_* env vars"
+  }
+}
+```
+
+---
+
+## Environment Management
+
+### envs-list
+
+List all configured environments from `.pos` configuration file.
+
+**Tool Name**: `envs-list`
+
+**Input Parameters**:
+- None (empty object `{}`)
+
+**Response Format**:
+```javascript
+{
+  environments: [
+    { name: "staging", url: "https://staging.example.com" },
+    { name: "production", url: "https://prod.example.com" }
+  ]
+}
+```
+
+**Use Case**: Discover available environments before connecting to one.
+
+---
+
+## Logging & Monitoring
+
+### logs-fetch
+
+Fetch recent logs from a platformOS instance in batches. Pagination supported via `lastId`.
+
+**Tool Name**: `logs-fetch`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name from `.pos` config
+- `url` *(string, optional)*: Instance URL (alternative to `env`)
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `lastId` *(string, optional)*: Starting log ID for pagination (default: `'0'`)
+- `endpoint` *(string, optional)*: Override API base URL
+- `limit` *(integer, optional)*: Maximum logs to fetch (1-10000)
+
+**Response Format**:
+```javascript
+{
+  logs: [
+    { id: "1001", timestamp: "2025-01-23T10:30:45Z", level: "info", message: "..." },
+    { id: "1002", timestamp: "2025-01-23T10:31:00Z", level: "error", message: "..." }
+  ],
+  lastId: "1002",
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:31:30Z",
+    count: 2,
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Fetch first 100 logs
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "logs-fetch",
+    "params": {
+      "env": "staging",
+      "limit": 100,
+      "lastId": "0"
+    }
+  }'
+
+# Fetch next batch starting from previous lastId
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "logs-fetch",
+    "params": {
+      "env": "staging",
+      "limit": 100,
+      "lastId": "1002"
+    }
+  }'
+```
+
+**Use Case**: Retrieve historical logs for debugging and monitoring.
+
+---
+
+## GraphQL & Liquid
+
+### graphql-exec
+
+Execute GraphQL queries and mutations on a platformOS instance.
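+
+Beyond the curl examples used throughout this page, every tool call is a plain JSON POST, so a small Node helper is often convenient. A sketch (the server address matches the examples below; the helper name is illustrative):
+
+```javascript
+// Minimal client for the /call endpoint used in the examples on this page.
+async function callTool(tool, params) {
+  const res = await fetch('http://localhost:5910/call', {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({ tool, params })
+  });
+  if (!res.ok) throw new Error(`Tool call failed: ${res.status}`);
+  return res.json();
+}
+
+const result = await callTool('graphql-exec', {
+  env: 'staging',
+  query: '{ users { id name email } }'
+});
+console.log(result);
+```
+
+Later sketches in this document reuse this `callTool` helper.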
+
+**Tool Name**: `graphql-exec`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `endpoint` *(string, optional)*: Override API base URL
+- `query` *(string, required)*: GraphQL query or mutation string
+- `variables` *(object, optional)*: GraphQL variables
+
+**Response Format**:
+```javascript
+{
+  success: true,
+  result: {
+    data: {
+      users: [
+        { id: "1", name: "Alice", email: "alice@example.com" },
+        { id: "2", name: "Bob", email: "bob@example.com" }
+      ]
+    }
+  },
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:02Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Query users
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "graphql-exec",
+    "params": {
+      "env": "staging",
+      "query": "{ users { id name email } }"
+    }
+  }'
+
+# Mutation with variables
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "graphql-exec",
+    "params": {
+      "env": "staging",
+      "query": "mutation CreateUser($email: String!) { create_user(user: { email: $email }) { id } }",
+      "variables": { "email": "newuser@example.com" }
+    }
+  }'
+```
+
+**Use Case**: Execute custom GraphQL queries and mutations for data operations.
+
+---
+
+### liquid-exec
+
+Render Liquid templates on a platformOS instance.
+
+**Tool Name**: `liquid-exec`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `endpoint` *(string, optional)*: Override API base URL
+- `template` *(string, required)*: Liquid template string
+- `locals` *(object, optional)*: Template variables
+
+**Response Format**:
+```javascript
+{
+  success: true,
+  result: "Hello Alice! Your score is 42.",
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:01Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Error Response**:
+```javascript
+{
+  success: false,
+  error: {
+    code: "LIQUID_ERROR",
+    message: "Syntax error in template",
+    details: { line: 5, column: 10 }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Simple template
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "liquid-exec",
+    "params": {
+      "env": "staging",
+      "template": "Hello {{ name }}!",
+      "locals": { "name": "Alice" }
+    }
+  }'
+
+# Template with logic
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "liquid-exec",
+    "params": {
+      "env": "staging",
+      "template": "{% if score >= 50 %}Passed{% else %}Failed{% endif %}",
+      "locals": { "score": 75 }
+    }
+  }'
+```
+
+**Use Case**: Test Liquid template rendering and behavior.
+
+---
+
+## Generators
+
+### generators-list
+
+List all available yeoman generators in the project.
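+
+The three generator tools are designed to be chained: discover with `generators-list`, inspect with `generators-help`, then execute with `generators-run` (all documented below). A sketch, reusing the `callTool` helper from the GraphQL section (paths and options are illustrative):
+
+```javascript
+// Discover generators, inspect one, then scaffold a model.
+const { generators } = await callTool('generators-list', {});
+console.log(generators.map(g => g.path));
+
+const help = await callTool('generators-help', {
+  generatorPath: 'modules/core/generators/model'
+});
+console.log(help.usage);
+
+await callTool('generators-run', {
+  generatorPath: 'modules/core/generators/model',
+  args: ['user'],
+  options: { fields: 'name,email' }
+});
+```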
+
+**Tool Name**: `generators-list`
+
+**Input Parameters**:
+- None (empty object `{}`)
+
+**Response Format**:
+```javascript
+{
+  generators: [
+    {
+      path: "modules/core/generators/model",
+      name: "model",
+      required: ["name"],
+      optional: ["fields", "properties"]
+    },
+    {
+      path: "modules/core/generators/command",
+      name: "command",
+      required: ["name"],
+      optional: []
+    }
+  ]
+}
+```
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{"tool": "generators-list", "params": {}}'
+```
+
+**Use Case**: Discover available generators before running one.
+
+---
+
+### generators-help
+
+Show detailed help for a specific generator.
+
+**Tool Name**: `generators-help`
+
+**Input Parameters**:
+- `generatorPath` *(string, required)*: Path like `modules/core/generators/<name>`
+
+**Response Format**:
+```javascript
+{
+  name: "model",
+  usage: "pos-cli generate modules/core/generators/model [options]",
+  description: "Generate a data model",
+  requiredArgs: ["name"],
+  optionsTable: "[--force] [--namespace=...] [--fields=...]",
+  optionsDetails: [
+    { flag: "--force", description: "Overwrite existing files" },
+    { flag: "--namespace=STR", description: "Model namespace" },
+    { flag: "--fields=STR", description: "Comma-separated fields" }
+  ]
+}
+```
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "generators-help",
+    "params": { "generatorPath": "modules/core/generators/model" }
+  }'
+```
+
+**Use Case**: Get help on how to use a specific generator.
+
+---
+
+### generators-run
+
+Run a yeoman generator with arguments and options.
+
+**Tool Name**: `generators-run`
+
+**Input Parameters**:
+- `generatorPath` *(string, required)*: Path like `modules/core/generators/<name>`
+- `args` *(array of strings, required)*: Positional arguments (order matters)
+- `options` *(object, optional)*: Generator options (e.g., `--name=value`)
+- `requireArgs` *(boolean, optional, default: true)*: Validate required arguments
+
+**Response Format**:
+```javascript
+{
+  success: true,
+  result: {
+    createdFiles: ["app/models/user.liquid"],
+    message: "Model user created successfully"
+  }
+}
+```
+
+**Error Response**:
+```javascript
+{
+  success: false,
+  error: {
+    code: "MISSING_ARGS",
+    message: "Missing required arguments",
+    required: ["name"]
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Generate a model
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "generators-run",
+    "params": {
+      "generatorPath": "modules/core/generators/model",
+      "args": ["user"],
+      "options": { "fields": "name,email,phone" }
+    }
+  }'
+```
+
+**Use Case**: Generate code scaffolds for models, pages, commands, etc.
+
+---
+
+## Migrations
+
+### migrations-list
+
+List all migrations deployed to a platformOS instance.
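+
+The migration tools mirror the CLI workflow: generate a migration, run it, then confirm its state with `migrations-list` (all documented below). A sketch, reusing the `callTool` helper from the GraphQL section (names are illustrative):
+
+```javascript
+// Generate, execute, and verify a migration.
+const generated = await callTool('migrations-generate', {
+  env: 'staging',
+  name: 'add_user_fields'
+});
+const migrationName = generated.data.name; // e.g. "1674403200_add_user_fields"
+
+await callTool('migrations-run', { env: 'staging', name: migrationName });
+
+const { data } = await callTool('migrations-list', { env: 'staging' });
+console.log(data.migrations.find(m => m.name === migrationName));
+```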
+
+**Tool Name**: `migrations-list`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  status: "ok",
+  data: {
+    migrations: [
+      { id: "1234567890", name: "1234567890_create_users", state: "executed" },
+      { id: "1234567891", name: "1234567891_add_profile", state: "executed" },
+      { id: "1234567892", name: "1234567892_add_preferences", state: "pending", error_messages: [] }
+    ]
+  }
+}
+```
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "migrations-list",
+    "params": { "env": "staging" }
+  }'
+```
+
+**Use Case**: Check migration status and history.
+
+---
+
+### migrations-generate
+
+Generate a new migration on the server and write the local file.
+
+**Tool Name**: `migrations-generate`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `name` *(string, required)*: Base name without timestamp (e.g., `add_user_fields`)
+- `skipWrite` *(boolean, optional, default: false)*: Don't create local file
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  status: "ok",
+  data: {
+    name: "1674403200_add_user_fields",
+    bodyLength: 156,
+    filePath: "app/migrations/1674403200_add_user_fields.liquid"
+  }
+}
+```
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "migrations-generate",
+    "params": {
+      "env": "staging",
+      "name": "add_user_fields"
+    }
+  }'
+```
+
+**Use Case**: Generate new migrations with auto-generated timestamps.
+
+---
+
+### migrations-run
+
+Execute a specific migration on the server.
+
+**Tool Name**: `migrations-run`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `timestamp` *(string, optional)*: Migration timestamp
+- `name` *(string, optional)*: Full migration name without `.liquid`
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Note**: Provide either `timestamp` or `name` (not both)
+
+**Response Format**:
+```javascript
+{
+  status: "ok",
+  data: {
+    name: "1234567890_add_user_fields",
+    status: "executed"
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# By name
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "migrations-run",
+    "params": {
+      "env": "staging",
+      "name": "1674403200_add_user_fields"
+    }
+  }'
+```
+
+**Use Case**: Execute pending migrations.
+
+---
+
+## Deployment
+
+### deploy-start
+
+Deploy to a platformOS instance. Creates archive from `app/` and `modules/` directories, uploads it, and deploys assets to S3.
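+
+A common automation pattern is to chain `deploy-start` with `deploy-wait` (both documented in this section). A sketch, reusing the `callTool` helper from the GraphQL section:
+
+```javascript
+// Start a deploy, then block until it completes or times out.
+const started = await callTool('deploy-start', { env: 'staging', partial: false });
+const deployId = started.data.id;
+
+const finished = await callTool('deploy-wait', {
+  env: 'staging',
+  id: deployId,
+  intervalMs: 2000,   // poll every 2s
+  maxWaitMs: 600000   // give up after 10 minutes
+});
+console.log(finished.data.status); // "done" on success
+```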
+
+**Tool Name**: `deploy-start`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `partial` *(boolean, optional, default: false)*: Partial deploy (doesn't remove missing files)
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "abc123def456",
+    status: "processing"
+  },
+  archive: {
+    path: "./tmp/release.zip",
+    fileCount: 156
+  },
+  assets: {
+    count: 42,
+    skipped: false
+  },
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:05Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" },
+    params: { partial: false }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Full deploy
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "deploy-start",
+    "params": { "env": "staging", "partial": false }
+  }'
+
+# Partial deploy (doesn't remove files)
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "deploy-start",
+    "params": { "env": "staging", "partial": true }
+  }'
+```
+
+**Use Case**: Deploy code to a platformOS instance.
+
+---
+
+### deploy-status
+
+Get the current status of a deployment.
+
+**Tool Name**: `deploy-status`
+
+**Input Parameters**:
+- `id` *(string, required)*: Deployment ID from `deploy-start`
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "abc123def456",
+    status: "processing",
+    createdAt: "2025-01-23T10:30:00Z",
+    progress: { current: 42, total: 100 }
+  },
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:01Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "deploy-status",
+    "params": {
+      "env": "staging",
+      "id": "abc123def456"
+    }
+  }'
+```
+
+**Use Case**: Check deployment progress without blocking.
+
+---
+
+### deploy-wait
+
+Wait for a deployment to complete. Polls until status is no longer "ready_for_import".
+
+**Tool Name**: `deploy-wait`
+
+**Input Parameters**:
+- `id` *(string, required)*: Deployment ID from `deploy-start`
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `intervalMs` *(integer, optional, min: 200, default: 1000)*: Poll interval
+- `maxWaitMs` *(integer, optional)*: Maximum wait time before timeout
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "abc123def456",
+    status: "done",
+    completedAt: "2025-01-23T10:30:30Z"
+  },
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:31Z"
+  }
+}
+```
+
+**Error on Failure**:
+```javascript
+{
+  ok: false,
+  error: {
+    code: "DEPLOY_FAILED",
+    message: "Deployment failed",
+    data: { status: "error", errorMessage: "..." }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Wait for deploy with 2s polls, max 10 minutes
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "deploy-wait",
+    "params": {
+      "env": "staging",
+      "id": "abc123def456",
+      "intervalMs": 2000,
+      "maxWaitMs": 600000
+    }
+  }'
+```
+
+**Use Case**: Block until deployment completes in automated workflows.
+
+---
+
+## Data Operations
+
+### data-import
+
+Start a data import from JSON file, JSON object, or ZIP archive.
+
+**Tool Name**: `data-import`
+
+**Input Parameters** (exactly one data source required):
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `filePath` *(string, optional)*: Path to JSON or ZIP file
+- `jsonData` *(object, optional)*: JSON data to import directly
+- `zipFileUrl` *(string, optional)*: Remote ZIP file URL
+- `rawIds` *(boolean, optional, default: false)*: Keep original IDs
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "import-123",
+    status: "processing",
+    isZip: false
+  },
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:02Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Import from file
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-import",
+    "params": {
+      "env": "staging",
+      "filePath": "./export.json"
+    }
+  }'
+
+# Import from JSON object
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-import",
+    "params": {
+      "env": "staging",
+      "jsonData": {
+        "users": [
+          { "external_id": "1", "name": "Alice", "email": "alice@example.com" },
+          { "external_id": "2", "name": "Bob", "email": "bob@example.com" }
+        ]
+      }
+    }
+  }'
+
+# Import from remote ZIP
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-import",
+    "params": {
+      "env": "staging",
+      "zipFileUrl": "https://example.com/backup.zip"
+    }
+  }'
+```
+
+**Use Case**: Bulk import data to a platformOS instance.
+
+---
+
+### data-import-status
+
+Check the status of a data import job.
+
+**Tool Name**: `data-import-status`
+
+**Input Parameters**:
+- `jobId` *(string, required)*: Import job ID from `data-import`
+- `isZip` *(boolean, optional, default: false)*: Set to true if import was ZIP
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "import-123",
+    status: "done",
+    done: 100,
+    pending: 0,
+    failed: 0,
+    response: { created: 2, updated: 0 }
+  },
+  meta: { auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" } }
+}
+```
+
+**Status Values**: `pending`, `processing`, `scheduled`, `done`, `failed`
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-import-status",
+    "params": {
+      "env": "staging",
+      "jobId": "import-123",
+      "isZip": false
+    }
+  }'
+```
+
+**Use Case**: Poll until data import completes.
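+
+The status tools are designed for polling. A sketch of waiting for an import to finish, again reusing the `callTool` helper sketched earlier:
+
+```javascript
+// Poll data-import-status until the job leaves the in-flight states.
+const PENDING = new Set(['pending', 'processing', 'scheduled']);
+
+async function waitForImport(env, jobId, isZip = false, intervalMs = 2000) {
+  for (;;) {
+    const { data } = await callTool('data-import-status', { env, jobId, isZip });
+    if (!PENDING.has(data.status)) return data; // "done" or "failed"
+    await new Promise(resolve => setTimeout(resolve, intervalMs));
+  }
+}
+
+const result = await waitForImport('staging', 'import-123');
+console.log(result.status, result.response);
+```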
+
+---
+
+### data-export
+
+Start a data export from a platformOS instance.
+
+**Tool Name**: `data-export`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `exportInternalIds` *(boolean, optional, default: false)*: Use internal IDs instead of external_id
+- `zip` *(boolean, optional, default: false)*: Export as ZIP archive
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "export-456",
+    status: "processing",
+    isZip: false
+  },
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:02Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Export as JSON
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-export",
+    "params": {
+      "env": "staging",
+      "zip": false
+    }
+  }'
+
+# Export as ZIP with internal IDs
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-export",
+    "params": {
+      "env": "staging",
+      "zip": true,
+      "exportInternalIds": true
+    }
+  }'
+```
+
+**Use Case**: Backup data from a platformOS instance.
+
+---
+
+### data-export-status
+
+Check the status of a data export job.
+
+**Tool Name**: `data-export-status`
+
+**Input Parameters**:
+- `jobId` *(string, required)*: Export job ID from `data-export`
+- `isZip` *(boolean, optional, default: false)*: Set to true if export is ZIP
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "export-456",
+    status: "done",
+    done: 100,
+    pending: 0,
+    failed: 0,
+    zipFileUrl: "https://s3.example.com/export.zip",  // if zip: true
+    exportedData: {                                   // if zip: false
+      users: [...],
+      transactables: [...],
+      models: [...]
+    }
+  },
+  meta: { auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" } }
+}
+```
+
+**Status Values**: `pending`, `processing`, `scheduled`, `done`, `failed`
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-export-status",
+    "params": {
+      "env": "staging",
+      "jobId": "export-456",
+      "isZip": false
+    }
+  }'
+```
+
+**Use Case**: Poll until data export completes and retrieve results.
+
+---
+
+### data-clean
+
+Start a destructive data clean operation. Requires confirmation string.
+
+**Tool Name**: `data-clean`
+
+**⚠️ WARNING**: This is a destructive operation. Requires explicit confirmation.
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `confirmation` *(string, required)*: Must be exactly `"CLEAN DATA"`
+- `includeSchema` *(boolean, optional, default: false)*: Also remove pages, schemas, etc.
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "clean-789",
+    status: "processing",
+    includeSchema: false
+  },
+  warning: "This operation is irreversible. All data will be removed from the instance.",
+  meta: {
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:02Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Error on Wrong Confirmation**:
+```javascript
+{
+  ok: false,
+  error: {
+    code: "CONFIRMATION_MISMATCH",
+    message: "Confirmation string does not match",
+    expected: "CLEAN DATA",
+    received: "CLEAN"
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Clean data only
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-clean",
+    "params": {
+      "env": "staging",
+      "confirmation": "CLEAN DATA",
+      "includeSchema": false
+    }
+  }'
+
+# Clean data AND schema
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-clean",
+    "params": {
+      "env": "staging",
+      "confirmation": "CLEAN DATA",
+      "includeSchema": true
+    }
+  }'
+```
+
+**Use Case**: Reset instance for testing or troubleshooting.
+
+---
+
+### data-clean-status
+
+Check the status of a data clean operation.
+
+**Tool Name**: `data-clean-status`
+
+**Input Parameters**:
+- `jobId` *(string, required)*: Clean job ID from `data-clean`
+- `env` *(string, optional)*: Environment name
+- `url` *(string, optional)*: Instance URL
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `endpoint` *(string, optional)*: Override API base URL
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    id: "clean-789",
+    status: "done",
+    done: 100,
+    pending: 0,
+    failed: 0,
+    response: { removed: true }
+  },
+  meta: { auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" } }
+}
+```
+
+**Status Values**: `pending`, `processing`, `scheduled`, `done`, `failed`
+
+**Example Usage**:
+```bash
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "data-clean-status",
+    "params": {
+      "env": "staging",
+      "jobId": "clean-789"
+    }
+  }'
+```
+
+**Use Case**: Poll until data clean completes.
+
+---
+
+## Testing
+
+### unit-tests-run
+
+Run platformOS tests on an instance.
+
+**Tool Name**: `unit-tests-run`
+
+**Input Parameters**:
+- `env` *(string, optional)*: Environment name from `.pos` config
+- `url` *(string, optional)*: Instance URL (alternative to `env`)
+- `email` *(string, optional)*: Account email
+- `token` *(string, optional)*: API token
+- `path` *(string, optional)*: Test path filter (e.g., `tests/users`). Calls `/_tests/run?formatter=text&path=...`
+- `name` *(string, optional)*: Test name filter (e.g., `create_user_test`). Calls `/_tests/run?formatter=text&name=...`
+
+**Note**: Both `path` and `name` can be combined to narrow down test selection.
+
+**Response Format**:
+```javascript
+{
+  ok: true,
+  data: {
+    tests: [
+      { name: "create_user_test", description: "Creates a new user", passed: true },
+      { name: "delete_user_test", description: "Deletes a user", passed: true },
+      { name: "invalid_email_test", description: "Rejects invalid email", passed: false, error: "Expected false, got true" }
+    ],
+    summary: {
+      assertions: 24,
+      failed: 1,
+      timeMs: 2345,
+      totalErrors: 1
+    },
+    passed: 2,
+    totalTests: 3
+  },
+  raw: "...",  // raw test output
+  meta: {
+    url: "https://...",
+    startedAt: "2025-01-23T10:30:00Z",
+    finishedAt: "2025-01-23T10:30:02Z",
+    auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" }
+  }
+}
+```
+
+**Example Usage**:
+```bash
+# Run all tests
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "unit-tests-run",
+    "params": { "env": "staging" }
+  }'
+
+# Run tests in specific path
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "unit-tests-run",
+    "params": {
+      "env": "staging",
+      "path": "tests/users"
+    }
+  }'
+
+# Run specific test by path and name
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "unit-tests-run",
+    "params": {
+      "env": "staging",
+      "path": "tests/users",
+      "name": "create_user_test"
+    }
+  }'
+
+# Run test by name only
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "unit-tests-run",
+    "params": {
+      "env": "staging",
+      "name": "create_user_test"
+    }
+  }'
+```
+
+**API Endpoint Called**:
+- All tests: `/_tests/run?formatter=text`
+- With path: `/_tests/run?formatter=text&path=tests%2Fusers`
+- With name: `/_tests/run?formatter=text&name=create_user_test`
+- With both: `/_tests/run?formatter=text&path=tests%2Fusers&name=create_user_test`
+
+**Use Case**: Execute platformOS tests to verify functionality.
+
+---
+
+## Linting
+
+### check
+
+Run platformos-check to lint and analyze the app for best practice violations. Checks Liquid, JSON, YAML, and HTML files.
+ +**Tool Name**: `check` + +**Input Parameters**: +- `appPath` *(string, optional)*: Path to the platformOS app (default: current directory) +- `format` *(string, optional, enum: ['text', 'json'], default: 'json')*: Output format +- `category` *(array of strings, optional)*: Only run checks matching these categories (can specify multiple) +- `excludeCategory` *(array of strings, optional)*: Exclude checks matching these categories (can specify multiple) +- `autoCorrect` *(boolean, optional, default: false)*: Automatically fix offenses +- `failLevel` *(string, optional, enum: ['error', 'suggestion', 'style'])*: Minimum severity level to fail with error code +- `config` *(string, optional)*: Path to custom `.platformos-check.yml` config file +- `list` *(boolean, optional, default: false)*: List enabled checks without running them +- `print` *(boolean, optional, default: false)*: Print active config to STDOUT + +**Available Check Categories**: +- `:liquid` - Liquid template checks +- `:graphql` - GraphQL checks +- `:yaml` - YAML validation +- `:html` - HTML checks +- `:performance` - Performance-related checks +- `:translation` - Translation key checks + +**Response Format** (for normal check run): +```javascript +{ + ok: true, + data: { + result: { + offenses: [ + { + file: "app/views/index.liquid", + line: 5, + column: 2, + message: "Space inside braces", + severity: "style", + check: "SpaceInsideBraces" + } + ], + summary: { + total: 5, + errors: 2, + warnings: 1, + suggestions: 2 + } + }, + format: "json", + appPath: ".", + autoCorrect: false + }, + meta: { + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:05Z" + } +} +``` + +**Response Format** (when list=true): +```javascript +{ + ok: true, + data: { + result: "ConvertIncludeToRender:\n severity: suggestion\n categories: [:liquid]\n ...", + format: "json", + listChecks: true + }, + meta: { + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:01Z" + } +} +``` + +**Example Usage**: + +```bash +# Run all checks on current directory (JSON format) +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "check", + "params": { + "format": "json" + } + }' + +# Run only liquid checks +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "check", + "params": { + "category": ["liquid"] + } + }' + +# Exclude performance checks +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "check", + "params": { + "excludeCategory": ["performance"] + } + }' + +# Auto-fix offenses +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "check", + "params": { + "autoCorrect": true + } + }' + +# List all enabled checks +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "check", + "params": { + "list": true + } + }' + +# Print active configuration +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "check", + "params": { + "print": true + } + }' + +# Run checks with custom config +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "check", + "params": { + "config": ".platformos-check.yml", + "failLevel": "error" + } + }' +``` + +**Available Checks** (examples): + +| Check Name | Category | Severity | Description | +|-----------|----------|----------|-------------| +| SyntaxError | liquid | error | 
Detects Liquid syntax errors | +| MissingTemplate | liquid | suggestion | Detects references to missing templates | +| UnusedPartial | liquid | suggestion | Detects unused partial templates | +| UnknownFilter | liquid | error | Detects undefined Liquid filters | +| UndefinedObject | liquid | error | Detects undefined template objects | +| SpaceInsideBraces | liquid | style | Ensures consistent spacing in braces | +| InvalidArgs | liquid, graphql | error | Validates filter/tag arguments | +| FormAction | html | error | Ensures forms have action attribute | +| ImgWidthAndHeight | html, performance | error | Requires width/height on images | +| ParserBlockingJavaScript | html, performance | error | Detects parser-blocking JavaScript | +| ValidYaml | yaml | error | Validates YAML syntax | +| TranslationKeyExists | translation, liquid | error | Validates translation keys exist | + +**Use Case**: Lint code, find best practice violations, and auto-fix issues. + +--- + +## File Sync + +### sync-file + +Sync a single file to a platformOS instance (upload or delete). + +**Tool Name**: `sync-file` + +**Input Parameters** (required: filePath): +- `filePath` *(string, required)*: Path to file (relative to project root) +- `env` *(string, optional)*: Environment name +- `url` *(string, optional)*: Instance URL +- `email` *(string, optional)*: Account email +- `token` *(string, optional)*: API token +- `op` *(string, optional, enum: ["upload", "delete"])*: Operation (auto-detected if not provided) +- `dryRun` *(boolean, optional, default: false)*: Simulate without performing +- `confirmDelete` *(boolean, optional, default: false)*: Required to confirm deletion +- `endpoint` *(string, optional)*: Override API base URL + +**Supported Directories**: +- `app/` - Application files +- `modules/*/` - Module files +- `marketplace_builder/` - Legacy directory + +**Response Format**: +```javascript +{ + ok: true, + file: { + path: "app/views/index.html", + size: 1234, + operation: "upload", + isAsset: false, + wasIgnored: false + }, + server: { + response: { success: true }, + timingMs: 234 + }, + meta: { + dryRun: false, + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:00Z", + auth: { url: "https://...", email: "...", token: "abc...xyz", source: ".pos(staging)" } + } +} +``` + +**Error on Missing Confirmation**: +```javascript +{ + ok: false, + error: { + code: "DELETE_REQUIRES_CONFIRMATION", + message: "File deletion requires confirmDelete=true" + } +} +``` + +**Example Usage**: +```bash +# Upload a file +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "sync-file", + "params": { + "env": "staging", + "filePath": "app/views/index.html", + "op": "upload" + } + }' + +# Delete a file (requires confirmation) +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "sync-file", + "params": { + "env": "staging", + "filePath": "app/views/old.html", + "op": "delete", + "confirmDelete": true + } + }' + +# Dry run upload (simulate without performing) +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "sync-file", + "params": { + "env": "staging", + "filePath": "app/views/index.html", + "op": "upload", + "dryRun": true + } + }' +``` + +**Use Case**: Sync individual files without full deployment. 
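+
+For continuous local development, a client can pair `sync-file` with a file watcher. A minimal sketch, assuming the MCP server from this guide is running on `http://localhost:5910` with a `staging` environment configured; the watcher itself is illustrative, not part of the tool:
+
+```javascript
+// watch-sync.js - push each changed file under app/ through the sync-file tool.
+import { watch } from 'fs';
+
+const syncFile = async (filePath) => {
+  const res = await fetch('http://localhost:5910/call', {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({
+      tool: 'sync-file',
+      params: { env: 'staging', filePath, op: 'upload' }
+    })
+  });
+  const result = await res.json();
+  console.log(result.ok ? `Synced ${filePath}` : `Failed: ${result.error?.message}`);
+};
+
+// Recursive fs.watch requires a recent Node.js version; debouncing is omitted for brevity.
+watch('app', { recursive: true }, (eventType, fileName) => {
+  if (fileName) syncFile(`app/${fileName}`);
+});
+```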
+ +--- + +## Property Uploads + +### uploads-push + +Upload a ZIP file containing property uploads (files referenced by upload-type properties) to a platformOS instance. This is the MCP equivalent of `pos-cli uploads push`. + +**Tool Name**: `uploads-push` + +**Input Parameters**: +- `env` *(string, required)*: Environment name from `.pos` config +- `filePath` *(string, required)*: Path to ZIP file with uploads + +**Response Format**: +```javascript +{ + ok: true, + data: { + instanceId: "abc123", + filePath: "/path/to/uploads.zip", + accessUrl: "https://cdn.platformos.com/instances/abc123/property_uploads/data.public_property_upload_import.zip" + }, + meta: { + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:05Z" + } +} +``` + +**Error Responses**: +```javascript +// File not found +{ + ok: false, + error: { code: "FILE_NOT_FOUND", message: "File not found: /path/to/uploads.zip" } +} + +// Upload failed +{ + ok: false, + error: { code: "UPLOAD_FAILED", message: "Error details..." } +} +``` + +**Example Usage**: +```bash +# Upload a ZIP file +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "uploads-push", + "params": { + "env": "staging", + "filePath": "./uploads.zip" + } + }' + +# Upload from seed directory +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "uploads-push", + "params": { + "env": "production", + "filePath": "./seed/images.zip" + } + }' +``` + +--- + +### Complete Uploads Tutorial + +This tutorial explains how to use `uploads-push` to import files that will be referenced by upload-type properties in your platformOS data. + +#### Step 1: Define Upload Properties in Your Schema + +First, create a table with an upload-type property in your platformOS app: + +**`app/schema/photo.yml`**: +```yaml +name: photo +properties: + - name: title + type: string + - name: image + type: upload + options: + content_length: + min: 0 + max: 5242880 # 5MB max + versions: + - name: thumb + output: + format: webp + quality: 80 + resize: + width: 150 + height: 150 + fit: cover + - name: medium + output: + format: webp + quality: 85 + resize: + width: 800 + height: 600 + fit: inside + without_enlargement: true +``` + +#### Step 2: Prepare Your ZIP File Structure + +Create a ZIP file with your upload files organized in directories: + +``` +uploads.zip +└── photo_images/ + ├── sunset.jpg + ├── mountain.png + └── beach.webp +``` + +The directory name (`photo_images`) will be used as a reference path in your data import. + +**Creating the ZIP**: +```bash +# From your project root +mkdir -p seed/photo_images +cp /path/to/your/images/* seed/photo_images/ +cd seed && zip -r ../uploads.zip photo_images/ +``` + +#### Step 3: Push Uploads to platformOS + +Use the `uploads-push` tool to upload the ZIP file: + +```bash +# Using MCP server +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "uploads-push", + "params": { + "env": "staging", + "filePath": "./uploads.zip" + } + }' +``` + +Or using the CLI directly: +```bash +pos-cli uploads push staging --path=uploads.zip +``` + +#### Step 4: Import Data Referencing the Uploads + +After uploading files, import your data records that reference them. 
+ +**`seed/data.json`**: +```json +{ + "records": [ + { + "id": "photo-1", + "type": "photo", + "properties": { + "title": "Beautiful Sunset", + "image": { + "path": "photo_images/sunset.jpg", + "file_name": "sunset.jpg", + "extension": ".jpg", + "versions": { + "thumb": "photo_images/sunset.jpg", + "medium": "photo_images/sunset.jpg" + } + } + } + }, + { + "id": "photo-2", + "type": "photo", + "properties": { + "title": "Mountain View", + "image": { + "path": "photo_images/mountain.png", + "file_name": "mountain.png", + "extension": ".png", + "versions": { + "thumb": "photo_images/mountain.png", + "medium": "photo_images/mountain.png" + } + } + } + } + ] +} +``` + +Import the data: +```bash +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "data-import", + "params": { + "env": "staging", + "filePath": "./seed/data.json" + } + }' +``` + +#### Step 5: Access Uploaded Files + +After import, access your files using the `property_upload` GraphQL argument: + +**GraphQL Query**: +```graphql +query GetPhotos { + records( + per_page: 10 + filter: { table: { value: "photo" } } + ) { + results { + id + title: property(name: "title") + image: property_upload(name: "image") { + url + versions + } + } + } +} +``` + +**Response**: +```json +{ + "data": { + "records": { + "results": [ + { + "id": "photo-1", + "title": "Beautiful Sunset", + "image": { + "url": "https://cdn.platformos.com/.../sunset.jpg", + "versions": { + "thumb": "https://cdn.platformos.com/.../sunset_thumb.webp", + "medium": "https://cdn.platformos.com/.../sunset_medium.webp" + } + } + } + ] + } + } +} +``` + +#### Complete Workflow Script + +Here's a complete workflow combining all steps: + +```bash +#!/bin/bash +set -e + +ENV="staging" +MCP_URL="http://localhost:5910/call" + +echo "=== Step 1: Push uploads ===" +curl -s -X POST $MCP_URL \ + -H "Content-Type: application/json" \ + -d "{ + \"tool\": \"uploads-push\", + \"params\": { + \"env\": \"$ENV\", + \"filePath\": \"./uploads.zip\" + } + }" | jq . + +echo "" +echo "=== Step 2: Import data ===" +IMPORT_RESULT=$(curl -s -X POST $MCP_URL \ + -H "Content-Type: application/json" \ + -d "{ + \"tool\": \"data-import\", + \"params\": { + \"env\": \"$ENV\", + \"filePath\": \"./seed/data.json\" + } + }") + +echo "$IMPORT_RESULT" | jq . +JOB_ID=$(echo "$IMPORT_RESULT" | jq -r '.data.id') + +echo "" +echo "=== Step 3: Wait for import ===" +while true; do + STATUS=$(curl -s -X POST $MCP_URL \ + -H "Content-Type: application/json" \ + -d "{ + \"tool\": \"data-import-status\", + \"params\": { + \"env\": \"$ENV\", + \"jobId\": \"$JOB_ID\" + } + }") + + STATE=$(echo "$STATUS" | jq -r '.data.status') + echo "Status: $STATE" + + if [ "$STATE" = "done" ] || [ "$STATE" = "failed" ]; then + echo "$STATUS" | jq . + break + fi + + sleep 2 +done + +echo "" +echo "=== Done! ===" +``` + +#### Tips and Best Practices + +1. **File Organization**: Use meaningful directory names that match your property names for clarity. + +2. **File Size Limits**: Check your upload property's `content_length` options to ensure files don't exceed limits. + +3. **Supported Formats**: For images, platformOS supports common formats (JPEG, PNG, WebP, GIF). The `versions` option can convert between formats. + +4. **Version Paths**: In import data, version paths typically point to the same source file - platformOS generates the versions automatically based on your schema options. + +5. **Order Matters**: Always push uploads BEFORE importing data that references them. + +6. 
**Idempotency**: Re-running `uploads-push` with the same ZIP will overwrite existing files at the same paths. + +--- + +## Constants + +Manage instance constants (environment variables stored on the platformOS instance). + +### constants-list + +List all constants configured on a platformOS instance. + +**Tool Name**: `constants-list` + +**Input Parameters**: +- `env` *(string, required)*: Environment name from `.pos` config + +**Response Format**: +```javascript +{ + ok: true, + data: { + constants: [ + { name: "API_KEY", value: "abc123...", updatedAt: "2025-01-23T10:30:00Z" }, + { name: "SECRET_TOKEN", value: "xyz789...", updatedAt: "2025-01-22T08:00:00Z" } + ], + count: 2 + }, + meta: { + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:01Z" + } +} +``` + +**Example Usage**: +```bash +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "constants-list", + "params": { "env": "staging" } + }' +``` + +--- + +### constants-set + +Set a constant on a platformOS instance. Creates or updates the constant. + +**Tool Name**: `constants-set` + +**Input Parameters**: +- `env` *(string, required)*: Environment name from `.pos` config +- `name` *(string, required)*: Name of the constant (e.g., `API_KEY`) +- `value` *(string, required)*: Value of the constant + +**Response Format**: +```javascript +{ + ok: true, + data: { + name: "API_KEY", + value: "new-value-here" + }, + meta: { + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:01Z" + } +} +``` + +**Example Usage**: +```bash +# Set a new constant +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "constants-set", + "params": { + "env": "staging", + "name": "API_KEY", + "value": "sk-1234567890" + } + }' + +# Update an existing constant +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "constants-set", + "params": { + "env": "production", + "name": "STRIPE_KEY", + "value": "pk_live_xxxxx" + } + }' +``` + +--- + +### constants-unset + +Delete a constant from a platformOS instance. + +**Tool Name**: `constants-unset` + +**Input Parameters**: +- `env` *(string, required)*: Environment name from `.pos` config +- `name` *(string, required)*: Name of the constant to delete + +**Response Format**: +```javascript +{ + ok: true, + data: { + name: "OLD_KEY", + deleted: true + }, + meta: { + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:01Z" + } +} +``` + +**Example Usage**: +```bash +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "constants-unset", + "params": { + "env": "staging", + "name": "DEPRECATED_KEY" + } + }' +``` + +**Note**: If the constant doesn't exist, the response will have `deleted: false`. 
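+
+To keep a set of constants in sync with a local file, a client can loop over `constants-set`. A minimal sketch — the `.env.staging` file name is hypothetical; the `/call` endpoint and parameters are as documented above:
+
+```javascript
+// Push KEY=value pairs from a local file to the instance, one constants-set call each.
+import { readFileSync } from 'fs';
+
+const lines = readFileSync('.env.staging', 'utf8').split('\n');
+for (const line of lines) {
+  const match = line.match(/^([A-Z0-9_]+)=(.*)$/);
+  if (!match) continue; // skip comments and blank lines
+  const [, name, value] = match;
+  const res = await fetch('http://localhost:5910/call', { // top-level await: ESM only
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({ tool: 'constants-set', params: { env: 'staging', name, value } })
+  });
+  const result = await res.json();
+  console.log(`${name}: ${result.ok ? 'set' : result.error.message}`);
+}
+```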
+ +--- + +## Response Patterns + +### Standard Success Response + +Most tools follow this pattern for success: + +```javascript +{ + ok: true, + data: { + // Tool-specific data + }, + meta: { + startedAt: "2025-01-23T10:30:00Z", + finishedAt: "2025-01-23T10:30:02Z", + auth: { + url: "https://instance.platformos.net", + email: "user@example.com", + token: "abc...xyz", // First 3 and last 3 chars, masked + source: ".pos(staging)" // Where auth came from + } + } +} +``` + +### Standard Error Response + +Tools return errors without throwing to prevent server crashes: + +```javascript +{ + ok: false, + error: { + code: "ERROR_CODE", // Machine-readable error type + message: "Human-readable message", + details?: {...} // Optional extra details + } +} +``` + +### Async Job Pattern + +Long-running operations follow this pattern: + +**1. Start operation**: +```javascript +POST /call +{ + "tool": "deploy-start", + "params": {"env": "staging"} +} +``` + +**2. Get job ID from response**: +```javascript +{ + "ok": true, + "data": {"id": "abc123def456", "status": "processing"} +} +``` + +**3. Poll for status**: +```javascript +POST /call +{ + "tool": "deploy-status", + "params": {"env": "staging", "id": "abc123def456"} +} +``` + +**4. Or wait for completion**: +```javascript +POST /call +{ + "tool": "deploy-wait", + "params": {"env": "staging", "id": "abc123def456"} +} +``` + +--- + +## Workflow Examples + +### Complete Deployment Workflow + +```bash +# 1. Start deployment +DEPLOY_ID=$(curl -s -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d '{ + "tool": "deploy-start", + "params": {"env": "staging"} + }' | jq -r '.data.id') + +# 2. Wait for completion +curl -X POST http://localhost:5910/call \ + -H "Content-Type: application/json" \ + -d "{ + \"tool\": \"deploy-wait\", + \"params\": { + \"env\": \"staging\", + \"id\": \"$DEPLOY_ID\", + \"maxWaitMs\": 600000 + } + }" +``` + +### Data Migration Workflow + +```bash +# 1. Export data from production +EXPORT_ID=$(curl -s -X POST http://localhost:5910/call \ + -d '{ + "tool": "data-export", + "params": {"env": "production", "zip": true} + }' | jq -r '.data.id') + +# 2. Poll export status +curl -X POST http://localhost:5910/call \ + -d "{ + \"tool\": \"data-export-status\", + \"params\": {\"env\": \"production\", \"jobId\": \"$EXPORT_ID\", \"isZip\": true} + }" + +# 3. Get ZIP URL and import to staging +ZIP_URL=$(curl -s -X POST http://localhost:5910/call \ + -d "{...}" | jq -r '.data.zipFileUrl') + +IMPORT_ID=$(curl -s -X POST http://localhost:5910/call \ + -d "{ + \"tool\": \"data-import\", + \"params\": {\"env\": \"staging\", \"zipFileUrl\": \"$ZIP_URL\"} + }" | jq -r '.data.id') + +# 4. Poll import status +curl -X POST http://localhost:5910/call \ + -d "{ + \"tool\": \"data-import-status\", + \"params\": {\"env\": \"staging\", \"jobId\": \"$IMPORT_ID\", \"isZip\": true} + }" +``` + +### Test & Deploy Workflow + +```bash +# 1. Run tests +curl -X POST http://localhost:5910/call \ + -d '{"tool": "unit-tests-run", "params": {"env": "staging"}}' + +# 2. 
If tests pass, deploy
+curl -X POST http://localhost:5910/call \
+  -d '{"tool": "deploy-start", "params": {"env": "staging"}}'
+```
+
+---
+
+## Troubleshooting
+
+### Common Errors
+
+| Error Code | Cause | Solution |
+|-----------|-------|----------|
+| `AUTH_MISSING` | No credentials configured | Set up `.pos` file or environment variables |
+| `404` | Endpoint not found | Check instance URL and environment |
+| `422` | Feature not supported | Feature may be disabled on server |
+| `CONFIRMATION_MISMATCH` | Wrong confirmation string | Use exact string `"CLEAN DATA"` |
+| `NOT_SUPPORTED` | Server doesn't support feature | Update server or use different endpoint |
+
+### Debug Mode
+
+Enable verbose logging:
+
+```bash
+cd mcp-min
+MCP_MIN_DEBUG=1 npm start
+```
+
+### Testing Tools Locally
+
+```bash
+# Start MCP server
+cd mcp-min && npm start
+
+# In another terminal, test a tool
+curl -X POST http://localhost:5910/call \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tool": "envs-list",
+    "params": {}
+  }'
+```
+
+---
+
+## Summary
+
+### Tool Count by Category
+
+- **Environment Management**: 1 (envs-list)
+- **Logging & Monitoring**: 2 (logs-fetch, logs-stream)
+- **GraphQL & Liquid**: 2 (graphql-exec, liquid-exec)
+- **Generators**: 3 (generators-list, generators-help, generators-run)
+- **Migrations**: 3 (migrations-list, migrations-generate, migrations-run)
+- **Deployment**: 3 (deploy-start, deploy-status, deploy-wait)
+- **Data Operations**: 6 (data-import, data-import-status, data-export, data-export-status, data-clean, data-clean-status)
+- **Testing**: 1 (unit-tests-run)
+- **Linting**: 1 (check)
+- **File Sync**: 1 (sync-file)
+- **Property Uploads**: 1 (uploads-push)
+- **Constants**: 3 (constants-list, constants-set, constants-unset)
+
+**Total**: 27 active tools
+
+### Tool Locations
+
+All tools live in the `mcp-min/` directory, organized by category:
+
+- `mcp-min/logs/` - Logging tools
+- `mcp-min/liquid/` - Liquid template tools
+- `mcp-min/graphql/` - GraphQL tools
+- `mcp-min/generators/` - Generator tools
+- `mcp-min/migrations/` - Migration tools
+- `mcp-min/deploy/` - Deployment tools
+- `mcp-min/data/` - Data operation tools
+- `mcp-min/tests/` - Testing tools
+- `mcp-min/check/` - Linting tools
+- `mcp-min/sync/` - File sync tools
+- `mcp-min/uploads/` - Property upload tools
+- `mcp-min/constants/` - Constants management tools
+
+### Registration
+
+All tools are registered in `mcp-min/tools.js` and exported as a single module for use by both the HTTP and stdio MCP servers.
+
+---
+
+## See Also
+
+- [platformOS Documentation](https://documentation.platformos.com)
+- [MCP Server Setup](./SSE_GUIDE.md)
+- [CLI Reference](./API.md)
diff --git a/docs/POS-CLI.md b/docs/POS-CLI.md
new file mode 100644
index 000000000..fc00a296f
--- /dev/null
+++ b/docs/POS-CLI.md
@@ -0,0 +1,62 @@
+# pos-cli Integration Reference
+
+## Architecture Overview
+
+The MCP server integrates pos-cli as a **library**, not a subprocess:
+
+```
+MCP Server → PlatformOSClient → pos-cli Gateway → platformOS API
+```
+
+### Key Integration Points
+
+1. **`lib/apiWrappers.ts`** (pos-cli)
+   - `PlatformOSClient` - Environment pooling, auth refresh
+   - `GraphQLWrapper` - `gateway.graph()`
+   - Direct HTTP wrappers for Liquid, logs, data export
+
+2. **No Subprocess Spawning**
+   - Pure Node.js imports: `require('../../lib/apiWrappers')`
+   - Shared `.pos/envs/` storage
+   - Same credential format
+
+3. 
**Tool Mappings** + +| MCP Tool | pos-cli Method | +|----------|----------------| +| `env.list` | `FsStorage.listEnvs()` | +| `graphql.execute` | `client.graphql(env, query)` | +| `liquid.render` | `client.liquidRender(env, template)` | +| `data.export_start` | `client.dataExportStart()` | + +## Setup for pos-cli Users + +Your existing `.pos/envs/*.json` files work **automatically**! + +```bash +# Same config as pos-cli +cd your-pos-cli-project +cp -r .pos mcp-project/ +cp clients.json mcp-project/ # MCP clients +cd mcp-project +npm run dev +``` + +## Extensibility + +Add new tools by wrapping pos-cli methods: + +```typescript +// src/tools/deploy.tools.ts +const deployTool: Tool = { + name: 'platformos.deploy', + inputSchema: z.object({ env: z.string() }), + handler: async ({ env }) => { + const client = new PlatformOSClient(); + return await client.deploy(env); // Reuse pos-cli deploy logic + } +}; +``` + +--- +**MCP = pos-cli + AI-friendly HTTP tools**","path":"docs/POS-CLI.md \ No newline at end of file diff --git a/docs/SSE_GUIDE.md b/docs/SSE_GUIDE.md new file mode 100644 index 000000000..4d068e881 --- /dev/null +++ b/docs/SSE_GUIDE.md @@ -0,0 +1,618 @@ +# Server-Sent Events (SSE) Streaming Guide + +This guide provides comprehensive documentation for using the Server-Sent Events (SSE) streaming capabilities in the pos-cli MCP server for real-time log monitoring. + +## Overview + +The MCP server provides two streaming tools for accessing platformOS logs in real-time: + +- **`platformos.logs.stream`** - Basic streaming with automatic polling +- **`platformos.logs.live`** - Enhanced streaming with duplicate detection and heartbeats + +Both tools use Server-Sent Events (SSE) to push log data to clients as it becomes available. + +## Quick Start + +### 1. Start the MCP Server + +```bash +cd /path/to/pos-cli +npm run mcp +``` + +The server will start on `http://localhost:3030`. + +### 2. Configure Authentication + +Set your client secret token (from `clients.json`): + +```bash +export CLIENT_SECRET="your-client-secret-here" +``` + +### 3. Start Streaming Logs + +**Basic streaming:** +```bash +curl -X POST http://localhost:3030/call-stream \ + -H "Authorization: Bearer $CLIENT_SECRET" \ + -H "Content-Type: application/json" \ + -d '{"tool":"platformos.logs.stream","input":{"env":"staging"}}' +``` + +**Enhanced streaming with filtering:** +```bash +curl -X POST http://localhost:3030/call-stream \ + -H "Authorization: Bearer $CLIENT_SECRET" \ + -H "Content-Type: application/json" \ + -d '{"tool":"platformos.logs.live","input":{"env":"staging","filter":"error","interval":2000}}' +``` + +## Streaming Tools Reference + +### platformos.logs.stream + +Streams logs continuously by polling the platformOS API at regular intervals. + +**Parameters:** +- `env` (string, required) - Environment name (e.g., "staging", "production") +- `interval` (number, optional) - Polling interval in milliseconds (default: 3000) +- `filter` (string, optional) - Filter logs by type (e.g., "error", "info", "debug") + +**Example:** +```json +{ + "tool": "platformos.logs.stream", + "input": { + "env": "staging", + "interval": 5000, + "filter": "error" + } +} +``` + +### platformos.logs.live + +Enhanced streaming with duplicate detection, heartbeats, and configurable duration. 
+ +**Parameters:** +- `env` (string, required) - Environment name +- `interval` (number, optional) - Polling interval in milliseconds (default: 3000) +- `filter` (string, optional) - Filter logs by type +- `maxDuration` (number, optional) - Maximum streaming duration in milliseconds (default: 300000 = 5 minutes) + +**Example:** +```json +{ + "tool": "platformos.logs.live", + "input": { + "env": "production", + "filter": "error", + "interval": 1000, + "maxDuration": 600000 + } +} +``` + +## SSE Event Format + +The server sends events in the following format: + +``` +event: data +data: {"type":"text","text":"{"id":"123","timestamp":"2024-01-01T12:00:00Z","type":"info","message":"Log message","env":"staging"}"} + +event: error +data: {"type":"error","text":"{"type":"error","message":"Connection failed","timestamp":"2024-01-01T12:00:00Z"}"} + +event: done +data: [DONE] + +: heartbeat + +``` + +### Event Types + +- **`data`** - Contains log entry data +- **`error`** - Contains error information +- **`done`** - Signals stream completion +- **`heartbeat`** - Keep-alive signal (comment line starting with `:`) + +### Log Entry Format + +Each log entry is a JSON object with the following structure: + +```json +{ + "id": "unique-log-id", + "timestamp": "2024-01-01T12:00:00.000Z", + "type": "error|info|debug|warn", + "message": "Human-readable log message", + "data": { + "additional": "contextual data" + }, + "env": "environment-name" +} +``` + +## Client Implementation Examples + +### Node.js SSE Client + +```javascript +const EventSource = require('eventsource'); + +class LogStreamer { + constructor(baseUrl, token) { + this.baseUrl = baseUrl; + this.token = token; + } + + streamLogs(env, options = {}) { + const { tool = 'platformos.logs.stream', interval = 3000, filter } = options; + + const url = `${this.baseUrl}/call-stream`; + const body = JSON.stringify({ + tool, + input: { env, interval, filter } + }); + + // For demo - in production, use a proper SSE client + fetch(url, { + method: 'POST', + headers: { + 'Authorization': `Bearer ${this.token}`, + 'Content-Type': 'application/json' + }, + body + }).then(async response => { + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + const chunk = decoder.decode(value, { stream: true }); + console.log('Raw SSE data:', chunk); + + // Parse SSE events + this.parseSSE(chunk); + } + }); + } + + parseSSE(data) { + const lines = data.split('\n'); + let currentEvent = null; + + for (const line of lines) { + if (line.startsWith('event:')) { + currentEvent = line.substring(6).trim(); + } else if (line.startsWith('data:') && currentEvent) { + const eventData = line.substring(5).trim(); + this.handleEvent(currentEvent, eventData); + } + } + } + + handleEvent(eventType, data) { + switch (eventType) { + case 'data': + try { + const logEntry = JSON.parse(JSON.parse(data).text); + console.log(`[${logEntry.type.toUpperCase()}] ${logEntry.message}`); + } catch (e) { + console.log('Log data:', data); + } + break; + case 'error': + console.error('Stream error:', data); + break; + case 'done': + console.log('Stream completed'); + break; + } + } +} + +// Usage +const streamer = new LogStreamer('http://localhost:3030', 'your-token'); +streamer.streamLogs('staging', { filter: 'error' }); +``` + +### Python SSE Client + +```python +import requests +import json +import sseclient # pip install sseclient-py + +class LogStreamer: + def __init__(self, base_url, 
token): + self.base_url = base_url + self.token = token + self.headers = { + 'Authorization': f'Bearer {token}', + 'Content-Type': 'application/json' + } + + def stream_logs(self, env, tool='platformos.logs.stream', **options): + url = f"{self.base_url}/call-stream" + payload = { + 'tool': tool, + 'input': {'env': env, **options} + } + + response = requests.post(url, json=payload, headers=self.headers, stream=True) + response.raise_for_status() + + client = sseclient.SSEClient(response) + + for event in client.events(): + self.handle_event(event.event, event.data) + + def handle_event(self, event_type, data): + if event_type == 'data': + try: + # Parse the nested JSON structure + log_data = json.loads(json.loads(data).get('text', '{}')) + timestamp = log_data.get('timestamp', 'unknown') + log_type = log_data.get('type', 'info').upper() + message = log_data.get('message', '') + + print(f"[{timestamp}] [{log_type}] {message}") + except json.JSONDecodeError: + print(f"Raw data: {data}") + + elif event_type == 'error': + print(f"Error: {data}") + elif event_type == 'done': + print("Stream completed") + elif event_type == 'heartbeat': + print(".", end="", flush=True) # Show heartbeat as dots + +# Usage +streamer = LogStreamer('http://localhost:3030', 'your-token') +streamer.stream_logs('staging', filter='error', interval=2000) +``` + +### JavaScript (Browser) SSE Client + +```javascript +class LogStreamer { + constructor(baseUrl, token) { + this.baseUrl = baseUrl; + this.token = token; + } + + async streamLogs(env, options = {}) { + const { tool = 'platformos.logs.stream', interval = 3000, filter } = options; + + try { + const response = await fetch(`${this.baseUrl}/call-stream`, { + method: 'POST', + headers: { + 'Authorization': `Bearer ${this.token}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + tool, + input: { env, interval, filter } + }) + }); + + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ''; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + + // Process complete SSE messages + const messages = buffer.split('\n\n'); + buffer = messages.pop(); // Keep incomplete message in buffer + + for (const message of messages) { + this.processSSEMessage(message.trim()); + } + } + } catch (error) { + console.error('Streaming error:', error); + } + } + + processSSEMessage(message) { + const lines = message.split('\n'); + let eventType = ''; + let data = ''; + + for (const line of lines) { + if (line.startsWith('event:')) { + eventType = line.substring(6).trim(); + } else if (line.startsWith('data:')) { + data += line.substring(5).trim(); + } + } + + this.handleEvent(eventType, data); + } + + handleEvent(eventType, data) { + const output = document.getElementById('log-output'); + + switch (eventType) { + case 'data': + try { + const logEntry = JSON.parse(JSON.parse(data).text); + const logDiv = document.createElement('div'); + logDiv.className = `log-entry log-${logEntry.type}`; + logDiv.textContent = `[${logEntry.timestamp}] ${logEntry.message}`; + output.appendChild(logDiv); + output.scrollTop = output.scrollHeight; + } catch (e) { + console.error('Parse error:', e); + } + break; + + case 'error': + const errorDiv = document.createElement('div'); + errorDiv.className = 'log-entry log-error'; + errorDiv.textContent = `ERROR: ${data}`; + 
output.appendChild(errorDiv);
+          break;
+
+        case 'done':
+          const doneDiv = document.createElement('div');
+          doneDiv.className = 'log-entry log-info';
+          doneDiv.textContent = 'Stream completed';
+          output.appendChild(doneDiv);
+          break;
+      }
+  }
+}
+
+// Usage in browser
+const streamer = new LogStreamer('http://localhost:3030', 'your-token');
+streamer.streamLogs('staging', { filter: 'error' });
+```
+
+### HTML for Browser Example
+
+A minimal page wrapping the browser client above (the styles are illustrative):
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <title>Log Streamer</title>
+  <style>
+    /* Classes rendered by LogStreamer.handleEvent() */
+    .log-entry { font-family: monospace; padding: 2px 6px; }
+    .log-error { color: #c0392b; }
+    .log-info  { color: #2c3e50; }
+  </style>
+</head>
+<body>
+  <h1>Real-time Log Streaming</h1>
+  <div id="log-output" style="height: 400px; overflow-y: auto;"></div>
+  <script>
+    // Paste the LogStreamer class from the example above, then start streaming:
+    const streamer = new LogStreamer('http://localhost:3030', 'your-token');
+    streamer.streamLogs('staging', { filter: 'error' });
+  </script>
+</body>
+</html>
+ + + +``` + +## Advanced Usage Patterns + +### Error Monitoring Dashboard + +```javascript +class ErrorMonitor { + constructor(streamer) { + this.streamer = streamer; + this.errorCount = 0; + this.errors = []; + } + + async monitor(env) { + await this.streamer.streamLogs(env, { + tool: 'platformos.logs.live', + filter: 'error', + interval: 1000 + }); + } + + handleEvent(eventType, data) { + if (eventType === 'data') { + const logEntry = JSON.parse(JSON.parse(data).text); + if (logEntry.type === 'error') { + this.errorCount++; + this.errors.push(logEntry); + + // Alert if too many errors + if (this.errorCount > 10) { + this.sendAlert(); + } + } + } + } + + sendAlert() { + console.error(`High error rate detected: ${this.errorCount} errors`); + // Send notification, webhook, etc. + } +} +``` + +### Log Aggregation and Analysis + +```javascript +class LogAnalyzer { + constructor() { + this.stats = { + total: 0, + byType: {}, + byHour: {}, + errors: [] + }; + } + + processLog(logEntry) { + this.stats.total++; + + // Count by type + this.stats.byType[logEntry.type] = (this.stats.byType[logEntry.type] || 0) + 1; + + // Count by hour + const hour = new Date(logEntry.timestamp).getHours(); + this.stats.byHour[hour] = (this.stats.byHour[hour] || 0) + 1; + + // Collect errors + if (logEntry.type === 'error') { + this.stats.errors.push(logEntry); + } + } + + getReport() { + return { + totalLogs: this.stats.total, + typeBreakdown: this.stats.byType, + hourlyDistribution: this.stats.byHour, + recentErrors: this.stats.errors.slice(-10) + }; + } +} +``` + +## Troubleshooting + +### Common Issues + +1. **Connection drops frequently** + - Check network connectivity + - Reduce polling interval + - Use `platformos.logs.live` for better connection management + +2. **Missing logs** + - Verify environment name is correct + - Check authentication token + - Ensure the environment has logging enabled + +3. **High latency** + - Increase polling interval + - Check server response times + - Use filtering to reduce data volume + +4. 
**Memory usage** + - Implement log rotation/cleanup + - Limit streaming duration + - Process logs in batches + +### Connection Recovery + +```javascript +class ResilientStreamer extends LogStreamer { + constructor(baseUrl, token, maxRetries = 3) { + super(baseUrl, token); + this.maxRetries = maxRetries; + this.retryCount = 0; + } + + async streamLogs(env, options = {}) { + try { + await super.streamLogs(env, options); + this.retryCount = 0; // Reset on success + } catch (error) { + if (this.retryCount < this.maxRetries) { + this.retryCount++; + console.log(`Retrying connection (${this.retryCount}/${this.maxRetries})...`); + + // Exponential backoff + const delay = Math.pow(2, this.retryCount) * 1000; + setTimeout(() => this.streamLogs(env, options), delay); + } else { + console.error('Max retries exceeded'); + } + } + } +} +``` + +### Performance Optimization + +- **Use appropriate polling intervals**: Balance between real-time needs and server load +- **Implement filtering**: Reduce data transfer by filtering on the server side +- **Batch processing**: Process multiple log entries together when possible +- **Connection pooling**: Reuse connections for multiple streaming sessions +- **Compression**: Enable gzip compression for large data transfers + +## API Reference + +### Endpoints + +- `POST /call-stream` - Execute streaming tools via SSE +- `GET /health` - Server health check +- `GET /tools` - List available tools + +### Authentication + +All requests require Bearer token authentication: + +``` +Authorization: Bearer +``` + +### Response Codes + +- `200` - Success, streaming begins +- `401` - Unauthorized (invalid token) +- `404` - Tool not found +- `400` - Invalid input parameters + +### Rate Limiting + +- Default: 100 requests per minute per client +- Streaming connections: 10 concurrent streams per client +- Burst limit: 20 requests per 10 seconds + +## Best Practices + +1. **Choose the right tool**: Use `logs.live` for production monitoring, `logs.stream` for development +2. **Implement proper error handling**: Always handle connection failures and parsing errors +3. **Use filtering**: Filter logs on the server side to reduce bandwidth and processing +4. **Monitor resource usage**: Watch memory usage and connection counts +5. **Implement reconnection logic**: Handle network interruptions gracefully +6. **Log your logging**: Keep track of your streaming client's performance +7. **Security**: Never expose streaming endpoints publicly without proper authentication + +## Examples in the Repository + +See the `examples/` directory for complete working examples: + +- `examples/mcp-sse-client.js` - Node.js SSE client +- `examples/python-client.py` - Python client (REST API) +- `examples/mcp-client.js` - Basic MCP client + +## Contributing + +When contributing to the streaming functionality: + +1. Test with both streaming tools +2. Verify SSE event parsing works correctly +3. Test error conditions and recovery +4. Update client examples as needed +5. 
Document any new features or parameters +docs/SSE_GUIDE.md \ No newline at end of file diff --git a/examples/app/schema/photo.yml b/examples/app/schema/photo.yml new file mode 100644 index 000000000..fcc45933d --- /dev/null +++ b/examples/app/schema/photo.yml @@ -0,0 +1,52 @@ +name: photo +properties: + - name: title + type: string + - name: subtitle + type: string + - name: description + type: text + - name: category + type: string + - name: tags + type: array + - name: original_filename + type: string + - name: file_size + type: integer + - name: imported_at + type: datetime + - name: image + type: upload + options: + content_length: + lte: 2048 + versions: + - name: thumbnail + output: + format: webp + quality: 80 + resize: + width: 300 + height: 300 + fit: cover + - name: medium + output: + format: webp + quality: 85 + resize: + width: 800 + height: 800 + fit: contain + - name: large + output: + format: webp + quality: 90 + resize: + width: 1600 + height: 1600 + fit: contain + - name: full + output: + format: webp + quality: 95 diff --git a/examples/mcp-client.js b/examples/mcp-client.js new file mode 100644 index 000000000..c4eeafd5b --- /dev/null +++ b/examples/mcp-client.js @@ -0,0 +1,74 @@ +#!/usr/bin/env node +/** + * MCP Client Example - Node.js + * Demonstrates connecting to MCP server and calling tools + */ + +const fetch = require('node-fetch'); // npm i node-fetch + +async function main() { + const BASE_URL = 'http://localhost:3030'; + const TOKEN = 'client-secret'; // from clients.json + + console.log('🤖 MCP Client Example\\n'); + + // 1. List tools + console.log('1. Listing tools...'); + const toolsRes = await fetch(`${BASE_URL}/tools`, { + headers: { 'Authorization': `Bearer ${TOKEN}` } + }); + const tools = await toolsRes.json(); + console.log(`Available: ${tools.tools.length} tools`); + console.log(tools.tools.map(t => ` • ${t.name}`).join('\\n')); + + // 2. List environments + console.log('\\n2. Listing environments...'); + const envRes = await fetch(`${BASE_URL}/call`, { + method: 'POST', + headers: { + 'Authorization': `Bearer ${TOKEN}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + tool: 'platformos.env.list', + input: {} + }) + }); + const envData = await envRes.json(); + const envs = JSON.parse(envData.content[0].text); + console.log('Envs:', envs.envs?.map(e => e.name).join(', ') || 'None'); + + // 3. GraphQL example (if staging env exists) + if (envs.envs?.some(e => e.name === 'staging')) { + console.log('\\n3. GraphQL query...'); + const gqlRes = await fetch(`${BASE_URL}/call`, { + method: 'POST', + headers: { + 'Authorization': `Bearer ${TOKEN}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + tool: 'platformos.graphql.execute', + input: { + env: 'staging', + query: ` + query { + __schema { + types { + name + } + } + } + ` + } + }) + }); + const gqlData = await gqlRes.json(); + console.log('GraphQL:', JSON.parse(gqlData.content[0].text)); + } + + console.log('\\n✅ Demo complete! 
Check server logs.'); +} + +main().catch(console.error); +","path":"examples/mcp-client.js \ No newline at end of file diff --git a/examples/python-client.py b/examples/python-client.py new file mode 100644 index 000000000..2b427575e --- /dev/null +++ b/examples/python-client.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +\"\"\"MCP Client Example - Python +Demonstrates MCP server interaction with requests library +$ pip install requests +\"\"\" +import requests +import json +import sys + +BASE_URL = 'http://localhost:3030' +TOKEN = 'client-secret' # from clients.json + +def main(): + print('🤖 MCP Python Client\\n') + + headers = { + 'Authorization': f'Bearer {TOKEN}', + 'Content-Type': 'application/json' + } + + # 1. List tools + print('1. Listing tools...') + r = requests.get(f'{BASE_URL}/tools', headers=headers) + r.raise_for_status() + tools = r.json() + print(f'Available: {len(tools[\"tools\"])} tools') + for tool in tools['tools']: + print(f' • {tool[\"name\"]} - {tool[\"description\"][:50]}...') + + # 2. List environments + print('\\n2. Listing environments...') + r = requests.post(f'{BASE_URL}/call', + headers=headers, + json={'tool': 'platformos.env.list', 'input': {}}) + r.raise_for_status() + data = r.json() + envs = json.loads(data['content'][0]['text']) + env_names = [e['name'] for e in envs.get('envs', [])] + print(f'Envs: {\", \".join(env_names) or \"None\"}') + + # 3. Liquid render demo + print('\\n3. Liquid render demo...') + r = requests.post(f'{BASE_URL}/call', + headers=headers, + json={ + 'tool': 'platformos.liquid.render', + 'input': { + 'env': 'staging' if 'staging' in env_names else env_names[0] if env_names else None, + 'template': 'Hello {{name}}! Today is {{ \"now\" | date: \"%Y-%m-%d\" }}', + 'locals': {'name': 'Python MCP Client'} + } + }) + if r.status_code == 200: + data = r.json() + result = json.loads(data['content'][0]['text']) + print('Output:', result['output']) + else: + print('Error:', r.text) + + # 4. Health check (admin) + print('\\n4. Server health (ADMIN_API_KEY required)...') + r = requests.get(f'{BASE_URL}/health', headers={'x-api-key': 'your-admin-key'}) + if r.status_code == 200: + health = r.json() + print(f'Status: {health[\"status\"]}, Tools: {health[\"toolCount\"]}') + else: + print('Admin check failed (expected without ADMIN_API_KEY)') + + print('\\n✅ Demo complete! 
🎉') + +if __name__ == '__main__': + main() +","path":"examples/python-client.py \ No newline at end of file diff --git a/jest.config.mcp-min.json b/jest.config.mcp-min.json new file mode 100644 index 000000000..89be17fdb --- /dev/null +++ b/jest.config.mcp-min.json @@ -0,0 +1,14 @@ +{ + "testMatch": ["/mcp-min/**/__tests__/**/*.js"], + "setupFiles": ["/mcp/jest.setup.js"], + "collectCoverage": true, + "collectCoverageFrom": ["mcp-min/**/*.js"], + "coverageThreshold": { + "./mcp-min/**": { + "branches": 50, + "functions": 50, + "lines": 55, + "statements": 55 + } + } +} diff --git a/lib/data/waitForStatus.js b/lib/data/waitForStatus.js index a185cd397..e85461b90 100644 --- a/lib/data/waitForStatus.js +++ b/lib/data/waitForStatus.js @@ -2,29 +2,41 @@ import logger from '../logger.js'; const waitForStatus = (statusCheck, pendingStatus, successStatus, interval = 5000, cb = null) => { return new Promise((resolve, reject) => { + let pollCount = 0; let getStatus = () => { + pollCount++; statusCheck() .then(response => { try { + logger.Debug(`[waitForStatus] Poll #${pollCount}, response: ${JSON.stringify(response)}`); const status = response.status.name || response.status; + logger.Debug(`[waitForStatus] Status: ${status}, pending: ${pendingStatus.includes(status)}, success: ${status === successStatus}`); if (cb) cb(response); if (pendingStatus.includes(status)) setTimeout(getStatus, interval); - else if (status === successStatus) + else if (status === successStatus) { + logger.Debug(`[waitForStatus] Success, resolving with access_token: ${response.access_token}`); resolve(response); - else if (status === 'failed') + } + else if (status === 'failed') { + logger.Debug(`[waitForStatus] Failed with status: ${status}`); reject(response); - else + } + else { + logger.Debug(`[waitForStatus] Unknown status: ${status}, continuing poll`); setTimeout(getStatus, interval); + } } catch(e) { - reject(e); + logger.Debug(`[waitForStatus] Error processing response: ${e.message}`); + reject(e); } }) .catch((error) => { - logger.Debug('[ERR] waitForStatus did not receive `status` in response object', error); + logger.Debug('[waitForStatus] Poll error', error); reject(error); }); }; + logger.Debug(`[waitForStatus] Starting poll, interval: ${interval}ms`); getStatus(); }); }; diff --git a/lib/environments.js b/lib/environments.js index 5832a83e6..7c9326229 100644 --- a/lib/environments.js +++ b/lib/environments.js @@ -6,7 +6,7 @@ import logger from '../lib/logger.js'; import waitForStatus from '../lib/data/waitForStatus.js'; const storeEnvironment = settings => { - logger.Debug(`[storeEnvironment] ${JSON.stringify(settings, null, 2)}`); + logger.Debug(`[storeEnvironment] Input settings: ${JSON.stringify(settings, null, 2)}`); const environmentSettings = { [settings.environment]: { @@ -20,45 +20,70 @@ const storeEnvironment = settings => { const configPath = files.getConfigPath(); logger.Debug(`[storeEnvironment] Current config path: ${configPath}`); - const newSettings = Object.assign({}, files.getConfig(), environmentSettings); + const existingConfig = files.getConfig(); + logger.Debug(`[storeEnvironment] Existing config: ${JSON.stringify(existingConfig)}`); + + const newSettings = Object.assign({}, existingConfig, environmentSettings); + logger.Debug(`[storeEnvironment] New config to write: ${JSON.stringify(newSettings)}`); + fs.writeFileSync(configPath, JSON.stringify(newSettings, null, 2)); + logger.Debug(`[storeEnvironment] Successfully wrote config to ${configPath}`); }; const waitForAccessToken = async 
(deviceCode, interval) => { + logger.Debug('[waitForAccessToken] Starting token polling'); + logger.Debug('[waitForAccessToken] deviceCode: ' + deviceCode); + logger.Debug('[waitForAccessToken] interval: ' + interval + 'ms'); + const tokenResponse = await waitForStatus( () => { + logger.Debug('[waitForAccessToken] Fetching device token'); return Portal.fetchDeviceAccessToken(deviceCode).then(response => { + logger.Debug('[waitForAccessToken] Token response status: ' + (response.status || response.statusCode)); + logger.Debug('[waitForAccessToken] Token response keys: ' + Object.keys(response).join(', ')); let token; if (response['access_token']) { token = { ...response, status: 'success' }; + logger.Debug('[waitForAccessToken] access_token found in response'); } else { + logger.Debug('[waitForAccessToken] No access_token in response'); throw `Unhandled response: ${response.statusCode}`; } return Promise.resolve(token); }) .catch(request => { + logger.Debug('[waitForAccessToken] Token fetch error, statusCode: ' + request.statusCode); switch (request.statusCode) { case 400: { const token = { status: request.response.body.error }; + logger.Debug('[waitForAccessToken] 400 error, error: ' + request.response.body.error); return Promise.resolve(token); } default: + logger.Debug('[waitForAccessToken] Unexpected error, throwing'); throw request; } }); }, 'authorization_pending', 'success', interval ); + logger.Debug('[waitForAccessToken] Completed, tokenResponse: ' + JSON.stringify(tokenResponse)); return tokenResponse['access_token']; }; const deviceAuthorizationFlow = async (instanceUrl) => { const instanceDomain = (new URL(instanceUrl)).hostname; + logger.Debug('[deviceAuthorizationFlow] Instance URL: ' + instanceUrl); + logger.Debug('[deviceAuthorizationFlow] Instance domain: ' + instanceDomain); + logger.Debug('[deviceAuthorizationFlow] Partner Portal URL: ' + Portal.url()); + let deviceAuthorizationResponse; try { + logger.Debug('[deviceAuthorizationFlow] Requesting device authorization...'); deviceAuthorizationResponse = await Portal.requestDeviceAuthorization(instanceDomain); + logger.Debug('[deviceAuthorizationFlow] Device authorization response:', deviceAuthorizationResponse); } catch (error) { // Handle the case where instance is not registered in partner portal if (error.statusCode === 404 && error.options?.uri?.includes('/oauth/authorize_device')) { @@ -79,7 +104,10 @@ const deviceAuthorizationFlow = async (instanceUrl) => { const deviceCode = deviceAuthorization['device_code']; const interval = (deviceAuthorization['interval'] || 5) * 1000; - logger.Debug('verificationUrl', verificationUrl); + logger.Debug('[deviceAuthorizationFlow] Verification URL: ' + verificationUrl); + logger.Debug('[deviceAuthorizationFlow] Device code: ' + deviceCode); + logger.Debug('[deviceAuthorizationFlow] Poll interval: ' + interval + 'ms'); + logger.Debug('[deviceAuthorizationFlow] Waiting for user to authorize at ' + verificationUrl); const openFn = process.env['CI'] ? 
console.log : open; try { await openFn(verificationUrl); @@ -92,6 +120,7 @@ const deviceAuthorizationFlow = async (instanceUrl) => { } const accessToken = await waitForAccessToken(deviceCode, interval); + logger.Debug('[deviceAuthorizationFlow] Received access token, length: ' + (accessToken?.length || 0)); return accessToken; }; diff --git a/lib/envs/add.js b/lib/envs/add.js index a41a759a1..4caf09249 100644 --- a/lib/envs/add.js +++ b/lib/envs/add.js @@ -26,6 +26,10 @@ const login = async (email, password, url) => { }; const addEnv = async (environment, params) => { + logger.Debug(`[addEnv] Adding environment: ${environment}`); + logger.Debug(`[addEnv] URL: ${params.url}`); + logger.Debug(`[addEnv] Partner Portal URL: ${params.partnerPortalUrl || 'default'}`); + checkParams(environment, params); if (params.partnerPortalUrl) { process.env['PARTNER_PORTAL_HOST'] ||= params.partnerPortalUrl; @@ -55,7 +59,12 @@ const addEnv = async (environment, params) => { token = await login(params.email, password, params.url); } - if (token) saveToken(settings, token); + if (token) { + logger.Debug('[addEnv] Token obtained, length: ' + (token?.length || 0)); + saveToken(settings, token); + } else { + logger.Debug('[addEnv] ERROR: Token is falsy, cannot save environment'); + } }; export default addEnv; diff --git a/lib/initilizeEsmModules.js b/lib/initilizeEsmModules.js new file mode 100644 index 000000000..7be35b6b6 --- /dev/null +++ b/lib/initilizeEsmModules.js @@ -0,0 +1,2 @@ +module.exports = { +}; diff --git a/lib/portal.js b/lib/portal.js index d9e35b25e..fd8806c24 100644 --- a/lib/portal.js +++ b/lib/portal.js @@ -55,6 +55,8 @@ const Portal = { }); }, requestDeviceAuthorization: (instanceDomain) => { + logger.Debug(`[Portal.requestDeviceAuthorization] Sending request to ${Portal.url()}/oauth/authorize_device`); + logger.Debug(`[Portal.requestDeviceAuthorization] Instance domain: ${instanceDomain}`); return apiRequest({ method: 'POST', uri: `${Portal.url()}/oauth/authorize_device`, @@ -65,6 +67,7 @@ const Portal = { }); }, fetchDeviceAccessToken: (deviceCode) => { + logger.Debug(`[Portal.fetchDeviceAccessToken] Fetching access token from ${Portal.url()}/oauth/device_token`); return apiRequest({ method: 'POST', uri: `${Portal.url()}/oauth/device_token`, diff --git a/lib/proxy.js b/lib/proxy.js index b145b2d84..245c458aa 100644 --- a/lib/proxy.js +++ b/lib/proxy.js @@ -42,7 +42,7 @@ class Gateway { } dataExportStart(export_internal, csv_import = false) { - const formData = { export_internal }; + const formData = { export_internal: String(export_internal) }; let uri = `${this.api_url}/exports`; if (csv_import) { uri += '?csv_export=true'; diff --git a/lib/push.js b/lib/push.js index ac15549ac..b3524a84b 100644 --- a/lib/push.js +++ b/lib/push.js @@ -46,7 +46,7 @@ const getDeploymentStatus = ({ id }) => { .getStatus(id) .then(response => { if (response && response.status === 'ready_for_import') { - setTimeout(getStatus, 1500); + setTimeout(getStatus, 1000); } else if (response && response.status === 'error') { const body = response.error; let message = body.error; diff --git a/lib/reports/r-slow.json b/lib/reports/r-slow.json index 0cb63ef47..c4164fbcf 100644 --- a/lib/reports/r-slow.json +++ b/lib/reports/r-slow.json @@ -1,6 +1,6 @@ { "aggs": { - "histogram": "SELECT lb_status_code, target_status_code, http_request_path as path, count(target_status_code) as count, avg(target_processing_time) as avg_target_processing_time FROM query GROUP BY lb_status_code, target_status_code, path ORDER BY 
avg_target_processing_time DESC limit 20" + "histogram": "SELECT lb_status_code, target_status_code, http_request_path as path, count(target_status_code) as count, approx_percentile_cont(target_processing_time, 0.5) as avg_target_processing_time FROM query GROUP BY lb_status_code, target_status_code, path ORDER BY avg_target_processing_time DESC limit 20" }, "query": { "sql": "select * from requests where target_processing_time > 1" diff --git a/lib/s3UploadFile.js b/lib/s3UploadFile.js index aa3276513..cbb306d4c 100644 --- a/lib/s3UploadFile.js +++ b/lib/s3UploadFile.js @@ -4,11 +4,13 @@ import mime from 'mime'; const uploadFile = async (fileName, s3Url) => { const stats = fs.statSync(fileName); const fileBuffer = fs.readFileSync(fileName); + const contentType = mime.getType(fileName); const response = await fetch(s3Url, { method: 'PUT', headers: { - 'Content-Length': stats['size'].toString() + 'Content-Length': stats['size'].toString(), + 'Content-Type': contentType }, body: fileBuffer }); @@ -27,10 +29,15 @@ const uploadFileFormData = async (filePath, data) => { formData.append(k, v); }); - formData.append('Content-Type', mime.getType(filePath)); - const fileBuffer = fs.readFileSync(filePath); - formData.append('file', new Blob([fileBuffer]), filePath.split('/').pop()); + const contentType = mime.getType(filePath); + + if (!data.fields['Content-Type']) { + formData.append('Content-Type', contentType); + } + + const fileName = filePath.split('/').pop(); + formData.append('file', new File([fileBuffer], fileName, { type: contentType })); const response = await fetch(data.url, { method: 'POST', diff --git a/lib/server.js b/lib/server.js index b9be9ae2e..29df4a4e5 100644 --- a/lib/server.js +++ b/lib/server.js @@ -17,6 +17,7 @@ import logger from '../lib/logger.js'; const start = (env, client) => { const port = env.PORT || 3333; + const host = env.HOST || 'localhost'; const app = express(); const legacy = express(); @@ -80,10 +81,10 @@ const start = (env, client) => { gateway .logsv2({ ...req.query }) .then(body => { - res.send(body); + res.send(body); }) .catch(error => { - logger.Debug(error); res.send(error); + logger.Debug(error); res.send(error); }); }); @@ -91,10 +92,10 @@ const start = (env, client) => { gateway .logsv2(req.body) .then(body => { - res.send(body); + res.send(body); }) .catch(error => { - logger.Debug(error); res.send(error); + logger.Debug(error); res.send(error); }); }); @@ -123,14 +124,14 @@ const start = (env, client) => { ); app - .listen(port, function() { + .listen(port, host, function() { logger.Debug(`Server is listening on ${port}`); logger.Success(`Connected to ${env.MARKETPLACE_URL}`); - logger.Success(`Admin: http://localhost:${port}`); + logger.Success(`Admin: http://${host}:${port}`); logger.Success('---'); - logger.Success(`GraphiQL IDE: http://localhost:${port}/gui/graphql`); - logger.Success(`Liquid evaluator: http://localhost:${port}/gui/liquid`); - logger.Success(`Instance Logs: http://localhost:${port}/logs`); + logger.Success(`GraphiQL IDE: http://${host}:${port}/gui/graphql`); + logger.Success(`Liquid evaluator: http://${host}:${port}/gui/liquid`); + logger.Success(`Instance Logs: http://${host}:${port}/logs`); }) .on('error', err => { if (err.errno === 'EADDRINUSE') { @@ -146,7 +147,7 @@ const start = (env, client) => { legacy .listen(parseInt(port)+1, function(){}) .on('error', _err => { - logger.Error(`Could not run the legacy admin panel at http://localhost:${parseInt(port)+1}`); + logger.Error(`Could not run the legacy admin panel at 
http://${host}:${parseInt(port)+1}`); }); }; diff --git a/mcp-min/.npmignore b/mcp-min/.npmignore new file mode 100644 index 000000000..be54a5058 --- /dev/null +++ b/mcp-min/.npmignore @@ -0,0 +1,5 @@ +node_modules +__tests__ +package-lock.json +.pos +.env diff --git a/mcp-min/README.md b/mcp-min/README.md new file mode 100644 index 000000000..679228af4 --- /dev/null +++ b/mcp-min/README.md @@ -0,0 +1,46 @@ +mcp-min: Minimal MCP server (stdio + HTTP with SSE) + +Purpose +- Demonstration subpackage that runs both a JSON-line stdio transport and an HTTP server +- HTTP endpoints: /health, /tools, /call, /call-stream (POST) +- Includes tools: echo, list-envs (reads .pos and returns environments) + +Run +- cd mcp-min +- npm install +- npm start + +Debug mode +- To enable verbose debug logging, use: npm run start:debug +- Or set env variable manually: MCP_MIN_DEBUG=1 node index.js +- Debug logs include: detailed HTTP access logs, request/response tracing, stdio requests/responses, SSE connection status and heartbeats, and tool-level progress + +Root configuration (recommended) +- Server runs at root path (/). +- Endpoints: GET /health, GET /tools, POST /call, POST /call-stream +- SSE handshake on GET / (Accept: text/event-stream) emits first event: + event: endpoint + data: /call-stream +- This matches clients (like cagent) that connect to base URL and expect an absolute endpoint path. + +Client example (cagent) +- url: http://localhost:5910 +- transport_type: sse +- headers: { Authorization: "Bearer " } + +Files +- index.js: entry point; starts stdio and HTTP servers +- stdio-server.js: simple JSON-line protocol over stdin/stdout +- http-server.js: Express-based HTTP API and SSE streaming endpoint +- tools.js: Tool registry with handlers (echo, list-envs, sync.singleFile) +- sync/single-file.js: Extracted implementation of sync.singleFile tool +- sse.js: Server-Sent Events helpers and heartbeat +- config.js: Centralized DEBUG flag and debugLog helper + +SSE framing and heartbeat +- Each SSE message is framed using optional "event: " and one or more "data: " lines followed by an empty line +- Heartbeat is sent every 15s as a comment line starting with ':' to keep intermediaries from closing idle connections + +Notes +- ESM package. 
Keep dependencies minimal (express, body-parser, morgan) +- Designed to be a minimal, self-contained example in a single process diff --git a/mcp-min/__tests__/check-run.test.js b/mcp-min/__tests__/check-run.test.js new file mode 100644 index 000000000..36832db92 --- /dev/null +++ b/mcp-min/__tests__/check-run.test.js @@ -0,0 +1,97 @@ + +import { pathToFileURL } from 'url'; +import path from 'path'; +import os from 'os'; +import fs from 'fs'; +import { describe, test, expect, beforeAll } from 'vitest'; + +const checkRunModPath = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'check', 'run.js')).href; + +describe('platformos.check-run', () => { + let checkRunTool; + + beforeAll(async () => { + const mod = await import(checkRunModPath); + checkRunTool = mod.default; + }); + + test('has correct description', () => { + expect(checkRunTool.description).toContain('platformos-check'); + expect(checkRunTool.description).toContain('Node.js'); + }); + + test('has input schema with expected properties', () => { + expect(checkRunTool.inputSchema.type).toBe('object'); + expect(checkRunTool.inputSchema.properties.appPath).toBeDefined(); + expect(checkRunTool.inputSchema.properties.autoFix).toBeDefined(); + }); + + test('returns PATH_NOT_FOUND for non-existent path', async () => { + const result = await checkRunTool.handler({ appPath: '/tmp/does-not-exist-xyz-123' }); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('PATH_NOT_FOUND'); + expect(result.error.message).toContain('/tmp/does-not-exist-xyz-123'); + }); + + test('returns NOT_A_DIRECTORY for file path', async () => { + // Use a file that definitely exists + const tmpFile = path.join(os.tmpdir(), 'check-run-test-file.txt'); + fs.writeFileSync(tmpFile, 'test', 'utf8'); + + try { + const result = await checkRunTool.handler({ appPath: tmpFile }); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('NOT_A_DIRECTORY'); + expect(result.error.message).toContain(tmpFile); + } finally { + fs.unlinkSync(tmpFile); + } + }); + + test('includes timing metadata on success or dependency error', async () => { + const result = await checkRunTool.handler({ appPath: '.' }); + + if (result.ok) { + // theme-check-node is installed + expect(result.meta.startedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(result.meta.finishedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(result.meta.appPath).toBeDefined(); + expect(result.data).toBeDefined(); + expect(typeof result.data.offenseCount).toBe('number'); + expect(typeof result.data.filesChecked).toBe('number'); + expect(typeof result.data.autoFixed).toBe('boolean'); + expect(result.data.autoFixed).toBe(false); + expect(Array.isArray(result.data.files)).toBe(true); + } else { + // theme-check-node is not installed — graceful handling + expect(result.error.code).toBe('MISSING_DEPENDENCY'); + expect(result.error.message).toContain('@platformos/platformos-check-node'); + } + }); + + test('returns structured file data when dependency is available', async () => { + const result = await checkRunTool.handler({ appPath: '.' 
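+      // appPath '.' runs the checker against the pos-cli repo itself; the assertions below only care about the shape of the result, not specific offenses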
}); + + if (!result.ok && result.error.code === 'MISSING_DEPENDENCY') { + // Skip — dependency not installed + return; + } + + expect(result.ok).toBe(true); + expect(typeof result.data.errorCount).toBe('number'); + expect(typeof result.data.warningCount).toBe('number'); + expect(typeof result.data.infoCount).toBe('number'); + expect(typeof result.data.fileCount).toBe('number'); + + // Verify each file entry structure + for (const file of result.data.files) { + expect(typeof file.path).toBe('string'); + expect(Array.isArray(file.offenses)).toBe(true); + expect(typeof file.errorCount).toBe('number'); + expect(typeof file.warningCount).toBe('number'); + expect(typeof file.infoCount).toBe('number'); + } + }); +}); diff --git a/mcp-min/__tests__/check.test.js b/mcp-min/__tests__/check.test.js new file mode 100644 index 000000000..ccbef7ebd --- /dev/null +++ b/mcp-min/__tests__/check.test.js @@ -0,0 +1,150 @@ + +import { pathToFileURL } from 'url'; +import path from 'path'; +import { exec } from 'child_process'; +import { promisify } from 'util'; +import { describe, test, expect, beforeAll } from 'vitest'; + +const execAsync = promisify(exec); +const checkModPath = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'check', 'index.js')).href; + +let cliAvailable = false; + +describe('platformos.check', () => { + let checkTool; + + beforeAll(async () => { + const mod = await import(checkModPath); + checkTool = mod.default; + + // Check if platformos-check CLI is available + try { + await execAsync('platformos-check --version'); + cliAvailable = true; + } catch { + cliAvailable = false; + } + }); + + test('has correct description', () => { + expect(checkTool.description).toContain('platformos-check'); + expect(checkTool.description).toContain('Liquid'); + }); + + test('has input schema with required properties', () => { + expect(checkTool.inputSchema.type).toBe('object'); + expect(checkTool.inputSchema.properties.appPath).toBeDefined(); + expect(checkTool.inputSchema.properties.format).toBeDefined(); + expect(checkTool.inputSchema.properties.autoCorrect).toBeDefined(); + expect(checkTool.inputSchema.properties.list).toBeDefined(); + }); + + test('lists enabled checks with list=true', async () => { + const result = await checkTool.handler({ list: true }); + + if (!cliAvailable) { + expect(result.ok).toBe(false); + expect(result.error.code).toBe('CHECK_ERROR'); + return; + } + + expect(result.ok).toBe(true); + expect(result.data).toBeDefined(); + expect(result.data.listChecks).toBe(true); + expect(typeof result.data.result).toBe('string'); + expect(result.data.result).toMatch(/SyntaxError|InvalidArgs|MissingTemplate/); + }); + + test('prints config with print=true', async () => { + const result = await checkTool.handler({ print: true }); + + if (!cliAvailable) { + expect(result.ok).toBe(false); + expect(result.error.code).toBe('CHECK_ERROR'); + return; + } + + expect(result.ok).toBe(true); + expect(result.data).toBeDefined(); + expect(result.data.printConfig).toBe(true); + expect(typeof result.data.result).toBe('string'); + expect(result.data.result.length).toBeGreaterThan(100); + }); + + test('runs check and returns results', async () => { + const result = await checkTool.handler({ format: 'json', appPath: '.' 
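+    // format: 'json' requests machine-readable output; the handler echoes it back on result.data.format, asserted below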
}); + + if (!cliAvailable) { + expect(result.ok).toBe(false); + expect(result.error.code).toBe('CHECK_ERROR'); + return; + } + + expect(result.ok).toBe(true); + expect(result.data).toBeDefined(); + expect(result.data.format).toBe('json'); + expect(result.data.appPath).toBe('.'); + expect(result.meta).toBeDefined(); + expect(result.meta.startedAt).toBeDefined(); + expect(result.meta.finishedAt).toBeDefined(); + }); + + test('accepts multiple categories as array', async () => { + const result = await checkTool.handler({ + format: 'json', + category: ['liquid', 'html'], + appPath: '.' + }); + + if (!cliAvailable) { + expect(result.ok).toBe(false); + return; + } + + expect(result.ok).toBe(true); + }); + + test('accepts excludeCategory as array', async () => { + const result = await checkTool.handler({ + format: 'json', + excludeCategory: ['performance'], + appPath: '.' + }); + + if (!cliAvailable) { + expect(result.ok).toBe(false); + return; + } + + expect(result.ok).toBe(true); + }); + + test('handles autoCorrect parameter', async () => { + const result = await checkTool.handler({ + format: 'json', + autoCorrect: true, + appPath: '.', + list: false + }); + + if (!cliAvailable) { + expect(result.ok).toBe(false); + return; + } + + expect(result.ok).toBe(true); + expect(result.data.autoCorrect).toBe(true); + }); + + test('includes timing metadata', async () => { + const result = await checkTool.handler({ list: true }); + + if (!cliAvailable) { + expect(result.ok).toBe(false); + return; + } + + expect(result.meta.startedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(result.meta.finishedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + }); +}); diff --git a/mcp-min/__tests__/constants.test.js b/mcp-min/__tests__/constants.test.js new file mode 100644 index 000000000..0e807b159 --- /dev/null +++ b/mcp-min/__tests__/constants.test.js @@ -0,0 +1,244 @@ + +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +import constantsListTool from '../constants/list.js'; +import constantsSetTool from '../constants/set.js'; +import constantsUnsetTool from '../constants/unset.js'; + +const mockSettings = { + fetchSettings: (env) => { + if (env === 'staging') { + return { url: 'https://staging.example.com', email: 'test@example.com', token: 'secret123' }; + } + return null; + } +}; + +describe('constants-list', () => { + test('lists constants successfully', async () => { + class MockGateway { + async graph() { + return { + data: { + constants: { + results: [ + { name: 'API_KEY', value: 'abc123', updated_at: '2025-01-01T00:00:00Z' }, + { name: 'SECRET', value: 'xyz789', updated_at: '2025-01-02T00:00:00Z' } + ] + } + } + }; + } + } + + const res = await constantsListTool.handler( + { env: 'staging' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(true); + expect(res.data.constants).toHaveLength(2); + expect(res.data.constants[0].name).toBe('API_KEY'); + expect(res.data.constants[0].value).toBe('abc123'); + expect(res.data.count).toBe(2); + }); + + test('returns empty list when no constants', async () => { + class MockGateway { + async graph() { + return { data: { constants: { results: [] } } }; + } + } + + const res = await constantsListTool.handler( + { env: 'staging' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(true); + expect(res.data.constants).toHaveLength(0); + expect(res.data.count).toBe(0); + }); + + test('returns error when env not found', async () => { + const res = await constantsListTool.handler( + { 
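+      // 'unknown' is deliberately absent from mockSettings, so fetchSettings returns null and the tool must fail with CONSTANTS_LIST_FAILED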
env: 'unknown' }, + { settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('CONSTANTS_LIST_FAILED'); + expect(res.error.message).toContain('unknown'); + }); + + test('handles GraphQL errors', async () => { + class MockGateway { + async graph() { + return { errors: [{ message: 'Unauthorized' }] }; + } + } + + const res = await constantsListTool.handler( + { env: 'staging' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('GRAPHQL_ERROR'); + expect(res.error.message).toBe('Unauthorized'); + }); + + test('has correct schema', () => { + expect(constantsListTool.inputSchema.required).toContain('env'); + }); +}); + +describe('constants-set', () => { + test('sets constant successfully', async () => { + let capturedQuery; + class MockGateway { + async graph({ query }) { + capturedQuery = query; + return { + data: { + constant_set: { name: 'API_KEY', value: 'newvalue' } + } + }; + } + } + + const res = await constantsSetTool.handler( + { env: 'staging', name: 'API_KEY', value: 'newvalue' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(true); + expect(res.data.name).toBe('API_KEY'); + expect(res.data.value).toBe('newvalue'); + expect(capturedQuery).toContain('constant_set'); + expect(capturedQuery).toContain('API_KEY'); + }); + + test('escapes quotes in name and value', async () => { + let capturedQuery; + class MockGateway { + async graph({ query }) { + capturedQuery = query; + return { data: { constant_set: { name: 'test', value: 'val' } } }; + } + } + + await constantsSetTool.handler( + { env: 'staging', name: 'KEY"WITH"QUOTES', value: 'value"here' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(capturedQuery).toContain('\\"'); + }); + + test('returns error when env not found', async () => { + const res = await constantsSetTool.handler( + { env: 'unknown', name: 'KEY', value: 'val' }, + { settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('CONSTANTS_SET_FAILED'); + }); + + test('handles GraphQL errors', async () => { + class MockGateway { + async graph() { + return { errors: [{ message: 'Invalid name' }] }; + } + } + + const res = await constantsSetTool.handler( + { env: 'staging', name: 'BAD', value: 'val' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('GRAPHQL_ERROR'); + }); + + test('has correct schema', () => { + expect(constantsSetTool.inputSchema.required).toContain('env'); + expect(constantsSetTool.inputSchema.required).toContain('name'); + expect(constantsSetTool.inputSchema.required).toContain('value'); + }); +}); + +describe('constants-unset', () => { + test('deletes constant successfully', async () => { + let capturedQuery; + class MockGateway { + async graph({ query }) { + capturedQuery = query; + return { + data: { + constant_unset: { name: 'OLD_KEY' } + } + }; + } + } + + const res = await constantsUnsetTool.handler( + { env: 'staging', name: 'OLD_KEY' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(true); + expect(res.data.name).toBe('OLD_KEY'); + expect(res.data.deleted).toBe(true); + expect(capturedQuery).toContain('constant_unset'); + }); + + test('handles non-existent constant', async () => { + class MockGateway { + async graph() { + return { data: { constant_unset: null } }; + } + } + + const res = await constantsUnsetTool.handler( + { env: 'staging', name: 
'NONEXISTENT' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(true); + expect(res.data.name).toBe('NONEXISTENT'); + expect(res.data.deleted).toBe(false); + }); + + test('returns error when env not found', async () => { + const res = await constantsUnsetTool.handler( + { env: 'unknown', name: 'KEY' }, + { settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('CONSTANTS_UNSET_FAILED'); + }); + + test('handles GraphQL errors', async () => { + class MockGateway { + async graph() { + return { errors: [{ message: 'Forbidden' }] }; + } + } + + const res = await constantsUnsetTool.handler( + { env: 'staging', name: 'KEY' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('GRAPHQL_ERROR'); + }); + + test('has correct schema', () => { + expect(constantsUnsetTool.inputSchema.required).toContain('env'); + expect(constantsUnsetTool.inputSchema.required).toContain('name'); + }); +}); diff --git a/mcp-min/__tests__/data.clean.test.js b/mcp-min/__tests__/data.clean.test.js new file mode 100644 index 000000000..2c6624856 --- /dev/null +++ b/mcp-min/__tests__/data.clean.test.js @@ -0,0 +1,196 @@ +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +// Mock the pos-cli libs before importing tools +vi.mock('../../lib/files', () => ({ + default: { getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) }, + getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) +})); + +vi.mock('../../lib/settings', () => ({ + default: { fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) }, + fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) +})); + +describe('data-clean tools', () => { + let dataCleanTool; + let dataCleanStatusTool; + + beforeAll(async () => { + const cleanModule = await import('../data/clean.js'); + const statusModule = await import('../data/clean-status.js'); + dataCleanTool = cleanModule.default; + dataCleanStatusTool = statusModule.default; + }); + + describe('data-clean', () => { + test('has correct description and inputSchema', () => { + expect(dataCleanTool.description).toContain('clean'); + expect(dataCleanTool.description).toContain('DESTRUCTIVE'); + expect(dataCleanTool.inputSchema.properties).toHaveProperty('env'); + expect(dataCleanTool.inputSchema.properties).toHaveProperty('confirmation'); + expect(dataCleanTool.inputSchema.properties).toHaveProperty('includeSchema'); + expect(dataCleanTool.inputSchema.required).toContain('confirmation'); + }); + + test('rejects invalid confirmation text', async () => { + const result = await dataCleanTool.handler({ env: 'staging', confirmation: 'wrong' }); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('CONFIRMATION_REQUIRED'); + expect(result.error.expected).toBe('CLEAN DATA'); + expect(result.error.received).toBe('wrong'); + }); + + test('rejects missing confirmation', async () => { + const result = await dataCleanTool.handler({ env: 'staging' }); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('CONFIRMATION_REQUIRED'); + }); + + test('successfully starts clean with correct confirmation', async () => { + class MockGateway { + dataClean = vi.fn().mockResolvedValue({ id: 'clean-job-123', 
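+        // canned job handle from the mocked Gateway; no real instance is contacted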
status: 'pending' }); + } + + const result = await dataCleanTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', confirmation: 'CLEAN DATA' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('clean-job-123'); + expect(result.data.status).toBe('pending'); + expect(result.data.includeSchema).toBe(false); + expect(result.warning).toContain('remove ALL data'); + }); + + test('includes schema warning when includeSchema is true', async () => { + class MockGateway { + dataClean = vi.fn().mockResolvedValue({ id: 'clean-job-456', status: 'pending' }); + } + + const result = await dataCleanTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', confirmation: 'CLEAN DATA', includeSchema: true }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.includeSchema).toBe(true); + expect(result.warning).toContain('schema files'); + }); + + test('handles 422 error (not supported)', async () => { + const error = new Error('Not supported'); + error.statusCode = 422; + + class MockGateway { + dataClean = vi.fn().mockRejectedValue(error); + } + + const result = await dataCleanTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', confirmation: 'CLEAN DATA' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('NOT_SUPPORTED'); + expect(result.error.statusCode).toBe(422); + }); + + test('handles generic errors', async () => { + class MockGateway { + dataClean = vi.fn().mockRejectedValue(new Error('Network error')); + } + + const result = await dataCleanTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', confirmation: 'CLEAN DATA' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('DATA_CLEAN_ERROR'); + expect(result.error.message).toContain('Network error'); + }); + }); + + describe('data-clean-status', () => { + test('has correct description and inputSchema', () => { + expect(dataCleanStatusTool.description).toContain('status'); + expect(dataCleanStatusTool.inputSchema.properties).toHaveProperty('jobId'); + expect(dataCleanStatusTool.inputSchema.required).toContain('jobId'); + }); + + test('returns validation error when jobId not provided', async () => { + const result = await dataCleanStatusTool.handler({ url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token' }); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_ERROR'); + expect(result.error.message).toContain('jobId'); + }); + + test('successfully returns status for completed job', async () => { + class MockGateway { + dataCleanStatus = vi.fn().mockResolvedValue({ id: 'job-123', status: { name: 'done' } }); + } + + const result = await dataCleanStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'job-123' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('job-123'); + expect(result.data.status).toBe('done'); + expect(result.data.done).toBe(true); + expect(result.data.failed).toBe(false); + expect(result.data.pending).toBe(false); + }); + + test('correctly identifies pending status', async () => { + class MockGateway { + dataCleanStatus = vi.fn().mockResolvedValue({ id: 'job-456', status: 'pending' }); + } + + const result 
= await dataCleanStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'job-456' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('pending'); + expect(result.data.done).toBe(false); + expect(result.data.pending).toBe(true); + }); + + test('correctly identifies failed status', async () => { + class MockGateway { + dataCleanStatus = vi.fn().mockResolvedValue({ id: 'job-789', status: 'failed' }); + } + + const result = await dataCleanStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'job-789' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('failed'); + expect(result.data.done).toBe(false); + expect(result.data.failed).toBe(true); + }); + + test('handles errors', async () => { + class MockGateway { + dataCleanStatus = vi.fn().mockRejectedValue(new Error('Not found')); + } + + const result = await dataCleanStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'invalid-job' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('DATA_CLEAN_STATUS_ERROR'); + }); + }); +}); diff --git a/mcp-min/__tests__/data.export.test.js b/mcp-min/__tests__/data.export.test.js new file mode 100644 index 000000000..a23511945 --- /dev/null +++ b/mcp-min/__tests__/data.export.test.js @@ -0,0 +1,185 @@ +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +// Mock the pos-cli libs before importing tools +vi.mock('../../lib/files', () => ({ + default: { getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) }, + getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) +})); + +vi.mock('../../lib/settings', () => ({ + default: { fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) }, + fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) +})); + +describe('data-export tools', () => { + let dataExportTool; + let dataExportStatusTool; + + beforeAll(async () => { + const exportModule = await import('../data/export.js'); + const statusModule = await import('../data/export-status.js'); + dataExportTool = exportModule.default; + dataExportStatusTool = statusModule.default; + }); + + describe('data-export', () => { + test('has correct description and inputSchema', () => { + expect(dataExportTool.description).toContain('export'); + expect(dataExportTool.inputSchema.properties).toHaveProperty('env'); + expect(dataExportTool.inputSchema.properties).toHaveProperty('exportInternalIds'); + expect(dataExportTool.inputSchema.properties).toHaveProperty('zip'); + }); + + test('successfully starts JSON export', async () => { + class MockGateway { + dataExportStart = vi.fn().mockResolvedValue({ id: 'export-job-123', status: 'pending' }); + } + + const result = await dataExportTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('export-job-123'); + expect(result.data.status).toBe('pending'); + expect(result.data.isZip).toBe(false); + }); + + test('successfully starts 
ZIP export', async () => { + class MockGateway { + dataExportStart = vi.fn().mockResolvedValue({ id: 'export-zip-456', status: 'pending' }); + } + + const result = await dataExportTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', zip: true }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('export-zip-456'); + expect(result.data.isZip).toBe(true); + }); + + test('handles 404 error (not supported)', async () => { + const error = new Error('Not found'); + error.statusCode = 404; + + class MockGateway { + dataExportStart = vi.fn().mockRejectedValue(error); + } + + const result = await dataExportTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('NOT_SUPPORTED'); + }); + + test('handles generic errors', async () => { + class MockGateway { + dataExportStart = vi.fn().mockRejectedValue(new Error('Network error')); + } + + const result = await dataExportTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('DATA_EXPORT_ERROR'); + }); + }); + + describe('data-export-status', () => { + test('has correct description and inputSchema', () => { + expect(dataExportStatusTool.description).toContain('status'); + expect(dataExportStatusTool.inputSchema.properties).toHaveProperty('jobId'); + expect(dataExportStatusTool.inputSchema.required).toContain('jobId'); + }); + + test('returns validation error when jobId not provided', async () => { + const result = await dataExportStatusTool.handler({ url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token' }); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_ERROR'); + }); + + test('returns pending status', async () => { + class MockGateway { + dataExportStatus = vi.fn().mockResolvedValue({ id: 'job-123', status: 'pending' }); + } + + const result = await dataExportStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'job-123' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('pending'); + expect(result.data.pending).toBe(true); + expect(result.data.done).toBe(false); + }); + + test('returns completed JSON export with data', async () => { + class MockGateway { + dataExportStatus = vi.fn().mockResolvedValue({ + id: 'job-456', + status: 'done', + data: { + users: { results: [{ id: 1, email: 'test@example.com' }] }, + transactables: { results: [] }, + models: { results: [{ id: 2, name: 'product' }] } + } + }); + } + + const result = await dataExportStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'job-456' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('done'); + expect(result.data.done).toBe(true); + expect(result.data.exportedData.users).toHaveLength(1); + expect(result.data.exportedData.models).toHaveLength(1); + }); + + test('returns completed ZIP export with download URL', async () => { + class MockGateway { + dataExportStatus = vi.fn().mockResolvedValue({ + id: 'job-789', + status: 'done', + zip_file_url: 'https://s3.example.com/export.zip' + }); + } + + const result = await 
dataExportStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'job-789', isZip: true }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('done'); + expect(result.data.zipFileUrl).toBe('https://s3.example.com/export.zip'); + }); + + test('handles failed status', async () => { + class MockGateway { + dataExportStatus = vi.fn().mockResolvedValue({ id: 'job-fail', status: 'failed' }); + } + + const result = await dataExportStatusTool.handler( + { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token', jobId: 'job-fail' }, + { Gateway: MockGateway } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('failed'); + expect(result.data.failed).toBe(true); + }); + }); +}); diff --git a/mcp-min/__tests__/data.import.test.js b/mcp-min/__tests__/data.import.test.js new file mode 100644 index 000000000..0c867bc3a --- /dev/null +++ b/mcp-min/__tests__/data.import.test.js @@ -0,0 +1,375 @@ +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; + +import dataImportTool from '../data/import.js'; +import dataImportStatusTool from '../data/import-status.js'; + +const mockSettings = { + fetchSettings: (env) => { + if (env === 'staging') { + return { url: 'https://staging.example.com', email: 'test@example.com', token: 'test-token' }; + } + return null; + } +}; + +describe('data-import tool', () => { + describe('data-import', () => { + test('has correct description and inputSchema', () => { + expect(dataImportTool.description).toContain('Import data'); + expect(dataImportTool.inputSchema.properties).toHaveProperty('env'); + expect(dataImportTool.inputSchema.properties).toHaveProperty('filePath'); + expect(dataImportTool.inputSchema.properties).toHaveProperty('jsonData'); + expect(dataImportTool.inputSchema.properties).toHaveProperty('zipFileUrl'); + expect(dataImportTool.inputSchema.required).toContain('env'); + }); + + test('returns error when env not found', async () => { + const result = await dataImportTool.handler( + { env: 'unknown', jsonData: { records: [] } }, + { settings: mockSettings } + ); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('DATA_IMPORT_ERROR'); + expect(result.error.message).toContain('unknown'); + }); + + test('returns validation error when no data source provided', async () => { + const result = await dataImportTool.handler( + { env: 'staging' }, + { settings: mockSettings } + ); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_ERROR'); + expect(result.error.message).toContain('Provide one of'); + }); + + test('returns validation error when multiple data sources provided', async () => { + const result = await dataImportTool.handler( + { + env: 'staging', + jsonData: { records: [] }, + zipFileUrl: 'https://example.com/data.zip' + }, + { settings: mockSettings } + ); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_ERROR'); + expect(result.error.message).toContain('only one'); + }); + + test('successfully starts import with jsonData', async () => { + class MockGateway { + async getInstance() { return { id: 'instance-123' }; } + async dataImportStart(formData) { + // Verify it's using ZIP format + expect(formData.zip_file_url).toBeDefined(); + return { id: 'import-job-789', status: 'pending' }; + } + } + + const mockPresignUrl = 
vi.fn().mockResolvedValue({ + uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://s3.example.com/access/data.zip' + }); + const mockUploadFile = vi.fn().mockResolvedValue(true); + + const result = await dataImportTool.handler( + { env: 'staging', jsonData: { records: [{ id: '1', properties: { name: 'test' }, model_schema: 'todo' }] }, validate: false }, + { Gateway: MockGateway, settings: mockSettings, presignUrl: mockPresignUrl, uploadFile: mockUploadFile } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('import-job-789'); + expect(result.data.status).toBe('pending'); + expect(mockPresignUrl).toHaveBeenCalled(); + expect(mockUploadFile).toHaveBeenCalled(); + }); + + test('successfully starts import with zipFileUrl', async () => { + class MockGateway { + async dataImportStart(formData) { + expect(formData.zip_file_url).toBe('https://example.com/data.zip'); + return { id: 'import-job-zip', status: 'pending' }; + } + } + + const result = await dataImportTool.handler( + { env: 'staging', zipFileUrl: 'https://example.com/data.zip' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('import-job-zip'); + }); + + test('returns error when file not found', async () => { + const result = await dataImportTool.handler( + { env: 'staging', filePath: '/nonexistent/file.json' }, + { settings: mockSettings } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('FILE_NOT_FOUND'); + }); + + test('blocks import when validation fails (default)', async () => { + const result = await dataImportTool.handler( + { + env: 'staging', + jsonData: { + records: [{ id: 'test-id', properties: {}, created_at: 'invalid-date' }] + } + }, + { settings: mockSettings } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_FAILED'); + expect(result.error.details).toBeDefined(); + expect(result.error.details[0].errors.some(e => e.code === 'INVALID_DATETIME')).toBe(true); + }); + + test('rejects jsonData with invalid top-level structure (empty object)', async () => { + const result = await dataImportTool.handler( + { env: 'staging', jsonData: {} }, + { settings: mockSettings } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.error.message).toContain('records'); + expect(result.error.message).toContain('users'); + }); + + test('rejects jsonData with unknown top-level keys only', async () => { + const result = await dataImportTool.handler( + { env: 'staging', jsonData: { items: [], data: [] } }, + { settings: mockSettings } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.error.message).toContain('must contain at least one of'); + }); + + test('rejects jsonData with mix of valid and unknown top-level keys', async () => { + const result = await dataImportTool.handler( + { env: 'staging', jsonData: { records: [], extra: 'data' } }, + { settings: mockSettings } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.error.message).toContain('Unknown top-level keys'); + expect(result.error.message).toContain('extra'); + }); + + test('accepts jsonData with only users key', async () => { + class MockGateway { + async getInstance() { return { id: 'instance-123' }; } + async dataImportStart() { return { id: 'import-users', status: 'pending' }; } + } + + const mockPresignUrl = vi.fn().mockResolvedValue({ + 
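+        // same presign stub as above: a PUT target plus the access URL that ends up as zip_file_url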
uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://s3.example.com/access/data.zip' + }); + const mockUploadFile = vi.fn().mockResolvedValue(true); + + const result = await dataImportTool.handler( + { env: 'staging', jsonData: { users: [] } }, + { Gateway: MockGateway, settings: mockSettings, presignUrl: mockPresignUrl, uploadFile: mockUploadFile } + ); + + expect(result.ok).toBe(true); + }); + + test('accepts jsonData with both records and users keys', async () => { + class MockGateway { + async getInstance() { return { id: 'instance-123' }; } + async dataImportStart() { return { id: 'import-both', status: 'pending' }; } + } + + const mockPresignUrl = vi.fn().mockResolvedValue({ + uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://s3.example.com/access/data.zip' + }); + const mockUploadFile = vi.fn().mockResolvedValue(true); + + const result = await dataImportTool.handler( + { env: 'staging', jsonData: { records: [], users: [] } }, + { Gateway: MockGateway, settings: mockSettings, presignUrl: mockPresignUrl, uploadFile: mockUploadFile } + ); + + expect(result.ok).toBe(true); + }); + + test('rejects JSON file with invalid top-level structure', async () => { + const tmpFile = path.join(os.tmpdir(), 'test-invalid-structure.json'); + fs.writeFileSync(tmpFile, JSON.stringify({ items: [{ id: 1 }] })); + + try { + const result = await dataImportTool.handler( + { env: 'staging', filePath: tmpFile }, + { settings: mockSettings } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + } finally { + fs.unlinkSync(tmpFile); + } + }); + + test('skips validation when validate: false', async () => { + class MockGateway { + async getInstance() { return { id: 'instance-123' }; } + async dataImportStart() { + return { id: 'import-no-validate', status: 'pending' }; + } + } + + const mockPresignUrl = vi.fn().mockResolvedValue({ + uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://s3.example.com/access/data.zip' + }); + const mockUploadFile = vi.fn().mockResolvedValue(true); + + const result = await dataImportTool.handler( + { + env: 'staging', + jsonData: { records: [{ id: 'invalid-uuid' }] }, + validate: false + }, + { Gateway: MockGateway, settings: mockSettings, presignUrl: mockPresignUrl, uploadFile: mockUploadFile } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('import-no-validate'); + }); + + test('blocks import from JSON file when validation fails', async () => { + // Create temp JSON file with missing upload versions + const tmpFile = path.join(os.tmpdir(), 'test-invalid-data.json'); + const invalidData = { + records: [{ + id: 'test-1', + type: 'photo', + properties: { + title: 'Test', + image: { + path: 'photos/test.jpg' + // Missing required versions + } + }, + created_at: '2025-01-01T00:00:00Z', + updated_at: '2025-01-01T00:00:00Z' + }] + }; + fs.writeFileSync(tmpFile, JSON.stringify(invalidData)); + + try { + const result = await dataImportTool.handler( + { + env: 'staging', + filePath: tmpFile, + appPath: path.join(process.cwd(), 'examples') + }, + { settings: mockSettings } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_FAILED'); + expect(result.error.details[0].errors.some(e => e.code === 'MISSING_VERSION')).toBe(true); + } finally { + fs.unlinkSync(tmpFile); + } + }); + + test('handles ZIP file path', async () => { + // Create temp ZIP file + const tmpFile = path.join(os.tmpdir(), 'test-data.zip'); + fs.writeFileSync(tmpFile, 'fake zip content'); + 
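+      // the bytes are never read by anything real here; presignUrl and uploadFile are mocked, so any file on disk will do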
+ try { + class MockGateway { + async getInstance() { return { id: 'instance-123' }; } + async dataImportStart(formData) { + expect(formData.zip_file_url).toContain('s3.example.com'); + return { id: 'import-zip-file', status: 'pending' }; + } + } + + const mockPresignUrl = vi.fn().mockResolvedValue({ + uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://s3.example.com/access/data.zip' + }); + const mockUploadFile = vi.fn().mockResolvedValue(true); + + const result = await dataImportTool.handler( + { env: 'staging', filePath: tmpFile }, + { Gateway: MockGateway, settings: mockSettings, presignUrl: mockPresignUrl, uploadFile: mockUploadFile } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('import-zip-file'); + } finally { + fs.unlinkSync(tmpFile); + } + }); + }); + + describe('data-import-status', () => { + test('has correct description and inputSchema', () => { + expect(dataImportStatusTool.description).toContain('status'); + expect(dataImportStatusTool.inputSchema.properties).toHaveProperty('jobId'); + expect(dataImportStatusTool.inputSchema.required).toContain('jobId'); + }); + + test('returns validation error when jobId not provided', async () => { + const result = await dataImportStatusTool.handler( + { env: 'staging' }, + { settings: mockSettings } + ); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_ERROR'); + expect(result.error.message).toContain('jobId'); + }); + + test('successfully returns status for completed job', async () => { + class MockGateway { + async dataImportStatus() { + return { id: 'job-123', status: { name: 'done' } }; + } + } + + const result = await dataImportStatusTool.handler( + { env: 'staging', jobId: 'job-123' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('job-123'); + expect(result.data.status).toBe('done'); + }); + + test('correctly identifies pending status', async () => { + class MockGateway { + async dataImportStatus() { + return { id: 'job-456', status: 'pending' }; + } + } + + const result = await dataImportStatusTool.handler( + { env: 'staging', jobId: 'job-456' }, + { Gateway: MockGateway, settings: mockSettings } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('pending'); + }); + }); +}); diff --git a/mcp-min/__tests__/data.validate.test.js b/mcp-min/__tests__/data.validate.test.js new file mode 100644 index 000000000..cdcee1b6c --- /dev/null +++ b/mcp-min/__tests__/data.validate.test.js @@ -0,0 +1,706 @@ +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; +import path from 'path'; +import fs from 'fs'; +import os from 'os'; + +import { + validateRecords, + isValidDatetime, + matchesSchemaType, + validateJsonStructure +} from '../data/validate.js'; +import { + loadSchema, + loadAllSchemas, + parseSchemaYaml, + getUploadVersions +} from '../data/validate-schemas.js'; +import dataValidateTool from '../data/validate-tool.js'; + +// Path to example schemas +const EXAMPLES_APP_PATH = path.join(process.cwd(), 'examples', 'app'); + +// Sample valid record +const validRecord = { + id: '550e8400-e29b-41d4-a716-446655440000', + type: 'photo', + properties: { + title: 'Sunset', + description: 'A beautiful sunset', + tags: ['nature', 'sunset'], + file_size: 1024, + imported_at: '2025-01-01T00:00:00Z', + image: { + path: 'photos/sunset.jpg', + versions: { + thumbnail: 'photos/sunset_thumb.jpg', + medium: 'photos/sunset_med.jpg', + large: 
'photos/sunset_lg.jpg', + full: 'photos/sunset_full.jpg' + } + } + }, + created_at: '2025-01-01T00:00:00Z', + updated_at: '2025-01-01T00:00:00Z' +}; + +describe('validate-schemas', () => { + describe('parseSchemaYaml', () => { + test('parses basic schema with name and properties', () => { + const yaml = ` +name: test +properties: + - name: title + type: string + - name: count + type: integer +`; + const schema = parseSchemaYaml(yaml); + expect(schema.name).toBe('test'); + expect(schema.properties).toHaveLength(2); + expect(schema.properties[0]).toEqual({ name: 'title', type: 'string' }); + expect(schema.properties[1]).toEqual({ name: 'count', type: 'integer' }); + }); + + test('parses schema with upload type and versions', () => { + const yaml = ` +name: photo +properties: + - name: image + type: upload + options: + versions: + - name: thumbnail + - name: large +`; + const schema = parseSchemaYaml(yaml); + expect(schema.name).toBe('photo'); + expect(schema.properties[0].type).toBe('upload'); + expect(schema.properties[0].options.versions).toEqual([ + { name: 'thumbnail' }, + { name: 'large' } + ]); + }); + + test('handles empty properties', () => { + const yaml = ` +name: empty +properties: +`; + const schema = parseSchemaYaml(yaml); + expect(schema.name).toBe('empty'); + expect(schema.properties).toEqual([]); + }); + }); + + describe('loadSchema', () => { + test('loads schema from examples/app/schema', () => { + const schema = loadSchema('photo', path.join(process.cwd(), 'examples')); + expect(schema).not.toBeNull(); + expect(schema.name).toBe('photo'); + expect(schema.properties.length).toBeGreaterThan(0); + }); + + test('returns null for non-existent schema', () => { + const schema = loadSchema('nonexistent', EXAMPLES_APP_PATH); + expect(schema).toBeNull(); + }); + }); + + describe('loadAllSchemas', () => { + test('loads all schemas from examples app', () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + expect(schemas.size).toBeGreaterThan(0); + expect(schemas.has('photo')).toBe(true); + }); + + test('returns empty map for non-existent directory', () => { + const schemas = loadAllSchemas('/nonexistent/path'); + expect(schemas.size).toBe(0); + }); + }); + + describe('getUploadVersions', () => { + test('extracts version names from upload property', () => { + const prop = { + type: 'upload', + options: { + versions: [ + { name: 'thumbnail' }, + { name: 'large' } + ] + } + }; + const versions = getUploadVersions(prop); + expect(versions).toEqual(['thumbnail', 'large']); + }); + + test('returns empty array for non-upload type', () => { + const prop = { type: 'string' }; + expect(getUploadVersions(prop)).toEqual([]); + }); + + test('returns empty array for upload without versions', () => { + const prop = { type: 'upload' }; + expect(getUploadVersions(prop)).toEqual([]); + }); + }); +}); + +describe('validate helpers', () => { + describe('isValidDatetime', () => { + test('accepts valid ISO 8601 datetimes', () => { + expect(isValidDatetime('2025-01-01T00:00:00Z')).toBe(true); + expect(isValidDatetime('2025-01-01T12:30:45.123Z')).toBe(true); + expect(isValidDatetime('2025-01-01')).toBe(true); + expect(isValidDatetime('2025-01-01T00:00:00+00:00')).toBe(true); + }); + + test('rejects invalid datetimes', () => { + expect(isValidDatetime('not-a-date')).toBe(false); + expect(isValidDatetime('01/01/2025')).toBe(false); + expect(isValidDatetime('')).toBe(false); + expect(isValidDatetime(123)).toBe(false); + }); + }); + + describe('matchesSchemaType', () => { + test('string 
type', () => { + expect(matchesSchemaType('hello', 'string')).toBe(true); + expect(matchesSchemaType(123, 'string')).toBe(false); + }); + + test('text type (alias for string)', () => { + expect(matchesSchemaType('hello', 'text')).toBe(true); + expect(matchesSchemaType(123, 'text')).toBe(false); + }); + + test('integer type', () => { + expect(matchesSchemaType(42, 'integer')).toBe(true); + expect(matchesSchemaType(42.5, 'integer')).toBe(false); + expect(matchesSchemaType('42', 'integer')).toBe(false); + }); + + test('float type', () => { + expect(matchesSchemaType(42.5, 'float')).toBe(true); + expect(matchesSchemaType(42, 'float')).toBe(true); + expect(matchesSchemaType('42.5', 'float')).toBe(false); + }); + + test('boolean type', () => { + expect(matchesSchemaType(true, 'boolean')).toBe(true); + expect(matchesSchemaType(false, 'boolean')).toBe(true); + expect(matchesSchemaType('true', 'boolean')).toBe(false); + }); + + test('datetime type', () => { + expect(matchesSchemaType('2025-01-01T00:00:00Z', 'datetime')).toBe(true); + expect(matchesSchemaType('invalid', 'datetime')).toBe(false); + }); + + test('array type', () => { + expect(matchesSchemaType([], 'array')).toBe(true); + expect(matchesSchemaType(['a', 'b'], 'array')).toBe(true); + expect(matchesSchemaType({}, 'array')).toBe(false); + }); + + test('upload type', () => { + expect(matchesSchemaType({ path: 'file.jpg' }, 'upload')).toBe(true); + expect(matchesSchemaType({}, 'upload')).toBe(true); + expect(matchesSchemaType('file.jpg', 'upload')).toBe(false); + }); + + test('null values are allowed', () => { + expect(matchesSchemaType(null, 'string')).toBe(true); + expect(matchesSchemaType(undefined, 'integer')).toBe(true); + }); + }); +}); + +describe('validateJsonStructure', () => { + test('accepts object with records key', () => { + const result = validateJsonStructure({ records: [] }); + expect(result.ok).toBe(true); + }); + + test('accepts object with users key', () => { + const result = validateJsonStructure({ users: [] }); + expect(result.ok).toBe(true); + }); + + test('accepts object with both records and users keys', () => { + const result = validateJsonStructure({ records: [], users: [] }); + expect(result.ok).toBe(true); + }); + + test('rejects null', () => { + const result = validateJsonStructure(null); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + }); + + test('rejects array', () => { + const result = validateJsonStructure([{ id: 1 }]); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + }); + + test('rejects empty object', () => { + const result = validateJsonStructure({}); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.error.message).toContain('must contain at least one of'); + }); + + test('rejects object with only unknown keys', () => { + const result = validateJsonStructure({ items: [], data: {} }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.error.message).toContain('must contain at least one of'); + }); + + test('rejects object with mix of valid and unknown keys', () => { + const result = validateJsonStructure({ records: [], extra: 'data' }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.error.message).toContain('Unknown top-level keys'); + expect(result.error.message).toContain('extra'); + }); + + test('rejects primitive values', () => { + 
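+      // only a plain object keyed by records and/or users is accepted at the top level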
expect(validateJsonStructure('string').ok).toBe(false); + expect(validateJsonStructure(123).ok).toBe(false); + expect(validateJsonStructure(undefined).ok).toBe(false); + }); +}); + +describe('validateRecords', () => { + describe('required field validation', () => { + test('validates complete valid record', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const result = await validateRecords([validRecord], { schemas }); + expect(result.ok).toBe(true); + expect(result.data.valid).toBe(true); + expect(result.data.recordsValidated).toBe(1); + }); + + test('fails on missing id', async () => { + const record = { ...validRecord }; + delete record.id; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_FAILED'); + expect(result.error.details[0].errors[0].code).toBe('MISSING_ID'); + }); + + test('accepts any non-empty id value', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { ...validRecord, id: 'any-string-value' }; + const result = await validateRecords([record], { schemas }); + expect(result.ok).toBe(true); + }); + + test('fails on missing type', async () => { + const record = { ...validRecord }; + delete record.type; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'MISSING_TYPE')).toBe(true); + }); + + test('fails on missing properties', async () => { + const record = { ...validRecord }; + delete record.properties; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'MISSING_PROPERTIES')).toBe(true); + }); + + test('fails on non-object properties', async () => { + const record = { ...validRecord, properties: 'not an object' }; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'INVALID_PROPERTIES')).toBe(true); + }); + + test('fails on missing created_at', async () => { + const record = { ...validRecord }; + delete record.created_at; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'MISSING_CREATED_AT')).toBe(true); + }); + + test('fails on invalid created_at format', async () => { + const record = { ...validRecord, created_at: 'not-a-date' }; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => + e.code === 'INVALID_DATETIME' && e.field === 'created_at' + )).toBe(true); + }); + + test('fails on missing updated_at', async () => { + const record = { ...validRecord }; + delete record.updated_at; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'MISSING_UPDATED_AT')).toBe(true); + }); + }); + + describe('schema validation', () => { + test('fails on unknown type when schema not found', async () => { + const schemas = new Map(); // Empty schemas + const record = { ...validRecord, type: 'nonexistent' }; + const result = await validateRecords([record], { schemas }); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'UNKNOWN_TYPE')).toBe(true); + }); + + test('fails on unknown property with strictProperties', async () => { + const schemas = 
loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { + ...validRecord, + properties: { + ...validRecord.properties, + unknown_field: 'value' + } + }; + const result = await validateRecords([record], { schemas, strictProperties: true }); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'UNKNOWN_PROPERTY')).toBe(true); + }); + + test('passes with unknown property when strictProperties is false', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { + ...validRecord, + properties: { + ...validRecord.properties, + unknown_field: 'value' + } + }; + const result = await validateRecords([record], { schemas, strictProperties: false }); + expect(result.ok).toBe(true); + }); + + test('fails on type mismatch', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { + ...validRecord, + properties: { + ...validRecord.properties, + file_size: 'not an integer' // Should be integer + } + }; + const result = await validateRecords([record], { schemas, strictTypes: true }); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'TYPE_MISMATCH')).toBe(true); + }); + + test('passes type mismatch when strictTypes is false', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { + ...validRecord, + properties: { + ...validRecord.properties, + file_size: 'not an integer' + } + }; + const result = await validateRecords([record], { schemas, strictTypes: false }); + expect(result.ok).toBe(true); + }); + }); + + describe('upload version validation', () => { + test('fails on invalid upload version', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { + ...validRecord, + properties: { + ...validRecord.properties, + image: { + path: 'photos/sunset.jpg', + versions: { + thumbnail: 'photos/sunset_thumb.jpg', + invalid_version: 'photos/invalid.jpg' + } + } + } + }; + const result = await validateRecords([record], { schemas, strictTypes: true }); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'INVALID_VERSIONS')).toBe(true); + }); + + test('fails on missing upload versions', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { + ...validRecord, + properties: { + ...validRecord.properties, + image: { + path: 'photos/sunset.jpg', + versions: { + thumbnail: 'photos/sunset_thumb.jpg' + // Missing: medium, large, full + } + } + } + }; + const result = await validateRecords([record], { schemas, strictTypes: true }); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'MISSING_VERSION')).toBe(true); + }); + + test('fails when versions object is omitted but schema defines versions', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + const record = { + ...validRecord, + properties: { + ...validRecord.properties, + image: { + path: 'photos/sunset.jpg' + // versions object completely omitted + } + } + }; + const result = await validateRecords([record], { schemas, strictTypes: true }); + expect(result.ok).toBe(false); + expect(result.error.details[0].errors.some(e => e.code === 'MISSING_VERSION')).toBe(true); + }); + + test('passes when all required versions are provided', async () => { + const schemas = loadAllSchemas(path.join(process.cwd(), 'examples')); + 
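+      // validRecord supplies all four versions (thumbnail, medium, large, full) required by the example photo schema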
const result = await validateRecords([validRecord], { schemas, strictTypes: true }); + expect(result.ok).toBe(true); + }); + }); + + describe('error limits and aggregation', () => { + test('validates empty array successfully', async () => { + const result = await validateRecords([]); + expect(result.ok).toBe(true); + expect(result.data.recordsValidated).toBe(0); + }); + + test('respects maxErrors limit', async () => { + const badRecords = Array.from({ length: 50 }, () => ({ + // Missing all required fields + })); + const result = await validateRecords(badRecords, { maxErrors: 10 }); + expect(result.ok).toBe(false); + // Should stop before processing all records + const totalErrors = result.error.details.reduce((sum, d) => sum + d.errors.length, 0); + expect(totalErrors).toBeLessThanOrEqual(15); // Some buffer for multiple errors per record + }); + + test('collects errors from multiple records', async () => { + const records = [ + { id: 'invalid-uuid' }, + { type: 'test' }, + {} + ]; + const result = await validateRecords(records); + expect(result.ok).toBe(false); + expect(result.error.details.length).toBe(3); + }); + + test('returns error for non-array input', async () => { + const result = await validateRecords('not an array'); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_INPUT'); + }); + }); + + describe('error response format', () => { + test('error details include recordIndex, recordId, recordType', async () => { + const record = { id: 'invalid', type: 'test' }; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + const detail = result.error.details[0]; + expect(detail.recordIndex).toBe(0); + expect(detail.recordId).toBe('invalid'); + expect(detail.recordType).toBe('test'); + }); + + test('error details include field, code, message, value', async () => { + const record = { id: 'test', created_at: 'invalid-date' }; + const result = await validateRecords([record]); + expect(result.ok).toBe(false); + const error = result.error.details[0].errors.find(e => e.code === 'INVALID_DATETIME'); + expect(error.field).toBe('created_at'); + expect(error.code).toBe('INVALID_DATETIME'); + expect(error.message).toContain('Invalid datetime'); + expect(error.value).toBe('invalid-date'); + }); + }); +}); + +describe('data-validate tool', () => { + test('has correct description and inputSchema', () => { + expect(dataValidateTool.description).toContain('Validate'); + expect(dataValidateTool.inputSchema.properties).toHaveProperty('env'); + expect(dataValidateTool.inputSchema.properties).toHaveProperty('filePath'); + expect(dataValidateTool.inputSchema.properties).toHaveProperty('jsonData'); + expect(dataValidateTool.inputSchema.properties).toHaveProperty('appPath'); + expect(dataValidateTool.inputSchema.properties).toHaveProperty('strictTypes'); + expect(dataValidateTool.inputSchema.properties).toHaveProperty('strictProperties'); + expect(dataValidateTool.inputSchema.properties).toHaveProperty('maxErrors'); + }); + + test('returns error when no data source provided', async () => { + const result = await dataValidateTool.handler({ env: 'staging' }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_ERROR'); + expect(result.error.message).toContain('Provide one of'); + }); + + test('returns error when multiple data sources provided', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + filePath: '/path/to/file.json', + jsonData: { records: [] } + }); + expect(result.ok).toBe(false); + 
expect(result.error.code).toBe('VALIDATION_ERROR'); + expect(result.error.message).toContain('only one'); + }); + + test('returns error when file not found', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + filePath: '/nonexistent/file.json' + }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('FILE_NOT_FOUND'); + }); + + test('validates jsonData with valid records', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + jsonData: { records: [validRecord] }, + appPath: path.join(process.cwd(), 'examples') + }); + expect(result.ok).toBe(true); + expect(result.data.valid).toBe(true); + expect(result.data.recordsValidated).toBe(1); + }); + + test('validates jsonData with invalid records', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + jsonData: { records: [{ id: 'invalid' }] } + }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('VALIDATION_FAILED'); + }); + + test('validates file with valid JSON', async () => { + const tmpFile = path.join(os.tmpdir(), 'test-validate.json'); + fs.writeFileSync(tmpFile, JSON.stringify({ records: [validRecord] })); + + try { + const result = await dataValidateTool.handler({ + env: 'staging', + filePath: tmpFile, + appPath: path.join(process.cwd(), 'examples') + }); + expect(result.ok).toBe(true); + } finally { + fs.unlinkSync(tmpFile); + } + }); + + test('returns error for invalid JSON file', async () => { + const tmpFile = path.join(os.tmpdir(), 'test-invalid.json'); + fs.writeFileSync(tmpFile, 'not json'); + + try { + const result = await dataValidateTool.handler({ + env: 'staging', + filePath: tmpFile + }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_JSON'); + } finally { + fs.unlinkSync(tmpFile); + } + }); + + test('returns error when records is not an array', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + jsonData: { records: 'not an array' } + }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_FORMAT'); + }); + + test('includes meta timestamps', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + jsonData: { records: [] } + }); + expect(result.meta).toBeDefined(); + expect(result.meta.startedAt).toBeDefined(); + expect(result.meta.finishedAt).toBeDefined(); + }); + + test('respects strictProperties option', async () => { + const recordWithUnknown = { + ...validRecord, + properties: { + ...validRecord.properties, + unknown_field: 'value' + } + }; + + // Should pass without strictProperties + const resultLax = await dataValidateTool.handler({ + env: 'staging', + jsonData: { records: [recordWithUnknown] }, + appPath: path.join(process.cwd(), 'examples'), + strictProperties: false + }); + expect(resultLax.ok).toBe(true); + + // Should fail with strictProperties + const resultStrict = await dataValidateTool.handler({ + env: 'staging', + jsonData: { records: [recordWithUnknown] }, + appPath: path.join(process.cwd(), 'examples'), + strictProperties: true + }); + expect(resultStrict.ok).toBe(false); + }); + + test('rejects jsonData with invalid top-level structure', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + jsonData: { items: [] } + }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.meta).toBeDefined(); + }); + + test('rejects jsonData with mix of valid and unknown keys', async () => { + const result = 
await dataValidateTool.handler({ + env: 'staging', + jsonData: { records: [], extra: 'data' } + }); + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_STRUCTURE'); + expect(result.error.message).toContain('Unknown top-level keys'); + }); + + test('accepts jsonData with only users key', async () => { + const result = await dataValidateTool.handler({ + env: 'staging', + jsonData: { users: [] } + }); + expect(result.ok).toBe(true); + }); +}); diff --git a/mcp-min/__tests__/generators.tools.test.js b/mcp-min/__tests__/generators.tools.test.js new file mode 100644 index 000000000..8b223a71c --- /dev/null +++ b/mcp-min/__tests__/generators.tools.test.js @@ -0,0 +1,56 @@ + +import { pathToFileURL } from 'url'; +import path from 'path'; +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +// NOTE: keep imports single; file previously duplicated import lines causing parse error + +const dummyInstance = { + description: 'Generate something', + _arguments: [{ name: 'name' }], + argumentsHelp: () => ' name # Name of thing', + _options: [{ name: 'force', alias: 'f', description: 'force', default: '' }] +}; + +class DummyGen {} + +const env = { + register: vi.fn(), + get: vi.fn(() => DummyGen), + instantiate: vi.fn(() => dummyInstance), + run: vi.fn(async () => {}) +}; + +const listUrl = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'generators', 'list.js')).href; +const helpUrl = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'generators', 'help.js')).href; +const runUrl = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'generators', 'run.js')).href; + +describe('generators tools', () => { + let listTool, helpTool, runTool; + beforeAll(async () => { + listTool = (await import(listUrl)).default; + helpTool = (await import(helpUrl)).default; + runTool = (await import(runUrl)).default; + }); + + test('list returns generators with required args', async () => { + const res = await listTool.handler({}, { globSync: () => ['modules/core/generators/command/index.js'], yeomanEnv: env }); + expect(res.generators[0].name).toBe('command'); + expect(Array.isArray(res.generators[0].required)).toBe(true); + }); + + test('help returns usage, required and optional args, and options', async () => { + const res = await helpTool.handler({ generatorPath: 'modules/core/generators/command' }, { yeomanEnv: env }); + expect(res.name).toBe('command'); + expect(res.usage).toMatch(/pos-cli generate/); + expect(res.optionsTable).toMatch(/--force/); + expect(Array.isArray(res.requiredArgs)).toBe(true); + expect(Array.isArray(res.optionalArgs)).toBe(true); + }); + + test('run triggers yeoman env run', async () => { + const res = await runTool.handler({ generatorPath: 'modules/core/generators/command', args: ['users/create'], options: { force: true } }, { yeomanEnv: env }); + expect(res.success).toBe(true); + expect(env.run).toHaveBeenCalled(); + }); +}); diff --git a/mcp-min/__tests__/graphql.exec.test.js b/mcp-min/__tests__/graphql.exec.test.js new file mode 100644 index 000000000..dde5c44b1 --- /dev/null +++ b/mcp-min/__tests__/graphql.exec.test.js @@ -0,0 +1,47 @@ + +import { pathToFileURL } from 'url'; +import path from 'path'; +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +vi.mock('../../lib/proxy', () => { + class GatewayMock { + constructor() {} + async graph(body) { + if (body.query.includes('throw')) throw new Error('GQL'); + if (body.query.includes('gql_error')) return { errors: [{ 
message: 'Bad input' }], data: null };
+      return { data: { ok: true } };
+    }
+  }
+  return { default: GatewayMock, __esModule: true };
+});
+
+const toolUrl = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'graphql', 'exec.js')).href;
+
+describe('platformos.graphql.exec', () => {
+  let tool;
+  beforeAll(async () => {
+    const mod = await import(toolUrl);
+    tool = mod.default;
+  });
+
+  test('success returns data', async () => {
+    class LocalGateway { async graph() { return { data: { ok: true } }; } }
+    const res = await tool.handler({ url: 'https://x', email: 'e', token: 't', query: 'query { ok }' }, { Gateway: LocalGateway });
+    expect(res.success).toBe(true);
+    expect(res.result.data.ok).toBe(true);
+  });
+
+  test('returns error object when Gateway throws', async () => {
+    class LocalGateway { async graph() { throw new Error('GQL'); } }
+    const res = await tool.handler({ url: 'https://x', email: 'e', token: 't', query: 'throw' }, { Gateway: LocalGateway });
+    expect(res.success).toBe(false);
+    expect(res.error.code).toBe('GRAPHQL_EXEC_ERROR');
+  });
+
+  test('returns error object when GraphQL response contains errors', async () => {
+    class LocalGateway { async graph() { return { errors: [{ message: 'Bad input' }], data: null }; } }
+    const res = await tool.handler({ url: 'https://x', email: 'e', token: 't', query: 'gql_error' }, { Gateway: LocalGateway });
+    expect(res.success).toBe(false);
+    expect(res.error.code).toBe('GRAPHQL_EXEC_ERROR');
+  });
+});
diff --git a/mcp-min/__tests__/http.test.js b/mcp-min/__tests__/http.test.js
new file mode 100644
index 000000000..49f0fa15f
--- /dev/null
+++ b/mcp-min/__tests__/http.test.js
@@ -0,0 +1,137 @@
+
+import http from 'http';
+import { test, expect, beforeAll, afterAll } from 'vitest';
+import startHttp from '../http-server.js';
+import fixtures from '../../test/utils/fixtures';
+
+const PORT = 5930;
+let server;
+
+function httpRequest({ method = 'GET', path = '/', body = null, headers = {} }) {
+  return new Promise((resolve, reject) => {
+    const req = http.request(
+      { hostname: '127.0.0.1', port: PORT, path, method, headers: Object.assign({ 'Content-Type': 'application/json' }, headers) },
+      (res) => {
+        let data = '';
+        res.on('data', (chunk) => (data += chunk));
+        res.on('end', () => resolve({ status: res.statusCode, body: data, headers: res.headers }));
+      }
+    );
+    req.on('error', reject);
+    if (body) req.write(typeof body === 'string' ?
body : JSON.stringify(body));
+    req.end();
+  });
+}
+
+beforeAll(async () => {
+  // ensure .pos exists
+  fixtures.writeDotPos({ staging: { url: 'https://staging.example.com' } });
+  server = await startHttp({ port: PORT });
+});
+
+afterAll(() => {
+  if (server) server.close();
+  fixtures.removeDotPos();
+});
+
+test('GET /health returns ok', async () => {
+  const res = await httpRequest({ method: 'GET', path: '/health' });
+  expect(res.status).toBe(200);
+  const parsed = JSON.parse(res.body);
+  expect(parsed.status).toBe('ok');
+});
+
+test('GET /tools lists tools', async () => {
+  const res = await httpRequest({ method: 'GET', path: '/tools' });
+  expect(res.status).toBe(200);
+  const parsed = JSON.parse(res.body);
+  expect(Array.isArray(parsed.tools)).toBe(true);
+  expect(parsed.tools.find(t => t.id === 'envs-list')).toBeDefined();
+});
+
+test('POST /call returns 400 when tool missing', async () => {
+  const res = await httpRequest({ method: 'POST', path: '/call', body: {} });
+  expect(res.status).toBe(400);
+  const parsed = JSON.parse(res.body);
+  expect(parsed.error).toBeDefined();
+});
+
+test('POST /call returns 404 for unknown tool', async () => {
+  const res = await httpRequest({ method: 'POST', path: '/call', body: { tool: 'no-such' } });
+  expect(res.status).toBe(404);
+});
+
+test('POST /call envs-list returns environments', async () => {
+  const res = await httpRequest({ method: 'POST', path: '/call', body: { tool: 'envs-list', params: {} } });
+  expect(res.status).toBe(200);
+  const parsed = JSON.parse(res.body);
+  expect(parsed.result).toBeDefined();
+  expect(Array.isArray(parsed.result.environments)).toBe(true);
+});
+
+// JSON-RPC initialize path
+test('POST /call-stream initialize returns protocol info', async () => {
+  const res = await httpRequest({ method: 'POST', path: '/call-stream', body: { jsonrpc: '2.0', id: 1, method: 'initialize', params: { protocolVersion: '2025-06-18' } } });
+  expect(res.status).toBe(200);
+  const parsed = JSON.parse(res.body);
+  expect(parsed.result).toBeDefined();
+  expect(parsed.result.serverInfo).toBeDefined();
+});
+
+// JSON-RPC tools/list
+test('POST /call-stream tools/list returns tools array', async () => {
+  const res = await httpRequest({ method: 'POST', path: '/call-stream', body: { jsonrpc: '2.0', id: 2, method: 'tools/list', params: {} } });
+  expect(res.status).toBe(200);
+  const parsed = JSON.parse(res.body);
+  expect(parsed.result.tools).toBeDefined();
+  expect(Array.isArray(parsed.result.tools)).toBe(true);
+});
+
+// JSON-RPC tools/call -> envs-list
+test('POST /call-stream tools/call envs-list returns environments', async () => {
+  const res = await httpRequest({ method: 'POST', path: '/call-stream', body: { jsonrpc: '2.0', id: 3, method: 'tools/call', params: { name: 'envs-list' } } });
+  expect(res.status).toBe(200);
+  const parsed = JSON.parse(res.body);
+  expect(parsed.result).toBeDefined();
+  const content = parsed.result.content;
+  expect(Array.isArray(content)).toBe(true);
+});
+
+// JSON-RPC tools/call -> platformos.graphql.exec failures surface in the JSON-RPC error payload, not the HTTP status.
+// We pass a query that triggers the tool's errors handling; the transport still responds with HTTP 200.
+// Note: a dummy environment is written to .pos via fixtures in beforeAll.
+// This HTTP test uses the real tool module with an unreachable URL, so Gateway.graph fails and the
+// failure is reported as a JSON-RPC error object rather than a non-200 status code.
+
+test('POST /call-stream tools/call graphql.exec with GraphQL errors returns 200 with error payload', async
() => { + const body = { + jsonrpc: '2.0', + id: 4, + method: 'tools/call', + params: { + name: 'platformos.graphql.exec', + arguments: { url: 'https://example.invalid', email: 'e', token: 't', query: 'gql_error' } + } + }; + const res = await httpRequest({ method: 'POST', path: '/call-stream', body }); + expect(res.status).toBe(200); + const parsed = JSON.parse(res.body); + expect(parsed.error).toBeDefined(); +}); + +test('POST /call-stream tools/call liquid.exec logical error returns 200 with error payload', async () => { + const body = { + jsonrpc: '2.0', + id: 5, + method: 'tools/call', + params: { + name: 'platformos.liquid.exec', + arguments: { url: 'https://example.invalid', email: 'e', token: 't', template: 'logical_error' } + } + }; + const res = await httpRequest({ method: 'POST', path: '/call-stream', body }); + expect(res.status).toBe(200); + const parsed = JSON.parse(res.body); + expect(parsed.error).toBeDefined(); +}); diff --git a/mcp-min/__tests__/json-to-csv.test.js b/mcp-min/__tests__/json-to-csv.test.js new file mode 100644 index 000000000..1942ca693 --- /dev/null +++ b/mcp-min/__tests__/json-to-csv.test.js @@ -0,0 +1,137 @@ +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; +import { jsonToZipBuffer } from '../data/json-to-csv.js'; +import { createRequire } from 'module'; +import { Readable } from 'stream'; + +const require = createRequire(import.meta.url); +const unzipper = require('unzipper'); + +async function extractZipEntries(buffer) { + const entries = {}; + const directory = await unzipper.Open.buffer(buffer); + + for (const file of directory.files) { + const content = await file.buffer(); + entries[file.path] = content.toString('utf8'); + } + + return entries; +} + +describe('jsonToZipBuffer', () => { + test('converts records to CSV in ZIP', async () => { + const jsonData = { + records: [ + { id: '1', properties: { name: 'Test Item', done: false }, model_schema: 'todo' }, + { id: '2', properties: { name: 'Another Item', done: true }, model_schema: 'todo' } + ] + }; + + const buffer = await jsonToZipBuffer(jsonData); + expect(buffer).toBeInstanceOf(Buffer); + expect(buffer.length).toBeGreaterThan(0); + + const entries = await extractZipEntries(buffer); + expect(entries['records.csv']).toBeDefined(); + + const csv = entries['records.csv']; + expect(csv).toContain('id,user_id,created_at,updated_at,properties,model_schema'); + expect(csv).toContain('todo'); + expect(csv).toContain('Test Item'); + }); + + test('converts users to CSV in ZIP', async () => { + const jsonData = { + users: [ + { id: '1', email: 'test@example.com', first_name: 'John', last_name: 'Doe' }, + { id: '2', email: 'jane@example.com', first_name: 'Jane', last_name: 'Smith' } + ] + }; + + const buffer = await jsonToZipBuffer(jsonData); + const entries = await extractZipEntries(buffer); + + expect(entries['users.csv']).toBeDefined(); + + const csv = entries['users.csv']; + expect(csv).toContain('id,email'); + expect(csv).toContain('test@example.com'); + expect(csv).toContain('jane@example.com'); + }); + + test('handles both records and users', async () => { + const jsonData = { + records: [{ id: '1', model_schema: 'item' }], + users: [{ id: '1', email: 'user@test.com' }] + }; + + const buffer = await jsonToZipBuffer(jsonData); + const entries = await extractZipEntries(buffer); + + expect(entries['records.csv']).toBeDefined(); + expect(entries['users.csv']).toBeDefined(); + }); + + test('handles models field as records', async () => { + const jsonData = 
{ + models: [{ id: '1', properties: { title: 'Test' }, model_schema: 'post' }] + }; + + const buffer = await jsonToZipBuffer(jsonData); + const entries = await extractZipEntries(buffer); + + expect(entries['records.csv']).toBeDefined(); + expect(entries['records.csv']).toContain('post'); + }); + + test('handles transactables field as records', async () => { + const jsonData = { + transactables: [{ id: '1', model_schema: 'order' }] + }; + + const buffer = await jsonToZipBuffer(jsonData); + const entries = await extractZipEntries(buffer); + + expect(entries['records.csv']).toBeDefined(); + }); + + test('escapes CSV special characters', async () => { + const jsonData = { + records: [ + { id: '1', properties: { description: 'Has "quotes" and, commas' }, model_schema: 'test' } + ] + }; + + const buffer = await jsonToZipBuffer(jsonData); + const entries = await extractZipEntries(buffer); + const csv = entries['records.csv']; + + // Properties column should contain the JSON with escaped quotes + // The value contains comma so it should be wrapped in quotes + expect(csv).toContain('description'); + expect(csv).toContain('quotes'); + expect(csv).toContain('commas'); + }); + + test('handles empty data', async () => { + const jsonData = {}; + + const buffer = await jsonToZipBuffer(jsonData); + expect(buffer).toBeInstanceOf(Buffer); + + const entries = await extractZipEntries(buffer); + expect(Object.keys(entries).length).toBe(0); + }); + + test('handles type field as model_schema', async () => { + const jsonData = { + records: [{ id: '1', type: 'product', properties: {} }] + }; + + const buffer = await jsonToZipBuffer(jsonData); + const entries = await extractZipEntries(buffer); + const csv = entries['records.csv']; + + expect(csv).toContain('product'); + }); +}); diff --git a/mcp-min/__tests__/liquid.exec.test.js b/mcp-min/__tests__/liquid.exec.test.js new file mode 100644 index 000000000..2ce1a03a8 --- /dev/null +++ b/mcp-min/__tests__/liquid.exec.test.js @@ -0,0 +1,47 @@ + +import { pathToFileURL } from 'url'; +import path from 'path'; +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +vi.mock('../../lib/proxy', () => { + class GatewayMock { + constructor() {} + async liquid(body) { + if (body.template.includes('throw')) throw new Error('boom'); + if (body.template.includes('logical_error')) return { result: 'Liquid error', error: "Liquid error: Couldn't find \"questions/search.graphql\"." }; + return { output: 'Hello ' + (body.locals?.name || 'World') }; + } + } + return { default: GatewayMock, __esModule: true }; +}); + +const toolUrl = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'liquid', 'exec.js')).href; + +describe('platformos.liquid.exec', () => { + let tool; + beforeAll(async () => { + const mod = await import(toolUrl); + tool = mod.default; + }); + + test('success path returns result', async () => { + class LocalGateway { async liquid(body) { return { output: 'Hello ' + (body.locals?.name || 'World') }; } } + const res = await tool.handler({ url: 'https://x', email: 'e', token: 't', template: 'Hi {{name}}', locals: { name: 'Bob' } }, { Gateway: LocalGateway }); + expect(res.success).toBe(true); + expect(res.result.output).toMatch(/Hello/); + }); + + test('logical liquid error returns error object', async () => { + class LocalGateway { async liquid(_body) { return { result: 'Liquid error', error: "Liquid error: Couldn't find \"questions/search.graphql\"." 
}; } }
+    const res = await tool.handler({ url: 'https://x', email: 'e', token: 't', template: 'logical_error' }, { Gateway: LocalGateway });
+    expect(res.success).toBe(false);
+    expect(String(res.error.message)).toMatch(/Couldn't find/);
+  });
+
+  test('Gateway error returns error object', async () => {
+    class LocalGateway { async liquid() { throw new Error('boom'); } }
+    const res = await tool.handler({ url: 'https://x', email: 'e', token: 't', template: 'throw' }, { Gateway: LocalGateway });
+    expect(res.success).toBe(false);
+    expect(res.error.code).toBe('LIQUID_EXEC_ERROR');
+  });
+});
diff --git a/mcp-min/__tests__/list-envs.test.cjs.js b/mcp-min/__tests__/list-envs.test.cjs.js
new file mode 100644
index 000000000..6c5f1cdf6
--- /dev/null
+++ b/mcp-min/__tests__/list-envs.test.cjs.js
@@ -0,0 +1,70 @@
+
+const http = require('http');
+const path = require('path');
+const fs = require('fs');
+
+let server;
+
+const PORT = 5921;
+const CONFIG_FILE = path.resolve(`.pos.test-${PORT}`);
+
+function httpRequest({ method = 'GET', path = '/', body = null, headers = {} }) {
+  return new Promise((resolve, reject) => {
+    const req = http.request(
+      { hostname: '127.0.0.1', port: PORT, path, method, headers: Object.assign({ 'Content-Type': 'application/json' }, headers) },
+      (res) => {
+        let data = '';
+        res.on('data', (chunk) => (data += chunk));
+        res.on('end', () => resolve({ status: res.statusCode, body: data }));
+      }
+    );
+    req.on('error', reject);
+    if (body) req.write(typeof body === 'string' ? body : JSON.stringify(body));
+    req.end();
+  });
+}
+
+describe('mcp-min list-envs tool (CJS)', () => {
+  beforeAll(async () => {
+    // Write a unique config file for this test
+    fs.writeFileSync(CONFIG_FILE, JSON.stringify({ staging: { url: 'https://staging.example.com' }, prod: { url: 'https://prod.example.com' } }, null, 2));
+    // Set env var so files.getConfig() uses our test config
+    process.env.CONFIG_FILE_PATH = CONFIG_FILE;
+
+    // Start the server by dynamically importing the ESM entry point
+    const start = (await import('../http-server.js')).default;
+    server = await start({ port: PORT });
+  });
+
+  afterAll(() => {
+    if (server) server.close();
+    // Clean up test config file
+    if (fs.existsSync(CONFIG_FILE)) {
+      fs.unlinkSync(CONFIG_FILE);
+    }
+    delete process.env.CONFIG_FILE_PATH;
+  });
+
+  test('HTTP /call envs-list returns environments array', async () => {
+    const res = await httpRequest({ method: 'POST', path: '/call', body: { tool: 'envs-list', params: {} } });
+    expect(res.status).toBe(200);
+    const parsed = JSON.parse(res.body);
+    expect(Array.isArray(parsed.result.environments)).toBe(true);
+    const names = parsed.result.environments.map(e => e.name);
+    expect(names).toEqual(expect.arrayContaining(['staging', 'prod']));
+  });
+
+  test('JSON-RPC tools/call returns text content with environments', async () => {
+    const res = await httpRequest({ method: 'POST', path: '/call-stream', body: { jsonrpc: '2.0', id: 1, method: 'tools/call', params: { name: 'envs-list', arguments: {} } } });
+    expect(res.status).toBe(200);
+    const parsed = JSON.parse(res.body);
+    expect(parsed.result).toBeDefined();
+    const content = parsed.result.content;
+    expect(Array.isArray(content)).toBe(true);
+    const text = content[0].text;
+    const decoded = JSON.parse(text);
+    expect(Array.isArray(decoded.environments)).toBe(true);
+    const names = decoded.environments.map(e => e.name);
+    expect(names).toEqual(expect.arrayContaining(['staging', 'prod']));
+  });
+});
diff --git
a/mcp-min/__tests__/list-envs.test.js b/mcp-min/__tests__/list-envs.test.js new file mode 100644 index 000000000..d2ae06c05 --- /dev/null +++ b/mcp-min/__tests__/list-envs.test.js @@ -0,0 +1,70 @@ + +const http = require('http'); +const path = require('path'); +const fs = require('fs'); + +let server; + +const PORT = 5920; +const CONFIG_FILE = path.resolve(`.pos.test-${PORT}`); + +function httpRequest({ method = 'GET', path = '/', body = null, headers = {} }) { + return new Promise((resolve, reject) => { + const req = http.request( + { hostname: '127.0.0.1', port: PORT, path, method, headers: Object.assign({ 'Content-Type': 'application/json' }, headers) }, + (res) => { + let data = ''; + res.on('data', (chunk) => (data += chunk)); + res.on('end', () => resolve({ status: res.statusCode, body: data })); + } + ); + req.on('error', reject); + if (body) req.write(typeof body === 'string' ? body : JSON.stringify(body)); + req.end(); + }); +} + +describe('mcp-min list-envs tool', () => { + beforeAll(async () => { + // Write a unique config file for this test + fs.writeFileSync(CONFIG_FILE, JSON.stringify({ staging: { url: 'https://staging.example.com' }, prod: { url: 'https://prod.example.com' } }, null, 2)); + // Set env var so files.getConfig() uses our test config + process.env.CONFIG_FILE_PATH = CONFIG_FILE; + + // Start the server on a custom port + const startHttp = (await import('../http-server.js')).default; + server = await startHttp({ port: PORT }); + }); + + afterAll(() => { + if (server) server.close(); + // Clean up test config file + if (fs.existsSync(CONFIG_FILE)) { + fs.unlinkSync(CONFIG_FILE); + } + delete process.env.CONFIG_FILE_PATH; + }); + + test('HTTP /call envs-list returns environments array', async () => { + const res = await httpRequest({ method: 'POST', path: '/call', body: { tool: 'envs-list', params: {} } }); + expect(res.status).toBe(200); + const parsed = JSON.parse(res.body); + expect(Array.isArray(parsed.result.environments)).toBe(true); + const names = parsed.result.environments.map(e => e.name); + expect(names).toEqual(expect.arrayContaining(['staging', 'prod'])); + }); + + test('JSON-RPC tools/call returns text content with environments', async () => { + const res = await httpRequest({ method: 'POST', path: '/call-stream', body: { jsonrpc: '2.0', id: 1, method: 'tools/call', params: { name: 'envs-list', arguments: {} } } }); + expect(res.status).toBe(200); + const parsed = JSON.parse(res.body); + expect(parsed.result).toBeDefined(); + const content = parsed.result.content; + expect(Array.isArray(content)).toBe(true); + const text = content[0].text; + const decoded = JSON.parse(text); + expect(Array.isArray(decoded.environments)).toBe(true); + const names = decoded.environments.map(e => e.name); + expect(names).toEqual(expect.arrayContaining(['staging', 'prod'])); + }); +}); diff --git a/mcp-min/__tests__/logs.fetch.test.js b/mcp-min/__tests__/logs.fetch.test.js new file mode 100644 index 000000000..fc8accc6e --- /dev/null +++ b/mcp-min/__tests__/logs.fetch.test.js @@ -0,0 +1,54 @@ + +import { pathToFileURL } from 'url'; +import path from 'path'; +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +// Mock proxy Gateway to control logs() +vi.mock('../../lib/proxy', () => { + class GatewayMock { + constructor({ url, token, email }) { + this.url = url; this.token = token; this.email = email; + this.calls = []; + } + async logs({ lastId }) { + // Return synthetic batches based on lastId + const data = { + '0': { 
logs: [{ id: '1', message: 'a' }, { id: '2', message: 'b' }] }, + '2': { logs: [{ id: '3', message: 'c' }] } + }; + return data[String(lastId)] || { logs: [] }; + } + } + return { default: GatewayMock, __esModule: true }; +}); + +const fetchModPath = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'logs', 'fetch.js')).href; + +describe('platformos.logs.fetch', () => { + let fetchTool; + beforeAll(async () => { + const mod = await import(fetchModPath); + fetchTool = mod.default; + }); + + test('paginates via lastId and returns collected logs', async () => { + class LocalGateway { + async logs({ lastId }) { + const data = { + '0': { logs: [{ id: '1', message: 'a' }, { id: '2', message: 'b' }] }, + '2': { logs: [{ id: '3', message: 'c' }] } + }; + return data[String(lastId)] || { logs: [] }; + } + } + const res = await fetchTool.handler({ url: 'https://x', email: 'e', token: 't', lastId: '0' }, { Gateway: LocalGateway }); + expect(Array.isArray(res.logs)).toBe(true); + expect(res.logs.map((r) => r.id)).toEqual(['1', '2', '3']); + }); + + test('respects limit', async () => { + class LocalGateway { async logs({ lastId }) { return { logs: [{ id: '1' }, { id: '2' }, { id: '3' }] }; } } + const res = await fetchTool.handler({ url: 'https://x', email: 'e', token: 't', lastId: '0', limit: 2 }, { Gateway: LocalGateway }); + expect(res.logs.map((r) => r.id)).toEqual(['1', '2']); + }); +}); diff --git a/mcp-min/__tests__/logs.stream.test.js b/mcp-min/__tests__/logs.stream.test.js new file mode 100644 index 000000000..eff9619ce --- /dev/null +++ b/mcp-min/__tests__/logs.stream.test.js @@ -0,0 +1,46 @@ + +import { pathToFileURL } from 'url'; +import path from 'path'; +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +vi.useFakeTimers(); +vi.spyOn(global, 'setTimeout'); + +// Mock Gateway.logs +vi.mock('../../lib/proxy', () => { + class GatewayMock { + constructor() { this.calls = 0; } + async logs({ lastId }) { + this.calls++; + if (this.calls === 1) return { logs: [{ id: '1', message: 'a', error_type: 'info' }] }; + if (this.calls === 2) return { logs: [{ id: '2', message: 'b', error_type: 'error' }] }; + return { logs: [] }; + } + } + return { default: GatewayMock, __esModule: true }; +}); + +const toolUrl = pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'logs', 'stream.js')).href; + +describe('platformos.logs.stream', () => { + let tool; + beforeAll(async () => { + const mod = await import(toolUrl); + tool = mod.default; + }); + + test('emits data events and respects maxDuration', async () => { + const events = []; + const writer = (evt) => events.push(evt); + class LocalGateway { constructor(){ this.calls=0;} async logs(){ this.calls++; if (this.calls===1) return { logs:[{id:'1',error_type:'info'}]}; if(this.calls===2) return { logs:[{id:'2',error_type:'error'}]}; return { logs: []}; } } + const p = tool.streamHandler({ url: 'https://x', email: 'e', token: 't', interval: 50, maxDuration: 60 }, { writer, Gateway: LocalGateway }); + // flush initial tick + await Promise.resolve(); + // advance enough to include at least one scheduled tick and done + vi.advanceTimersByTime(1000); + // Give microtasks a chance + await Promise.resolve(); + expect(events.filter(e => e.event === 'data').length).toBeGreaterThanOrEqual(2); + expect(events.some(e => e.event === 'done')).toBe(true); + }); +}); diff --git a/mcp-min/__tests__/portal.endpoints-list.test.js b/mcp-min/__tests__/portal.endpoints-list.test.js new file mode 100644 index 000000000..7688b6f36 
--- /dev/null +++ b/mcp-min/__tests__/portal.endpoints-list.test.js @@ -0,0 +1,136 @@ +import { vi, describe, test, expect } from 'vitest'; + +import endpointsListTool from '../portal/endpoints-list.js'; + +const mockConfig = { + master_token: 'test-token-123', + partner_portal_url: 'https://portal.example.com' +}; + +describe('endpoints-list', () => { + test('lists all endpoints successfully', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce([ + { id: 1, name: 'US East', url: 'https://us-east.platformos.com', region: 'us-east-1' }, + { id: 2, name: 'EU West', url: 'https://eu-west.platformos.com', region: 'eu-west-1' }, + { id: 3, name: 'Asia Pacific', url: 'https://ap.platformos.com', region: 'ap-southeast-1' } + ]); + + const res = await endpointsListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.endpoints).toHaveLength(3); + expect(res.data.endpoints[0]).toEqual({ + id: 1, + name: 'US East', + url: 'https://us-east.platformos.com', + region: 'us-east-1' + }); + expect(res.data.count).toBe(3); + + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'GET', + path: '/api/endpoints' + }) + ); + }); + + test('handles response with endpoints wrapper object', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + endpoints: [ + { id: 1, name: 'Wrapped Endpoint', url: 'https://example.com', region: 'test' } + ] + }); + + const res = await endpointsListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.endpoints).toHaveLength(1); + expect(res.data.endpoints[0].name).toBe('Wrapped Endpoint'); + }); + + test('returns empty list when no endpoints', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce([]); + + const res = await endpointsListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.endpoints).toHaveLength(0); + expect(res.data.count).toBe(0); + }); + + test('handles missing optional fields', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce([ + { id: 1, name: 'Minimal Endpoint' } + ]); + + const res = await endpointsListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.endpoints[0]).toEqual({ + id: 1, + name: 'Minimal Endpoint', + url: undefined, + region: undefined + }); + }); + + test('handles network errors', async () => { + const portalRequest = vi.fn().mockRejectedValueOnce( + new Error('Connection timeout') + ); + + const res = await endpointsListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('ENDPOINTS_LIST_ERROR'); + expect(res.error.message).toContain('Connection timeout'); + }); + + test('handles authentication errors', async () => { + const portalRequest = vi.fn().mockRejectedValueOnce( + Object.assign(new Error('Unauthorized'), { status: 401 }) + ); + + const res = await endpointsListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('ENDPOINTS_LIST_ERROR'); + expect(res.error.message).toContain('Unauthorized'); + }); + + test('includes meta timestamps', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce([]); + + const res = await endpointsListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + 
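+    // Timestamps are expected to be ISO-8601 strings; matching on the YYYY-MM-DDT prefix keeps the assertion stable.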
expect(res.meta.startedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(res.meta.finishedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + }); + + test('has correct schema with no required fields', () => { + expect(endpointsListTool.inputSchema.required).toEqual([]); + expect(endpointsListTool.inputSchema.properties).toEqual({}); + }); +}); diff --git a/mcp-min/__tests__/portal.env-add.test.js b/mcp-min/__tests__/portal.env-add.test.js new file mode 100644 index 000000000..c261e399b --- /dev/null +++ b/mcp-min/__tests__/portal.env-add.test.js @@ -0,0 +1,292 @@ +import { vi, describe, test, expect, beforeEach, afterEach } from 'vitest'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; + +import envAddTool from '../portal/env-add.js'; + +describe('env-add', () => { + let tempDir; + let originalCwd; + + beforeEach(() => { + tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'env-add-test-')); + originalCwd = process.cwd(); + process.chdir(tempDir); + }); + + afterEach(() => { + process.chdir(originalCwd); + fs.rmSync(tempDir, { recursive: true, force: true }); + }); + + describe('direct token', () => { + test('adds environment with provided token', async () => { + const res = await envAddTool.handler({ + environment: 'staging', + url: 'https://my-app.example.com', + token: 'direct-token-123' + }); + + expect(res.ok).toBe(true); + expect(res.data.environment).toBe('staging'); + expect(res.data.url).toBe('https://my-app.example.com/'); + expect(res.data.message).toContain('added successfully'); + + // Verify .pos file + const config = JSON.parse(fs.readFileSync(path.join(tempDir, '.pos'), 'utf8')); + expect(config.staging.url).toBe('https://my-app.example.com/'); + expect(config.staging.token).toBe('direct-token-123'); + }); + + test('adds trailing slash to URL', async () => { + const res = await envAddTool.handler({ + environment: 'prod', + url: 'https://example.com', + token: 'token123' + }); + + expect(res.ok).toBe(true); + expect(res.data.url).toBe('https://example.com/'); + }); + + test('preserves existing environments in .pos', async () => { + // Create existing .pos + fs.writeFileSync(path.join(tempDir, '.pos'), JSON.stringify({ + existing: { url: 'https://existing.com/', token: 'old-token' } + })); + + const res = await envAddTool.handler({ + environment: 'staging', + url: 'https://new.com', + token: 'new-token' + }); + + expect(res.ok).toBe(true); + + const config = JSON.parse(fs.readFileSync(path.join(tempDir, '.pos'), 'utf8')); + expect(config.existing.token).toBe('old-token'); + expect(config.staging.token).toBe('new-token'); + }); + + test('includes email and partner_portal_url when provided', async () => { + const res = await envAddTool.handler({ + environment: 'staging', + url: 'https://example.com', + token: 'token123', + email: 'user@example.com', + partner_portal_url: 'https://custom-portal.com' + }); + + expect(res.ok).toBe(true); + + const config = JSON.parse(fs.readFileSync(path.join(tempDir, '.pos'), 'utf8')); + expect(config.staging.email).toBe('user@example.com'); + expect(config.staging.partner_portal_url).toBe('https://custom-portal.com'); + }); + }); + + describe('URL validation', () => { + test('rejects invalid URL', async () => { + const res = await envAddTool.handler({ + environment: 'staging', + url: 'not-a-valid-url', + token: 'token123' + }); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('INVALID_URL'); + }); + }); + + describe('device authorization with background waiter', () => { + test('returns verification URL immediately and spawns 
background waiter', async () => { + const mockFetch = vi.fn().mockResolvedValueOnce({ + ok: true, + status: 200, + json: () => Promise.resolve({ + verification_uri_complete: 'https://portal.example.com/verify?code=ABC123', + device_code: 'device-code-xyz', + interval: 5 + }) + }); + + const res = await envAddTool.handler( + { environment: 'staging', url: 'https://my-app.example.com' }, + { fetch: mockFetch } + ); + + expect(res.ok).toBe(true); + expect(res.data.status).toBe('awaiting_authorization'); + expect(res.data.verification_url).toBe('https://portal.example.com/verify?code=ABC123'); + expect(res.data.waiter_id).toBeDefined(); + expect(res.data.timeout_seconds).toBe(60); + expect(res.data.message).toContain('Background waiter active'); + + // Should NOT create .pos file immediately (waiter does it) + expect(fs.existsSync(path.join(tempDir, '.pos'))).toBe(false); + }); + + test('handles instance not registered error', async () => { + const mockFetch = vi.fn().mockResolvedValueOnce({ + ok: false, + status: 404, + text: () => Promise.resolve('Not found') + }); + + const res = await envAddTool.handler( + { environment: 'staging', url: 'https://unregistered.example.com' }, + { fetch: mockFetch } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('INSTANCE_NOT_REGISTERED'); + expect(res.error.message).toContain('not registered'); + }); + + test('background waiter saves .pos when authorization completes', async () => { + let pollCount = 0; + const mockFetch = vi.fn() + // First call: device auth + .mockResolvedValueOnce({ + ok: true, + status: 200, + json: () => Promise.resolve({ + verification_uri_complete: 'https://portal.example.com/verify', + device_code: 'device-123', + interval: 0.1 // Fast for testing + }) + }); + + // Mock storeEnvironment to verify it's called + const storeEnvironment = vi.fn(); + + const res = await envAddTool.handler( + { environment: 'staging', url: 'https://my-app.example.com', timeout_seconds: 2 }, + { + fetch: async (url, opts) => { + // First call is device auth + if (pollCount === 0) { + pollCount++; + return mockFetch(url, opts); + } + // Subsequent calls are token polls - return success immediately + return { + ok: true, + status: 200, + json: () => Promise.resolve({ access_token: 'the-token' }) + }; + }, + storeEnvironment + } + ); + + expect(res.ok).toBe(true); + expect(res.data.status).toBe('awaiting_authorization'); + + // Wait for background waiter to complete + await new Promise(resolve => setTimeout(resolve, 500)); + + // storeEnvironment should have been called by background waiter + expect(storeEnvironment).toHaveBeenCalledWith( + expect.objectContaining({ + environment: 'staging', + token: 'the-token' + }) + ); + }); + }); + + describe('schema', () => { + test('has correct required fields', () => { + expect(envAddTool.inputSchema.required).toContain('environment'); + expect(envAddTool.inputSchema.required).toContain('url'); + expect(envAddTool.inputSchema.required).not.toContain('token'); + }); + + test('has optional fields', () => { + const props = envAddTool.inputSchema.properties; + expect(props.token).toBeDefined(); + expect(props.email).toBeDefined(); + expect(props.partner_portal_url).toBeDefined(); + expect(props.timeout_seconds).toBeDefined(); + }); + }); + + describe('meta timestamps', () => { + test('includes timestamps in response', async () => { + const res = await envAddTool.handler({ + environment: 'staging', + url: 'https://example.com', + token: 'token123' + }); + + 
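+      // Even the synchronous direct-token path should report when the handler started and finished.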
expect(res.meta.startedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(res.meta.finishedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + }); + }); + + describe('portal URL priority', () => { + const originalEnv = process.env.PARTNER_PORTAL_URL; + + afterEach(() => { + if (originalEnv) { + process.env.PARTNER_PORTAL_URL = originalEnv; + } else { + delete process.env.PARTNER_PORTAL_URL; + } + }); + + test('uses PARTNER_PORTAL_URL env var when set', async () => { + process.env.PARTNER_PORTAL_URL = 'https://env-portal.example.com'; + + const mockFetch = vi.fn().mockResolvedValueOnce({ + ok: true, + status: 200, + json: () => Promise.resolve({ + verification_uri_complete: 'https://env-portal.example.com/verify', + device_code: 'test', + interval: 5 + }) + }); + + await envAddTool.handler( + { environment: 'staging', url: 'https://my-app.example.com' }, + { fetch: mockFetch } + ); + + expect(mockFetch).toHaveBeenCalledWith( + 'https://env-portal.example.com/oauth/authorize_device', + expect.any(Object) + ); + }); + + test('parameter overrides env var', async () => { + process.env.PARTNER_PORTAL_URL = 'https://env-portal.example.com'; + + const mockFetch = vi.fn().mockResolvedValueOnce({ + ok: true, + status: 200, + json: () => Promise.resolve({ + verification_uri_complete: 'https://param-portal.example.com/verify', + device_code: 'test', + interval: 5 + }) + }); + + await envAddTool.handler( + { + environment: 'staging', + url: 'https://my-app.example.com', + partner_portal_url: 'https://param-portal.example.com' + }, + { fetch: mockFetch } + ); + + expect(mockFetch).toHaveBeenCalledWith( + 'https://param-portal.example.com/oauth/authorize_device', + expect.any(Object) + ); + }); + }); +}); diff --git a/mcp-min/__tests__/portal.instance-create.test.js b/mcp-min/__tests__/portal.instance-create.test.js new file mode 100644 index 000000000..7b83ead12 --- /dev/null +++ b/mcp-min/__tests__/portal.instance-create.test.js @@ -0,0 +1,209 @@ +import { vi, describe, test, expect, beforeEach } from 'vitest'; + +import instanceCreateTool from '../portal/instance-create.js'; + +const mockConfig = { + master_token: 'test-token-123', + partner_portal_url: 'https://portal.example.com' +}; + +describe('instance-create', () => { + test('creates instance successfully when name is available', async () => { + const portalRequest = vi.fn() + .mockResolvedValueOnce({ available: true }) // name check + .mockResolvedValueOnce({ acknowledged: true }); // create + + const res = await instanceCreateTool.handler( + { + name: 'my-new-instance', + partner_id: 123, + endpoint_id: 456, + billing_plan_id: 789 + }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.acknowledged).toBe(true); + expect(res.data.name).toBe('my-new-instance'); + expect(res.data.message).toContain('Instance creation started'); + + // Verify name check call + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'GET', + path: '/api/instance_name_checks/my-new-instance' + }) + ); + + // Verify create call + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'POST', + path: '/api/tasks/instance/create', + body: { + instance_billing_plan_type_id: 789, + partner_id: 123, + instance_params: { + endpoint_id: 456, + name: 'my-new-instance', + tag_list: [] + } + } + }) + ); + }); + + test('includes tags when provided', async () => { + const portalRequest = vi.fn() + .mockResolvedValueOnce({ available: true }) + .mockResolvedValueOnce({ acknowledged: true }); + + 
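+    // Both portal calls (name check, then create) are mocked to succeed; the assertion below verifies tags are forwarded as instance_params.tag_list.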
await instanceCreateTool.handler( + { + name: 'tagged-instance', + partner_id: 1, + endpoint_id: 2, + billing_plan_id: 3, + tags: ['production', 'client-abc'] + }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + body: expect.objectContaining({ + instance_params: expect.objectContaining({ + tag_list: ['production', 'client-abc'] + }) + }) + }) + ); + }); + + test('returns error when name is unavailable', async () => { + const portalRequest = vi.fn() + .mockResolvedValueOnce({ available: false }); + + const res = await instanceCreateTool.handler( + { + name: 'taken-name', + partner_id: 1, + endpoint_id: 2, + billing_plan_id: 3 + }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('NAME_UNAVAILABLE'); + expect(res.error.message).toContain('taken-name'); + + // Should not call create endpoint + expect(portalRequest).toHaveBeenCalledTimes(1); + }); + + test('handles validation errors from API (422)', async () => { + const portalRequest = vi.fn() + .mockResolvedValueOnce({ available: true }) + .mockRejectedValueOnce(Object.assign( + new Error('Invalid billing plan'), + { status: 422, data: { errors: ['billing_plan_id is invalid'] } } + )); + + const res = await instanceCreateTool.handler( + { + name: 'valid-name', + partner_id: 1, + endpoint_id: 2, + billing_plan_id: 999 + }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('VALIDATION_ERROR'); + expect(res.error.message).toContain('Invalid billing plan'); + }); + + test('handles network/server errors', async () => { + const portalRequest = vi.fn() + .mockRejectedValueOnce(new Error('Network timeout')); + + const res = await instanceCreateTool.handler( + { + name: 'test-instance', + partner_id: 1, + endpoint_id: 2, + billing_plan_id: 3 + }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('INSTANCE_CREATE_ERROR'); + expect(res.error.message).toContain('Network timeout'); + }); + + test('URL-encodes special characters in name', async () => { + const portalRequest = vi.fn() + .mockResolvedValueOnce({ available: true }) + .mockResolvedValueOnce({ acknowledged: true }); + + await instanceCreateTool.handler( + { + name: 'test instance', + partner_id: 1, + endpoint_id: 2, + billing_plan_id: 3 + }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + path: '/api/instance_name_checks/test%20instance' + }) + ); + }); + + test('includes meta timestamps in response', async () => { + const portalRequest = vi.fn() + .mockResolvedValueOnce({ available: true }) + .mockResolvedValueOnce({ acknowledged: true }); + + const res = await instanceCreateTool.handler( + { + name: 'test', + partner_id: 1, + endpoint_id: 2, + billing_plan_id: 3 + }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.meta).toBeDefined(); + expect(res.meta.startedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(res.meta.finishedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + }); + + test('has correct schema', () => { + expect(instanceCreateTool.inputSchema.required).toContain('name'); + expect(instanceCreateTool.inputSchema.required).toContain('partner_id'); + expect(instanceCreateTool.inputSchema.required).toContain('endpoint_id'); + expect(instanceCreateTool.inputSchema.required).toContain('billing_plan_id'); + + 
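+    // tags is optional, but when present it must be declared as an array of strings.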
expect(instanceCreateTool.inputSchema.properties.tags.type).toBe('array'); + expect(instanceCreateTool.inputSchema.properties.tags.items.type).toBe('string'); + }); +}); + +describe('portal-client', () => { + // Basic integration test for the client module structure + test('portal-client exports expected functions', async () => { + const { getPortalConfig, portalRequest } = await import('../portal/portal-client.js'); + + expect(typeof getPortalConfig).toBe('function'); + expect(typeof portalRequest).toBe('function'); + }); +}); diff --git a/mcp-min/__tests__/portal.partner-get.test.js b/mcp-min/__tests__/portal.partner-get.test.js new file mode 100644 index 000000000..b19fe9307 --- /dev/null +++ b/mcp-min/__tests__/portal.partner-get.test.js @@ -0,0 +1,160 @@ +import { vi, describe, test, expect } from 'vitest'; + +import partnerGetTool from '../portal/partner-get.js'; + +const mockConfig = { + master_token: 'test-token-123', + partner_portal_url: 'https://portal.example.com' +}; + +describe('partner-get', () => { + test('fetches partner with billing plans', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + id: 42, + name: 'Acme Corp', + email: 'admin@acme.com', + created_at: '2024-01-15T10:00:00Z', + instance_billing_plan_types: [ + { id: 101, name: 'Free Tier', code: 'free', description: 'For development', price: 0, currency: 'USD' }, + { id: 102, name: 'Production', code: 'prod', description: 'For live apps', price: 99, currency: 'USD' }, + { id: 103, name: 'Enterprise', code: 'ent', description: 'Custom solutions', price: 499, currency: 'USD' } + ] + }); + + const res = await partnerGetTool.handler( + { partner_id: 42 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.partner.id).toBe(42); + expect(res.data.partner.name).toBe('Acme Corp'); + expect(res.data.partner.email).toBe('admin@acme.com'); + expect(res.data.billing_plans).toHaveLength(3); + expect(res.data.billing_plans_count).toBe(3); + + expect(res.data.billing_plans[0]).toEqual({ + id: 101, + name: 'Free Tier', + code: 'free', + description: 'For development', + price: 0, + currency: 'USD' + }); + + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'GET', + path: '/api/partners/42' + }) + ); + }); + + test('handles partner with no billing plans', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + id: 1, + name: 'New Partner', + email: 'new@partner.com' + }); + + const res = await partnerGetTool.handler( + { partner_id: 1 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.partner.name).toBe('New Partner'); + expect(res.data.billing_plans).toEqual([]); + expect(res.data.billing_plans_count).toBe(0); + }); + + test('handles billing plans with missing optional fields', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + id: 1, + name: 'Partner', + instance_billing_plan_types: [ + { id: 101, name: 'Basic', code: 'basic' } + ] + }); + + const res = await partnerGetTool.handler( + { partner_id: 1 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.billing_plans[0]).toEqual({ + id: 101, + name: 'Basic', + code: 'basic', + description: undefined, + price: undefined, + currency: undefined + }); + }); + + test('returns error when partner not found', async () => { + const portalRequest = vi.fn().mockRejectedValueOnce( + Object.assign(new Error('Partner not found'), { status: 404 }) + ); + + 
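+    // A 404 from the portal should surface as the domain-specific PARTNER_NOT_FOUND code rather than a generic error.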
const res = await partnerGetTool.handler( + { partner_id: 999 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('PARTNER_NOT_FOUND'); + expect(res.error.message).toContain('not found'); + }); + + test('handles authentication errors', async () => { + const portalRequest = vi.fn().mockRejectedValueOnce( + Object.assign(new Error('Invalid token'), { status: 401 }) + ); + + const res = await partnerGetTool.handler( + { partner_id: 1 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('PARTNER_GET_ERROR'); + expect(res.error.message).toContain('Invalid token'); + }); + + test('handles network errors', async () => { + const portalRequest = vi.fn().mockRejectedValueOnce( + new Error('Connection refused') + ); + + const res = await partnerGetTool.handler( + { partner_id: 1 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('PARTNER_GET_ERROR'); + expect(res.error.message).toContain('Connection refused'); + }); + + test('includes meta timestamps', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + id: 1, + name: 'Test' + }); + + const res = await partnerGetTool.handler( + { partner_id: 1 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.meta.startedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(res.meta.finishedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + }); + + test('has correct schema with required partner_id', () => { + expect(partnerGetTool.inputSchema.required).toContain('partner_id'); + expect(partnerGetTool.inputSchema.properties.partner_id.type).toBe('number'); + }); +}); diff --git a/mcp-min/__tests__/portal.partners-list.test.js b/mcp-min/__tests__/portal.partners-list.test.js new file mode 100644 index 000000000..e6f861dcc --- /dev/null +++ b/mcp-min/__tests__/portal.partners-list.test.js @@ -0,0 +1,159 @@ +import { vi, describe, test, expect } from 'vitest'; + +import partnersListTool from '../portal/partners-list.js'; + +const mockConfig = { + master_token: 'test-token-123', + partner_portal_url: 'https://portal.example.com' +}; + +describe('partners-list', () => { + test('lists all partners successfully', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce([ + { id: 1, name: 'Partner One' }, + { id: 2, name: 'Partner Two' }, + { id: 3, name: 'Partner Three' } + ]); + + const res = await partnersListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.partners).toHaveLength(3); + expect(res.data.partners[0]).toEqual({ id: 1, name: 'Partner One' }); + expect(res.data.count).toBe(3); + + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'GET', + path: '/api/partners' + }) + ); + }); + + test('handles response with partners wrapper object', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + partners: [ + { id: 1, name: 'Wrapped Partner' } + ] + }); + + const res = await partnersListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.partners).toHaveLength(1); + expect(res.data.partners[0].name).toBe('Wrapped Partner'); + }); + + test('returns empty list when no partners', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce([]); + + const res = await partnersListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + 
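+    // An empty portal response is still a success: ok with zero partners.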
expect(res.ok).toBe(true); + expect(res.data.partners).toHaveLength(0); + expect(res.data.count).toBe(0); + }); + + test('fetches specific partner with billing plans', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + id: 42, + name: 'Acme Corp', + instance_billing_plan_types: [ + { id: 101, name: 'Free Tier', code: 'free' }, + { id: 102, name: 'Production', code: 'prod' } + ] + }); + + const res = await partnersListTool.handler( + { partner_id: 42 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.partner.id).toBe(42); + expect(res.data.partner.name).toBe('Acme Corp'); + expect(res.data.partner.billing_plans).toHaveLength(2); + expect(res.data.partner.billing_plans[0]).toEqual({ + id: 101, + name: 'Free Tier', + code: 'free' + }); + + expect(portalRequest).toHaveBeenCalledWith( + expect.objectContaining({ + method: 'GET', + path: '/api/partners/42' + }) + ); + }); + + test('handles partner with no billing plans', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce({ + id: 1, + name: 'No Plans Partner' + }); + + const res = await partnersListTool.handler( + { partner_id: 1 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(true); + expect(res.data.partner.billing_plans).toEqual([]); + }); + + test('returns error when partner not found', async () => { + const portalRequest = vi.fn().mockRejectedValueOnce( + Object.assign(new Error('Partner not found'), { status: 404 }) + ); + + const res = await partnersListTool.handler( + { partner_id: 999 }, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('PARTNER_NOT_FOUND'); + }); + + test('handles network errors', async () => { + const portalRequest = vi.fn().mockRejectedValueOnce( + new Error('Connection refused') + ); + + const res = await partnersListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('PARTNERS_LIST_ERROR'); + expect(res.error.message).toContain('Connection refused'); + }); + + test('includes meta timestamps', async () => { + const portalRequest = vi.fn().mockResolvedValueOnce([]); + + const res = await partnersListTool.handler( + {}, + { portalRequest, portalConfig: mockConfig } + ); + + expect(res.meta.startedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + expect(res.meta.finishedAt).toMatch(/^\d{4}-\d{2}-\d{2}T/); + }); + + test('has correct schema', () => { + expect(partnersListTool.inputSchema.required).toEqual([]); + expect(partnersListTool.inputSchema.properties.partner_id.type).toBe('number'); + }); +}); diff --git a/mcp-min/__tests__/sse.test.js b/mcp-min/__tests__/sse.test.js new file mode 100644 index 000000000..9b7152bc3 --- /dev/null +++ b/mcp-min/__tests__/sse.test.js @@ -0,0 +1,75 @@ + +import http from 'http'; +import startHttp from '../http-server.js'; +import fixtures from '../../test/utils/fixtures'; + +const PORT = 5940; +let server; + +beforeAll(async () => { + fixtures.writeDotPos({ staging: { url: 'https://staging.example.com' } }); + server = await startHttp({ port: PORT }); +}); + +afterAll(() => { + if (server) server.close(); + fixtures.removeDotPos(); +}); + +function sseRequest(path = '/') { + return new Promise((resolve, reject) => { + const opts = { hostname: '127.0.0.1', port: PORT, path, method: 'GET', headers: { Accept: 'text/event-stream' } }; + let captured = ''; + let statusCode = 200; + const req = http.request(opts, (res) => { + statusCode = 
res.statusCode;
+      res.on('data', (chunk) => {
+        captured += chunk.toString();
+        if (captured.includes('event: endpoint')) {
+          // Close as soon as we see the initial event
+          resolve({ status: statusCode, body: captured });
+          req.destroy();
+        }
+      });
+      res.on('end', () => resolve({ status: statusCode, body: captured }));
+    });
+    req.on('error', (err) => {
+      // Treat expected abort-related errors as a successful early return with captured data
+      const msg = String(err).toLowerCase();
+      if (msg.includes('econnreset') || msg.includes('socket hang up') || msg.includes('aborted')) {
+        // Already resolved above if endpoint was seen
+        return;
+      }
+      reject(err);
+    });
+    req.end();
+  });
+}
+
+test('GET / with Accept: text/event-stream returns SSE framing', async () => {
+  const res = await sseRequest('/');
+  expect(res.status).toBe(200);
+  expect(res.body.includes(': connected')).toBe(true);
+  expect(res.body.includes('event: endpoint')).toBe(true);
+}, 15000);
+
+// Test POST /call-stream legacy streaming initial events
+function postCallStream(body) {
+  return new Promise((resolve, reject) => {
+    const req = http.request({ hostname: '127.0.0.1', port: PORT, path: '/call-stream', method: 'POST', headers: { 'Content-Type': 'application/json' } }, (res) => {
+      let data = '';
+      res.on('data', (chunk) => { data += chunk.toString(); if (data.length > 64 * 1024) req.destroy(); });
+      res.on('end', () => resolve({ status: res.statusCode, body: data }));
+    });
+    req.on('error', reject);
+    req.write(JSON.stringify(body));
+    req.end();
+  });
+}
+
+test('POST /call-stream legacy returns SSE initial events', async () => {
+  const res = await postCallStream({ tool: 'envs-list', params: {} });
+  // The server streams text/event-stream framing; under Express the response still
+  // closes with status 200, so we assert on the captured body
+  expect(res.status).toBe(200);
+  expect(res.body.includes('event: endpoint')).toBe(true);
+});
diff --git a/mcp-min/__tests__/stdio.test.js b/mcp-min/__tests__/stdio.test.js
new file mode 100644
index 000000000..6c864ccaa
--- /dev/null
+++ b/mcp-min/__tests__/stdio.test.js
@@ -0,0 +1,110 @@
+
+const { spawn } = require('child_process');
+const path = require('path');
+
+// Use an absolute path resolved from cwd so the server script is found
+// regardless of how the test runner (Vitest) resolves ESM/CommonJS paths
+const script = path.resolve(process.cwd(), 'mcp-min', 'stdio-server.js');
+
+function runServer() {
+  const child = spawn(process.execPath, ['--experimental-vm-modules', script], { stdio: ['pipe', 'pipe', 'pipe'] });
+  // Ensure child is killed after tests finish to avoid open handles
+  child.unref && child.unref();
+  return child;
+}
+
+describe('mcp-min stdio server', () => {
+  test('responds to MCP initialize and tools/call', (done) => {
+    const child = runServer();
+    let initialized = false;
+
+    child.stdout.on('data', (c) => {
+      const s = c.toString();
+      // Wait for initialize response, then call a tool
+      if (!initialized && s.includes('protocolVersion')) {
+        initialized = true;
+        child.stdin.write(JSON.stringify({
+          jsonrpc: '2.0',
+          id: 2,
+          method: 'tools/call',
+          params: { name: 'envs-list', arguments: {} }
+        }) + '\n');
+      }
+      // Check tool response
+      if (s.includes('"content"') && s.includes('environments')) {
+        expect(s.includes('environments')).toBe(true);
+        child.kill();
+        done();
+      }
+    });
+
+    // Send MCP initialize request
+    child.stdin.write(JSON.stringify({
+      jsonrpc: '2.0',
+      id: 1,
+      method: 'initialize',
+      params: { protocolVersion: '2024-11-05', capabilities: {} }
+    }) + '\n');
+  }, 15000);
+
+  test('exits cleanly with code 0 on EPIPE (client 
disconnects)', (done) => { + const child = runServer(); + + child.stdout.on('data', (c) => { + const s = c.toString(); + if (s.includes('protocolVersion')) { + // Close the parent's read end of stdout pipe to simulate client disconnect + child.stdout.destroy(); + + // Send another request — the response write will hit a broken pipe + child.stdin.write(JSON.stringify({ + jsonrpc: '2.0', + id: 2, + method: 'tools/list', + params: {} + }) + '\n'); + } + }); + + child.on('exit', (code, signal) => { + // Should exit cleanly, not crash + expect(code === 0 || signal === 'SIGPIPE').toBe(true); + done(); + }); + + // Bootstrap with MCP initialize + child.stdin.write(JSON.stringify({ + jsonrpc: '2.0', + id: 1, + method: 'initialize', + params: { protocolVersion: '2024-11-05', capabilities: {} } + }) + '\n'); + }, 15000); + + test('handles invalid JSON input gracefully', (done) => { + const child = runServer(); + let initialized = false; + + child.stdout.on('data', (c) => { + const s = c.toString(); + // Wait for initialize response, then send invalid JSON + if (!initialized && s.includes('protocolVersion')) { + initialized = true; + child.stdin.write('not a json\n'); + } + // Check error response (MCP protocol: JSON-RPC error with code -32700) + if (s.includes('"error"') && !s.includes('protocolVersion')) { + expect(s.includes('Parse error') || s.includes('-32700')).toBe(true); + child.kill(); + done(); + } + }); + + // Send MCP initialize request first + child.stdin.write(JSON.stringify({ + jsonrpc: '2.0', + id: 1, + method: 'initialize', + params: { protocolVersion: '2024-11-05', capabilities: {} } + }) + '\n'); + }, 15000); +}); diff --git a/mcp-min/__tests__/tests.run-async.test.js b/mcp-min/__tests__/tests.run-async.test.js new file mode 100644 index 000000000..f9072b2cc --- /dev/null +++ b/mcp-min/__tests__/tests.run-async.test.js @@ -0,0 +1,329 @@ +import { vi, describe, test, expect, beforeAll } from 'vitest'; + +vi.mock('../../lib/files', () => ({ + default: { getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) }, + getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) +})); + +vi.mock('../../lib/settings', () => ({ + default: { fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) }, + fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) +})); + +describe('tests-run-async tool', () => { + let testsRunAsyncTool; + + beforeAll(async () => { + const module = await import('../tests/run-async.js'); + testsRunAsyncTool = module.default; + }); + + test('has correct description and inputSchema', () => { + expect(testsRunAsyncTool.description).toContain('run_async'); + expect(testsRunAsyncTool.description).toContain('tests-run-async-result'); + expect(testsRunAsyncTool.inputSchema.properties).toHaveProperty('env'); + expect(testsRunAsyncTool.inputSchema.properties).not.toHaveProperty('intervalMs'); + expect(testsRunAsyncTool.inputSchema.properties).not.toHaveProperty('maxWaitMs'); + }); + + test('triggers run_async and returns immediately with run id', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ id: '42', test_name: 'liquid_test_abc', status: 'pending', result_url: '/_tests/results/42' }) + }); + + const result = await testsRunAsyncTool.handler( + { env: 'staging' }, + { request: mockRequest 
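+        // HTTP client is injected via the context argument, so the unit
+        // test never touches the network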
} + ); + + expect(result.ok).toBe(true); + expect(result.data.id).toBe('42'); + expect(result.data.test_name).toBe('liquid_test_abc'); + expect(result.data.status).toBe('pending'); + expect(result.data.result_url).toBe('/_tests/results/42'); + expect(mockRequest).toHaveBeenCalledTimes(1); + expect(mockRequest).toHaveBeenCalledWith(expect.objectContaining({ + uri: expect.stringContaining('/_tests/run_async') + })); + }); + + test('returns error on HTTP failure', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 500, + body: 'Internal Server Error' + }); + + const result = await testsRunAsyncTool.handler( + { env: 'staging' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('HTTP_ERROR'); + expect(result.error.statusCode).toBe(500); + }); + + test('returns error when response is not JSON', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: 'not json' + }); + + const result = await testsRunAsyncTool.handler( + { env: 'staging' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_RESPONSE'); + }); + + test('returns error when response has no id', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ status: 'pending' }) + }); + + const result = await testsRunAsyncTool.handler( + { env: 'staging' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('MISSING_ID'); + }); + + test('returns error on network failure', async () => { + const mockRequest = vi.fn().mockRejectedValue(new Error('ECONNREFUSED')); + + const result = await testsRunAsyncTool.handler( + { env: 'staging' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('TESTS_RUN_ASYNC_ERROR'); + expect(result.error.message).toContain('ECONNREFUSED'); + }); + + test('includes auth metadata in response', async () => { + vi.stubEnv('MPKIT_URL', ''); + vi.stubEnv('MPKIT_EMAIL', ''); + vi.stubEnv('MPKIT_TOKEN', ''); + + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ id: '1', test_name: 'liquid_test_meta', status: 'pending' }) + }); + + const result = await testsRunAsyncTool.handler( + { env: 'staging' }, + { request: mockRequest } + ); + + vi.unstubAllEnvs(); + + expect(result.ok).toBe(true); + expect(result.meta.auth).toBeDefined(); + expect(result.meta.auth.url).toContain('staging'); + expect(result.meta.auth.token).toMatch(/^tes\.\.\.ken$/); + expect(result.meta.startedAt).toBeDefined(); + expect(result.meta.finishedAt).toBeDefined(); + }); + + test('builds result_url from id when not in response', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ id: '77', test_name: 'liquid_test_nurl', status: 'pending' }) + }); + + const result = await testsRunAsyncTool.handler( + { env: 'staging' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.data.result_url).toBe('/_tests/results/77'); + }); +}); + +describe('tests-run-async-result tool', () => { + let testsRunAsyncResultTool; + + beforeAll(async () => { + const module = await import('../tests/run-async-result.js'); + testsRunAsyncResultTool = module.default; + }); + + test('has correct description and inputSchema', () => { + expect(testsRunAsyncResultTool.description).toContain('results'); + 
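+    // Schema note: only `id` is required below; `env` presumably stays optional
+    // so auth can fall back to MPKIT_* env vars or the .pos config.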
expect(testsRunAsyncResultTool.inputSchema.properties).toHaveProperty('id'); + expect(testsRunAsyncResultTool.inputSchema.required).toContain('id'); + }); + + test('returns pending status', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ + id: '42', test_name: 'liquid_test_abc', status: 'pending', + total_assertions: '', total_errors: '', total_duration: '', error_message: '', tests: [] + }) + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '42' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('pending'); + expect(result.data.pending).toBe(true); + expect(result.data.done).toBe(false); + expect(result.data.passed).toBe(false); + expect(mockRequest).toHaveBeenCalledWith(expect.objectContaining({ + uri: expect.stringContaining('/_tests/results/42') + })); + }); + + test('returns success status with parsed numbers', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ + id: '42', test_name: 'liquid_test_abc', status: 'success', + total_assertions: '10', total_errors: '0', total_duration: '18', error_message: '', + tests: [{ errors: {}, success: true, total: 10, test_path: 'modules/core/tests/helpers/url_for_test' }] + }) + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '42' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('success'); + expect(result.data.passed).toBe(true); + expect(result.data.done).toBe(true); + expect(result.data.pending).toBe(false); + expect(result.data.total_assertions).toBe(10); + expect(result.data.total_errors).toBe(0); + expect(result.data.total_duration).toBe(18); + expect(result.data.tests).toHaveLength(1); + }); + + test('returns failed status', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ + id: '10', status: 'failed', total_assertions: '8', total_errors: '2', + total_duration: '50', error_message: '', + tests: [{ errors: { msg: 'expected true' }, success: false, total: 8, test_path: 'tests/example' }] + }) + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '10' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('failed'); + expect(result.data.passed).toBe(false); + expect(result.data.done).toBe(true); + expect(result.data.total_errors).toBe(2); + }); + + test('returns error status when runner crashed', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ + status: 'error', error_message: 'Liquid syntax error: unexpected tag', tests: [] + }) + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '5' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.data.status).toBe('error'); + expect(result.data.done).toBe(true); + expect(result.data.error_message).toContain('Liquid syntax error'); + }); + + test('returns NOT_FOUND error', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ error: 'not_found' }) + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '999' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('NOT_FOUND'); + }); + + test('returns error on HTTP 
failure', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 502, body: 'Bad Gateway' + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '7' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('HTTP_ERROR'); + expect(result.error.statusCode).toBe(502); + }); + + test('returns error on invalid JSON response', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, body: 'not json' + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '8' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('INVALID_RESPONSE'); + }); + + test('returns error on network failure', async () => { + const mockRequest = vi.fn().mockRejectedValue(new Error('ECONNREFUSED')); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '1' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('TESTS_RESULT_ERROR'); + expect(result.error.message).toContain('ECONNREFUSED'); + }); + + test('includes auth metadata in response', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: JSON.stringify({ id: '1', status: 'pending', tests: [] }) + }); + + const result = await testsRunAsyncResultTool.handler( + { env: 'staging', id: '1' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.meta.auth.url).toContain('staging'); + expect(result.meta.auth.token).toMatch(/^.{3}\.\.\..{3}$/); + expect(result.meta.url).toContain('/_tests/results/1'); + }); +}); diff --git a/mcp-min/__tests__/tests.run.test.js b/mcp-min/__tests__/tests.run.test.js new file mode 100644 index 000000000..6d63240a3 --- /dev/null +++ b/mcp-min/__tests__/tests.run.test.js @@ -0,0 +1,387 @@ +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; + +// Mock the pos-cli libs before importing tools +vi.mock('../../lib/files', () => ({ + default: { getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) }, + getConfig: () => ({ staging: { url: 'https://staging.example.com', token: 'test-token', email: 'test@example.com' } }) +})); + +vi.mock('../../lib/settings', () => ({ + default: { fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) }, + fetchSettings: (env) => ({ url: `https://${env}.example.com`, token: 'test-token', email: 'test@example.com' }) +})); + +vi.mock('request-promise', () => ({ + default: vi.fn() +})); + +describe('unit-tests-run tool', () => { + let testsRunTool; + let parseTestResponse; + let extractJsonObjects; + + beforeAll(async () => { + const module = await import('../tests/run.js'); + testsRunTool = module.default; + parseTestResponse = module.parseTestResponse; + extractJsonObjects = module.extractJsonObjects; + }); + + describe('parseTestResponse', () => { + describe('JSON format', () => { + test('parses simple passing test response', () => { + const response = `{"path":"tests/example_test"} +------------------------ +Assertions: 5. Failed: 0. 
Time: 50ms`; + + const result = parseTestResponse(response); + + expect(result.summary.assertions).toBe(5); + expect(result.summary.failed).toBe(0); + expect(result.summary.timeMs).toBe(50); + expect(result.tests).toHaveLength(1); + expect(result.tests[0].path).toBe('tests/example_test'); + }); + + test('parses test response with error', () => { + const response = `{"path":"tests/timezones/list_test"}{"class_name":"Liquid::Error","message":"Liquid error: can't find partial"} +------------------------ +Assertions: 0. Failed: 0. Time: 31ms`; + + const result = parseTestResponse(response); + + expect(result.summary.assertions).toBe(0); + expect(result.summary.timeMs).toBe(31); + expect(result.tests).toHaveLength(1); + expect(result.tests[0].path).toBe('tests/timezones/list_test'); + expect(result.tests[0].error.className).toBe('Liquid::Error'); + expect(result.tests[0].error.message).toContain("can't find partial"); + }); + + test('parses multiple tests', () => { + const response = `{"path":"tests/test1"} +------------------------ +{"path":"tests/test2"} +------------------------ +{"path":"tests/test3"}{"class_name":"Error","message":"failed"} +------------------------ +Assertions: 10. Failed: 1. Time: 200ms`; + + const result = parseTestResponse(response); + + expect(result.tests).toHaveLength(3); + expect(result.tests[0].path).toBe('tests/test1'); + expect(result.tests[1].path).toBe('tests/test2'); + expect(result.tests[2].path).toBe('tests/test3'); + expect(result.tests[2].error).toBeDefined(); + expect(result.summary.assertions).toBe(10); + expect(result.summary.failed).toBe(1); + }); + }); + + describe('Text/Indented format', () => { + test('parses indented test format with passing and failing tests', () => { + const response = `------------------------ + +commands/questions/create_test + + build_valid should be valid: + + errors_populated translation missing: en.test.should.be_true + + +commands/questions/update_test + + build_valid should be valid: + + +simple_test + + simple_valid should be valid: + + +------------------------ + + +Failed_ + + Total errors: 4 + + + +Assertions: 11. Failed: 4. Time: 267ms`; + + const result = parseTestResponse(response); + + expect(result.summary.assertions).toBe(11); + expect(result.summary.failed).toBe(4); + expect(result.summary.timeMs).toBe(267); + expect(result.summary.totalErrors).toBe(4); + + expect(result.tests).toHaveLength(3); + + // First test with one pass and one fail + expect(result.tests[0].path).toBe('commands/questions/create_test'); + expect(result.tests[0].cases).toHaveLength(2); + expect(result.tests[0].cases[0].name).toBe('build_valid'); + expect(result.tests[0].cases[0].passed).toBe(true); + expect(result.tests[0].cases[0].description).toBe('should be valid'); + expect(result.tests[0].cases[1].name).toBe('errors_populated'); + expect(result.tests[0].cases[1].passed).toBe(false); + expect(result.tests[0].cases[1].error).toContain('translation missing'); + expect(result.tests[0].cases[1].error).toContain('en.test.should.be_true'); + + // Second test - all passing + expect(result.tests[1].path).toBe('commands/questions/update_test'); + expect(result.tests[1].passed).toBe(true); + + // Third test - all passing + expect(result.tests[2].path).toBe('simple_test'); + expect(result.tests[2].passed).toBe(true); + }); + + test('parses test with only passing cases', () => { + const response = `------------------------ +my_test + + case_one should work: + + case_two should also work: + +------------------------ +Assertions: 2. Failed: 0. 
Time: 50ms`; + + const result = parseTestResponse(response); + + expect(result.tests).toHaveLength(1); + expect(result.tests[0].path).toBe('my_test'); + expect(result.tests[0].passed).toBe(true); + expect(result.tests[0].cases).toHaveLength(2); + expect(result.tests[0].cases.every(c => c.passed)).toBe(true); + }); + + test('parses mixed format with SYNTAX ERROR and indented tests', () => { + const response = `SYNTAX ERROR:{"path":"tests/timezones/convert_test"}{"class_name":"LiquidArgumentError","message":"Liquid error: hash_merge filter - first argument must be a hash"} +------------------------ + +tests/timezones/list_test + + result.results should not be blank + + has_results translation missing: en.test.should.be_true + + first.region should not be blank + + first.name should not be blank + + au_sorted translation missing: en.test.should.be_true + + +------------------------ + + +Failed_ + + Total errors: 5 + + + +Assertions: 5. Failed: 5. Time: 123ms`; + + const result = parseTestResponse(response); + + expect(result.summary.assertions).toBe(5); + expect(result.summary.failed).toBe(5); + expect(result.summary.timeMs).toBe(123); + expect(result.summary.totalErrors).toBe(5); + + expect(result.tests).toHaveLength(2); + + // First test - syntax error from JSON + expect(result.tests[0].path).toBe('tests/timezones/convert_test'); + expect(result.tests[0].syntaxError).toBe(true); + expect(result.tests[0].error.className).toBe('LiquidArgumentError'); + expect(result.tests[0].error.message).toContain('hash_merge filter'); + + // Second test - indented format with failures + expect(result.tests[1].path).toBe('tests/timezones/list_test'); + expect(result.tests[1].cases).toHaveLength(5); + expect(result.tests[1].cases[0].passed).toBe(true); // should not be blank + expect(result.tests[1].cases[1].passed).toBe(false); // translation missing + expect(result.tests[1].cases[1].error).toContain('translation missing'); + expect(result.tests[1].passed).toBe(false); + }); + }); + + test('handles empty response', () => { + const response = `Assertions: 0. Failed: 0. 
Time: 5ms`; + + const result = parseTestResponse(response); + + expect(result.tests).toHaveLength(0); + expect(result.summary.assertions).toBe(0); + }); + }); + + describe('extractJsonObjects', () => { + test('extracts single JSON object', () => { + const str = '{"path":"test"}'; + const result = extractJsonObjects(str); + + expect(result).toHaveLength(1); + expect(result[0].path).toBe('test'); + }); + + test('extracts multiple concatenated JSON objects', () => { + const str = '{"path":"test"}{"error":"failed"}'; + const result = extractJsonObjects(str); + + expect(result).toHaveLength(2); + expect(result[0].path).toBe('test'); + expect(result[1].error).toBe('failed'); + }); + + test('handles nested objects', () => { + const str = '{"path":"test","meta":{"count":5}}'; + const result = extractJsonObjects(str); + + expect(result).toHaveLength(1); + expect(result[0].meta.count).toBe(5); + }); + + test('returns empty array for non-JSON string', () => { + const str = 'not json at all'; + const result = extractJsonObjects(str); + + expect(result).toHaveLength(0); + }); + }); + + describe('testsRunTool', () => { + test('has correct description and inputSchema', () => { + expect(testsRunTool.description).toContain('tests'); + expect(testsRunTool.inputSchema.properties).toHaveProperty('env'); + expect(testsRunTool.inputSchema.properties).toHaveProperty('path'); + expect(testsRunTool.inputSchema.properties).toHaveProperty('name'); + expect(testsRunTool.inputSchema.required).toContain('name'); + }); + + test('returns parsed test results on success', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: `{"path":"tests/example"} +------------------------ +Assertions: 3. Failed: 0. Time: 100ms` + }); + + const result = await testsRunTool.handler( + { env: 'staging', name: 'example_test' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.data.summary.assertions).toBe(3); + expect(result.data.summary.failed).toBe(0); + expect(result.data.passed).toBe(true); + expect(result.data.tests).toHaveLength(1); + expect(mockRequest).toHaveBeenCalledWith(expect.objectContaining({ + uri: expect.stringContaining('/_tests/run?formatter=text') + })); + }); + + test('includes path filter in URL when provided', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: 'Assertions: 0. Failed: 0. Time: 10ms' + }); + + await testsRunTool.handler( + { env: 'staging', name: 'users_test', path: 'tests/users' }, + { request: mockRequest } + ); + + expect(mockRequest).toHaveBeenCalledWith(expect.objectContaining({ + uri: expect.stringContaining('path=tests%2Fusers') + })); + }); + + test('includes name filter in URL when provided', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: 'Assertions: 0. Failed: 0. Time: 10ms' + }); + + await testsRunTool.handler( + { env: 'staging', name: 'create_user_test' }, + { request: mockRequest } + ); + + expect(mockRequest).toHaveBeenCalledWith(expect.objectContaining({ + uri: expect.stringContaining('name=create_user_test') + })); + }); + + test('includes both path and name filters in URL when provided', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: 'Assertions: 0. Failed: 0. 
Time: 10ms' + }); + + await testsRunTool.handler( + { env: 'staging', path: 'tests/users', name: 'create_user_test' }, + { request: mockRequest } + ); + + const callUri = mockRequest.mock.calls[0][0].uri; + expect(callUri).toContain('path=tests%2Fusers'); + expect(callUri).toContain('name=create_user_test'); + }); + + test('returns error on HTTP failure', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 500, + body: 'Internal Server Error' + }); + + const result = await testsRunTool.handler( + { env: 'staging', name: 'some_test' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('HTTP_ERROR'); + expect(result.error.statusCode).toBe(500); + }); + + test('returns error on network failure', async () => { + const mockRequest = vi.fn().mockRejectedValue(new Error('Network error')); + + const result = await testsRunTool.handler( + { env: 'staging', name: 'some_test' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(false); + expect(result.error.code).toBe('TESTS_RUN_ERROR'); + expect(result.error.message).toContain('Network error'); + }); + + test('correctly identifies failed tests', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + statusCode: 200, + body: `{"path":"tests/failing"}{"class_name":"AssertionError","message":"Expected true"} +------------------------ +Assertions: 5. Failed: 2. Time: 150ms` + }); + + const result = await testsRunTool.handler( + { env: 'staging', name: 'failing_test' }, + { request: mockRequest } + ); + + expect(result.ok).toBe(true); + expect(result.data.passed).toBe(false); + expect(result.data.summary.failed).toBe(2); + }); + }); +}); diff --git a/mcp-min/__tests__/tools.test.js b/mcp-min/__tests__/tools.test.js new file mode 100644 index 000000000..5d01e59a4 --- /dev/null +++ b/mcp-min/__tests__/tools.test.js @@ -0,0 +1,96 @@ + +const os = require('os'); +const path = require('path'); +const fs = require('fs'); +const { pathToFileURL } = require('url'); +let singleFile; +let singleFileTool; +let computeRemotePath, normalizeLocalPath, toPosix; +beforeAll(async () => { + const mod = await import(pathToFileURL(path.resolve(process.cwd(), 'mcp-min', 'sync', 'single-file.js')).href); + singleFile = mod; + singleFileTool = mod.default; + ({ computeRemotePath, normalizeLocalPath, toPosix } = mod); +}); + +// Basic unit tests for helper functions and dry-run behavior + +describe('sync.singleFile helpers', () => { + test('toPosix converts backslashes to slashes', () => { + expect(toPosix('a\\b\\c')).toBe('a/b/c'); + }); + + test('computeRemotePath strips app/ prefix', () => { + expect(computeRemotePath('app/templates/example.html')).toBe('templates/example.html'); + }); + + test('normalizeLocalPath produces relative posix path', () => { + const tmp = path.resolve(process.cwd(), 'app', 'dummy.txt'); + const norm = normalizeLocalPath(tmp); + expect(norm).toMatch(/app\/(dummy.txt|dummy.txt)$/); + }); +}); + +describe('sync.singleFile handler dry-run', () => { + test('returns success true for dryRun when file path within allowed dirs', async () => { + const tmpDir = path.join(process.cwd(), 'tmp-app'); + const appDir = path.join(tmpDir, 'app'); + const tmpPath = path.join(appDir, 'assets', 'dummy.txt'); + // Create a temp file and directory structure + fs.mkdirSync(path.dirname(tmpPath), { recursive: true }); + fs.writeFileSync(tmpPath, 'hello'); + + // Run test from inside tmpDir so dir.APP detection works + const cwdOrig = process.cwd(); + process.chdir(tmpDir); + 
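+    // try/finally restores the original cwd and removes the temp tree even
+    // when an assertion inside the block throws.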
try { + const res = await singleFileTool.handler({ filePath: tmpPath, dryRun: true, url: 'https://example.com', email: 'a@b.c', token: 'tok' }, { transport: 'test' }); + expect(res.success).toBe(true); + expect(res.file.normalizedPath).toMatch(/app\/assets\/dummy.txt$/); + } finally { + process.chdir(cwdOrig); + fs.rmSync(tmpDir, { recursive: true, force: true }); + } + }); +}); + +describe('sync.singleFile with env parameter', () => { + let tmpDir; + let originalCwd; + + beforeEach(() => { + tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'sync-env-test-')); + originalCwd = process.cwd(); + process.chdir(tmpDir); + // Clear global MPKIT_* env vars so tests can control auth via .pos file + vi.stubEnv('MPKIT_URL', ''); + vi.stubEnv('MPKIT_EMAIL', ''); + vi.stubEnv('MPKIT_TOKEN', ''); + + fs.mkdirSync(path.join(tmpDir, 'app', 'assets'), { recursive: true }); + fs.writeFileSync(path.join(tmpDir, '.pos'), JSON.stringify({ + staging: { url: 'https://test.staging.com', token: 'test-token', email: 'test@test.com' } + })); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + process.chdir(originalCwd); + fs.rmSync(tmpDir, { recursive: true, force: true }); + }); + + test('resolves auth from .pos when env parameter is provided', async () => { + const appDir = path.join(tmpDir, 'app'); + fs.writeFileSync(path.join(appDir, 'assets', 'test.css'), 'body { color: red; }'); + + const res = await singleFileTool.handler({ + filePath: path.join(appDir, 'assets', 'test.css'), + env: 'staging', + dryRun: true + }, { transport: 'test' }); + + expect(res.success).toBe(true); + expect(res.auth.source).toBe('.pos(staging)'); + expect(res.auth.url).toBe('https://test.staging.com'); + }); +}); diff --git a/mcp-min/__tests__/uploads.push.test.js b/mcp-min/__tests__/uploads.push.test.js new file mode 100644 index 000000000..66f19cf06 --- /dev/null +++ b/mcp-min/__tests__/uploads.push.test.js @@ -0,0 +1,191 @@ + +import path from 'path'; +import { vi, describe, test, expect, beforeAll, afterAll, beforeEach, afterEach } from 'vitest'; +import fs from 'fs'; +import os from 'os'; + +// Import the tool directly +import uploadsTool from '../uploads/push.js'; + +// Mock settings that can be injected via context +const mockSettings = { + fetchSettings: (env) => { + if (env === 'staging') { + return { url: 'https://staging.example.com', email: 'test@example.com', token: 'secret123' }; + } + if (env === 'production') { + return { url: 'https://prod.example.com', email: 'prod@example.com', token: 'prodtoken' }; + } + return null; + } +}; + +describe('uploads-push', () => { + let tempFile; + + beforeEach(() => { + // Create a temp ZIP file for testing + tempFile = path.join(os.tmpdir(), 'test-uploads.zip'); + fs.writeFileSync(tempFile, 'fake zip content'); + }); + + afterEach(() => { + if (fs.existsSync(tempFile)) { + fs.unlinkSync(tempFile); + } + }); + + test('uploads file successfully with env and filePath', async () => { + class MockGateway { + async getInstance() { + return { id: 'test-instance-123' }; + } + } + + const mockPresignUrl = vi.fn().mockResolvedValue({ + uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://cdn.example.com/uploads.zip' + }); + const mockUploadFile = vi.fn().mockResolvedValue('https://s3.example.com/upload'); + + const res = await uploadsTool.handler( + { env: 'staging', filePath: tempFile }, + { Gateway: MockGateway, presignUrl: mockPresignUrl, uploadFile: mockUploadFile, settings: mockSettings } + ); + + expect(res.ok).toBe(true); + 
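+    // Remote key convention asserted further down:
+    // instances/<instanceId>/property_uploads/data.public_property_upload_import.zip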
expect(res.data.instanceId).toBe('test-instance-123'); + expect(res.data.filePath).toBe(tempFile); + expect(res.data.accessUrl).toBe('https://cdn.example.com/uploads.zip'); + expect(res.meta.startedAt).toBeDefined(); + expect(res.meta.finishedAt).toBeDefined(); + + // Verify mocks were called with correct arguments + expect(mockPresignUrl).toHaveBeenCalledWith( + 'instances/test-instance-123/property_uploads/data.public_property_upload_import.zip', + tempFile + ); + expect(mockUploadFile).toHaveBeenCalledWith(tempFile, 'https://s3.example.com/upload'); + }); + + test('returns error when file not found', async () => { + const res = await uploadsTool.handler( + { env: 'staging', filePath: '/nonexistent/file.zip' }, + { settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('FILE_NOT_FOUND'); + expect(res.error.message).toContain('/nonexistent/file.zip'); + }); + + test('returns error when env not found', async () => { + const res = await uploadsTool.handler( + { env: 'unknown-env', filePath: tempFile }, + { settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('UPLOAD_FAILED'); + expect(res.error.message).toContain('unknown-env'); + expect(res.error.message).toContain('not found'); + }); + + test('has correct description and schema with required fields', () => { + expect(uploadsTool.description).toContain('ZIP'); + expect(uploadsTool.inputSchema.required).toContain('env'); + expect(uploadsTool.inputSchema.required).toContain('filePath'); + expect(uploadsTool.inputSchema.properties.env).toBeDefined(); + expect(uploadsTool.inputSchema.properties.filePath).toBeDefined(); + }); + + test('handles presignUrl failure', async () => { + class MockGateway { + async getInstance() { + return { id: 'test-instance-123' }; + } + } + + const mockPresignUrl = vi.fn().mockRejectedValue(new Error('S3 service unavailable')); + const mockUploadFile = vi.fn(); + + const res = await uploadsTool.handler( + { env: 'staging', filePath: tempFile }, + { Gateway: MockGateway, presignUrl: mockPresignUrl, uploadFile: mockUploadFile, settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('UPLOAD_FAILED'); + expect(res.error.message).toContain('S3 service unavailable'); + expect(mockUploadFile).not.toHaveBeenCalled(); + }); + + test('handles uploadFile failure', async () => { + class MockGateway { + async getInstance() { + return { id: 'test-instance-123' }; + } + } + + const mockPresignUrl = vi.fn().mockResolvedValue({ + uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://cdn.example.com/uploads.zip' + }); + const mockUploadFile = vi.fn().mockRejectedValue(new Error('Upload timeout')); + + const res = await uploadsTool.handler( + { env: 'staging', filePath: tempFile }, + { Gateway: MockGateway, presignUrl: mockPresignUrl, uploadFile: mockUploadFile, settings: mockSettings } + ); + + expect(res.ok).toBe(false); + expect(res.error.code).toBe('UPLOAD_FAILED'); + expect(res.error.message).toContain('Upload timeout'); + }); + + test('sets MARKETPLACE env vars for presignUrl', async () => { + class MockGateway { + async getInstance() { + return { id: 'inst-001' }; + } + } + + let capturedToken, capturedUrl; + const mockPresignUrl = vi.fn().mockImplementation(() => { + capturedToken = process.env.MARKETPLACE_TOKEN; + capturedUrl = process.env.MARKETPLACE_URL; + return Promise.resolve({ uploadUrl: 'https://s3.example.com/upload', accessUrl: 'https://cdn.example.com/file.zip' }); + }); + const mockUploadFile 
= vi.fn().mockResolvedValue('ok'); + + await uploadsTool.handler( + { env: 'staging', filePath: tempFile }, + { Gateway: MockGateway, presignUrl: mockPresignUrl, uploadFile: mockUploadFile, settings: mockSettings } + ); + + expect(capturedToken).toBe('secret123'); + expect(capturedUrl).toBe('https://staging.example.com'); + }); + + test('works with production environment', async () => { + class MockGateway { + async getInstance() { + return { id: 'prod-instance' }; + } + } + + const mockPresignUrl = vi.fn().mockResolvedValue({ + uploadUrl: 'https://s3.example.com/upload', + accessUrl: 'https://cdn.example.com/uploads.zip' + }); + const mockUploadFile = vi.fn().mockResolvedValue('ok'); + + const res = await uploadsTool.handler( + { env: 'production', filePath: tempFile }, + { Gateway: MockGateway, presignUrl: mockPresignUrl, uploadFile: mockUploadFile, settings: mockSettings } + ); + + expect(res.ok).toBe(true); + expect(res.data.instanceId).toBe('prod-instance'); + }); +}); diff --git a/mcp-min/check/index.js b/mcp-min/check/index.js new file mode 100644 index 000000000..7030d3cbf --- /dev/null +++ b/mcp-min/check/index.js @@ -0,0 +1,192 @@ +// platformos.check - run platformos-check linter on the app +import { exec } from 'child_process'; +import { promisify } from 'util'; + +const execAsync = promisify(exec); + +const checkTool = { + description: 'Run the platformos-check linter to analyze the app for best practice violations, errors, and style issues in Liquid and JSON files. Returns structured results (JSON by default). Supports category filtering, auto-correct, and custom config. Requires the platformos-check gem installed.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + appPath: { + type: 'string', + description: 'Path to the platformOS app (default: current directory)' + }, + format: { + type: 'string', + enum: ['text', 'json'], + description: 'Output format (default: json)', + default: 'json' + }, + category: { + type: 'array', + items: { type: 'string' }, + description: 'Only run checks matching these categories (can specify multiple)' + }, + excludeCategory: { + type: 'array', + items: { type: 'string' }, + description: 'Exclude checks matching these categories (can specify multiple)' + }, + autoCorrect: { + type: 'boolean', + description: 'Automatically fix offenses', + default: false + }, + failLevel: { + type: 'string', + enum: ['error', 'suggestion', 'style'], + description: 'Minimum severity level to fail with error code' + }, + config: { + type: 'string', + description: 'Path to custom .platformos-check.yml config file' + }, + list: { + type: 'boolean', + description: 'List enabled checks without running them', + default: false + }, + print: { + type: 'boolean', + description: 'Print active config to STDOUT', + default: false + } + } + }, + handler: async (params = {}) => { + const startedAt = new Date().toISOString(); + + // Extract parameters used in both try and catch blocks + const appPath = params.appPath || '.'; + const format = params.format || 'json'; + const autoCorrect = !!params.autoCorrect; + const listChecks = !!params.list; + const printConfig = !!params.print; + + try { + + // Build command + let command = 'platformos-check'; + + // Add options + if (listChecks) { + command += ' --list'; + } else if (printConfig) { + command += ' --print'; + } else { + // Only add format for normal checks + command += ` --output ${format}`; + + if (autoCorrect) { + command += ' --auto-correct'; + } + + if (params.failLevel) { + command += 
` --fail-level ${params.failLevel}`; + } + + // Add categories + if (params.category && Array.isArray(params.category)) { + for (const cat of params.category) { + command += ` --category ${cat}`; + } + } + + if (params.excludeCategory && Array.isArray(params.excludeCategory)) { + for (const cat of params.excludeCategory) { + command += ` --exclude-category ${cat}`; + } + } + + if (params.config) { + command += ` --config ${params.config}`; + } + } + + // Add app path + command += ` "${appPath}"`; + + const { stdout, stderr } = await execAsync(command, { maxBuffer: 10 * 1024 * 1024 }); + + // Parse output + let result = stdout; + if (format === 'json' && !listChecks && !printConfig) { + try { + result = JSON.parse(stdout); + } catch { + // If JSON parse fails, return as raw string + result = { raw: stdout }; + } + } + + return { + ok: true, + data: { + result, + format, + appPath, + listChecks, + printConfig, + autoCorrect: autoCorrect + }, + meta: { + startedAt, + finishedAt: new Date().toISOString(), + stderr: stderr ? stderr.trim() : undefined + } + }; + } catch (error) { + // platformos-check returns non-zero exit code when issues found or no files + // We still want to return the output as successful (with flags for context) + const output = error.stdout || error.stderr || ''; + const noFilesFound = output.includes('No platformos_app files found'); + + // Treat as successful if it's a known linter condition (not a real error) + if (noFilesFound || error.stdout) { + let result = error.stdout || error.stderr; + + if (format === 'json' && result && !noFilesFound) { + try { + result = JSON.parse(result); + } catch { + result = { raw: result }; + } + } + + return { + ok: true, + data: { + result: result || 'No platformos_app files found', + format, + appPath, + issues_found: !noFilesFound, + no_files_found: noFilesFound, + autoCorrect: autoCorrect, + exit_code: error.code + }, + meta: { + startedAt, + finishedAt: new Date().toISOString(), + stderr: error.stderr ? error.stderr.trim() : undefined + } + }; + } + + // Actual error (not linter findings) + return { + ok: false, + error: { + code: 'CHECK_ERROR', + message: error.message || String(error), + stderr: error.stderr, + exit_code: error.code + } + }; + } + } +}; + +export default checkTool; diff --git a/mcp-min/check/run.js b/mcp-min/check/run.js new file mode 100644 index 000000000..a41f4708d --- /dev/null +++ b/mcp-min/check/run.js @@ -0,0 +1,179 @@ +// platformos.check-run - run platformos-check Node.js linter (no Ruby required) +import fs from 'fs'; +import path from 'path'; + +const Severity = { + ERROR: 0, + WARNING: 1, + INFO: 2 +}; + +const severityToLabel = (severity) => { + switch (severity) { + case Severity.ERROR: return 'error'; + case Severity.WARNING: return 'warning'; + case Severity.INFO: return 'info'; + default: return 'unknown'; + } +}; + +const uriToPath = (uri) => uri.replace('file://', ''); + +const countBySeverity = (offenses) => { + return offenses.reduce((counts, offense) => { + switch (offense.severity) { + case Severity.ERROR: counts.errors++; break; + case Severity.WARNING: counts.warnings++; break; + case Severity.INFO: counts.info++; break; + } + return counts; + }, { errors: 0, warnings: 0, info: 0 }); +}; + +const checkRunTool = { + description: 'Run platformos-check Node.js linter on the app. Analyzes Liquid/JSON files for violations. No Ruby required. 
Returns structured JSON with offenses grouped by file.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + appPath: { + type: 'string', + description: 'Path to the platformOS app directory to check (default: current directory)' + }, + autoFix: { + type: 'boolean', + description: 'Automatically fix offenses, then re-check and return remaining issues', + default: false + } + } + }, + handler: async (params = {}) => { + const startedAt = new Date().toISOString(); + const appPath = params.appPath || '.'; + const autoFix = !!params.autoFix; + + // Validate path exists + if (!fs.existsSync(appPath)) { + return { + ok: false, + error: { + code: 'PATH_NOT_FOUND', + message: `Path does not exist: ${appPath}` + } + }; + } + + // Validate path is a directory + const stats = fs.statSync(appPath); + if (!stats.isDirectory()) { + return { + ok: false, + error: { + code: 'NOT_A_DIRECTORY', + message: `Path is not a directory: ${appPath}` + } + }; + } + + // Dynamically import @platformos/platformos-check-node + let themeCheck; + try { + themeCheck = await import('@platformos/platformos-check-node'); + } catch { + return { + ok: false, + error: { + code: 'MISSING_DEPENDENCY', + message: 'The @platformos/platformos-check-node package is not installed. Install it with: npm install @platformos/platformos-check-node' + } + }; + } + + try { + const checkPath = path.resolve(appPath); + + // Resolve .platformos-check.yml explicitly — upstream findConfigPath + // only discovers .theme-check.yml + const configFile = path.join(checkPath, '.platformos-check.yml'); + const configPath = fs.existsSync(configFile) ? configFile : undefined; + + // Run checks + const result = await themeCheck.themeCheckRun(checkPath, configPath); + let offenses = result.offenses; + const filesChecked = result.theme ? 
result.theme.length : 0; + + // Auto-fix if requested + let autoFixed = false; + if (autoFix && offenses.length > 0) { + await themeCheck.autofix(result.theme, offenses); + autoFixed = true; + + // Re-run check after autofix + const recheck = await themeCheck.themeCheckRun(checkPath, configPath); + offenses = recheck.offenses; + } + + // Group offenses by file + const grouped = {}; + for (const offense of offenses) { + const absolutePath = uriToPath(offense.uri); + const filePath = path.relative(checkPath, absolutePath); + if (!grouped[filePath]) { + grouped[filePath] = []; + } + grouped[filePath].push(offense); + } + + // Build per-file results + const files = Object.entries(grouped).map(([filePath, fileOffenses]) => { + const counts = countBySeverity(fileOffenses); + return { + path: filePath, + offenses: fileOffenses.map(offense => ({ + check: offense.check, + severity: severityToLabel(offense.severity), + start_row: offense.start.line, + start_column: offense.start.character, + end_row: offense.end.line, + end_column: offense.end.character, + message: offense.message + })), + errorCount: counts.errors, + warningCount: counts.warnings, + infoCount: counts.info + }; + }); + + const totalCounts = countBySeverity(offenses); + + return { + ok: true, + data: { + offenseCount: offenses.length, + fileCount: Object.keys(grouped).length, + errorCount: totalCounts.errors, + warningCount: totalCounts.warnings, + infoCount: totalCounts.info, + filesChecked, + autoFixed, + files + }, + meta: { + startedAt, + finishedAt: new Date().toISOString(), + appPath: checkPath + } + }; + } catch (error) { + return { + ok: false, + error: { + code: 'CHECK_RUN_ERROR', + message: error.message || String(error) + } + }; + } + } +}; + +export default checkRunTool; diff --git a/mcp-min/config.js b/mcp-min/config.js new file mode 100644 index 000000000..a886599c5 --- /dev/null +++ b/mcp-min/config.js @@ -0,0 +1,7 @@ +// Centralized config for mcp-min +export const DEBUG = !!( + process.env.DEBUG && process.env.DEBUG !== '0' && process.env.DEBUG.toLowerCase?.() !== 'false' +) || !!( + process.env.MCP_MIN_DEBUG && process.env.MCP_MIN_DEBUG !== '0' && process.env.MCP_MIN_DEBUG.toLowerCase?.() !== 'false' +); + diff --git a/mcp-min/constants/list.js b/mcp-min/constants/list.js new file mode 100644 index 000000000..5d97ef632 --- /dev/null +++ b/mcp-min/constants/list.js @@ -0,0 +1,80 @@ +// platformos.constants.list tool - list all constants +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(env, settingsModule = settings) { + const found = await settingsModule.fetchSettings(env); + if (found) return { ...found, source: `.pos(${env})` }; + throw new Error(`Environment "${env}" not found in .pos config`); +} + +const QUERY = `query getConstants { + constants(per_page: 99) { + results { name, value, updated_at } + } +}`; + +const constantsListTool = { + description: 'List all constants configured on a platformOS instance.', + inputSchema: { + type: 'object', + additionalProperties: false, + required: ['env'], + properties: { + env: { type: 'string', description: 'Environment name from .pos config' } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + + try { + const auth = await resolveAuth(params.env, ctx.settings || settings); + + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email 
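+        // ctx.Gateway lets tests inject a mock; the default Gateway
+        // (lib/proxy.js) talks to the instance GraphQL endpoint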
}); + + const resp = await gateway.graph({ query: QUERY }); + + if (resp && Array.isArray(resp.errors) && resp.errors.length > 0) { + return { + ok: false, + error: { + code: 'GRAPHQL_ERROR', + message: resp.errors[0]?.message || 'GraphQL error' + } + }; + } + + const constants = resp?.data?.constants?.results || []; + + return { + ok: true, + data: { + constants: constants.map(c => ({ + name: c.name, + value: c.value, + updatedAt: c.updated_at + })), + count: constants.length + }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } catch (e) { + return { + ok: false, + error: { code: 'CONSTANTS_LIST_FAILED', message: String(e.message || e) }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } + } +}; + +export default constantsListTool; diff --git a/mcp-min/constants/set.js b/mcp-min/constants/set.js new file mode 100644 index 000000000..fc3aa2336 --- /dev/null +++ b/mcp-min/constants/set.js @@ -0,0 +1,84 @@ +// platformos.constants.set tool - set a constant +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(env, settingsModule = settings) { + const found = await settingsModule.fetchSettings(env); + if (found) return { ...found, source: `.pos(${env})` }; + throw new Error(`Environment "${env}" not found in .pos config`); +} + +function buildSetMutation(name, value) { + // Escape quotes in name and value for GraphQL string + const escapedName = name.replace(/"/g, '\\"'); + const escapedValue = value.replace(/"/g, '\\"'); + return `mutation { + constant_set(name: "${escapedName}", value: "${escapedValue}") { + name, value + } + }`; +} + +const constantsSetTool = { + description: 'Set a constant on a platformOS instance. 
Creates or updates the constant.', + inputSchema: { + type: 'object', + additionalProperties: false, + required: ['env', 'name', 'value'], + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + name: { type: 'string', description: 'Name of the constant (e.g., API_KEY)' }, + value: { type: 'string', description: 'Value of the constant' } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + + try { + const auth = await resolveAuth(params.env, ctx.settings || settings); + + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + const query = buildSetMutation(params.name, params.value); + const resp = await gateway.graph({ query }); + + if (resp && Array.isArray(resp.errors) && resp.errors.length > 0) { + return { + ok: false, + error: { + code: 'GRAPHQL_ERROR', + message: resp.errors[0]?.message || 'GraphQL error' + } + }; + } + + const result = resp?.data?.constant_set; + + return { + ok: true, + data: { + name: result?.name, + value: result?.value + }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } catch (e) { + return { + ok: false, + error: { code: 'CONSTANTS_SET_FAILED', message: String(e.message || e) }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } + } +}; + +export default constantsSetTool; diff --git a/mcp-min/constants/unset.js b/mcp-min/constants/unset.js new file mode 100644 index 000000000..8e105d2c3 --- /dev/null +++ b/mcp-min/constants/unset.js @@ -0,0 +1,81 @@ +// platformos.constants.unset tool - delete a constant +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(env, settingsModule = settings) { + const found = await settingsModule.fetchSettings(env); + if (found) return { ...found, source: `.pos(${env})` }; + throw new Error(`Environment "${env}" not found in .pos config`); +} + +function buildUnsetMutation(name) { + const escapedName = name.replace(/"/g, '\\"'); + return `mutation { + constant_unset(name: "${escapedName}") { + name + } + }`; +} + +const constantsUnsetTool = { + description: 'Delete a constant from a platformOS instance.', + inputSchema: { + type: 'object', + additionalProperties: false, + required: ['env', 'name'], + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + name: { type: 'string', description: 'Name of the constant to delete' } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + + try { + const auth = await resolveAuth(params.env, ctx.settings || settings); + + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + const query = buildUnsetMutation(params.name); + const resp = await gateway.graph({ query }); + + if (resp && Array.isArray(resp.errors) && resp.errors.length > 0) { + return { + ok: false, + error: { + code: 'GRAPHQL_ERROR', + message: resp.errors[0]?.message || 'GraphQL error' + } + }; + } + + const result = resp?.data?.constant_unset; + + return { + ok: true, + data: { + name: result?.name || params.name, + deleted: !!result + }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } catch (e) { + return { + ok: false, + error: { code: 'CONSTANTS_UNSET_FAILED', message: String(e.message || e) }, + meta: { 
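+          // the error path mirrors the success shape: meta timestamps are
+          // always present for callers that log durations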
+ startedAt, + finishedAt: new Date().toISOString() + } + }; + } + } +}; + +export default constantsUnsetTool; diff --git a/mcp-min/data/clean-status.js b/mcp-min/data/clean-status.js new file mode 100644 index 000000000..b1f73d17a --- /dev/null +++ b/mcp-min/data/clean-status.js @@ -0,0 +1,89 @@ +// platformos.data.clean.status - check the status of a data clean job +import log from '../log.js'; +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' + token.slice(-3); +} + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +const dataCleanStatusTool = { + description: 'Check the status of a data clean job. Poll until status is "done" or "failed".', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' }, + jobId: { type: 'string', description: 'Clean job ID returned from data-clean' } + }, + required: ['jobId'] + }, + handler: async (params, ctx = {}) => { + log.debug('tool:data-clean-status invoked', { jobId: params?.jobId }); + + try { + const auth = await resolveAuth(params); + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + const { jobId } = params; + + if (!jobId) { + return { ok: false, error: { code: 'VALIDATION_ERROR', message: 'jobId is required' } }; + } + + const response = await gateway.dataCleanStatus(jobId); + + // Normalize status - it may be an object with .name or a string + const status = response.status?.name || response.status; + + return { + ok: true, + data: { + id: jobId, + status, + done: status === 'done', + failed: status === 'failed', + pending: ['pending', 'processing', 'scheduled'].includes(status), + response + }, + meta: { + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } catch (e) { + log.error('tool:data-clean-status error', { error: String(e) }); + return { ok: false, error: { code: 'DATA_CLEAN_STATUS_ERROR', message: String(e.message || e) } }; + } + } +}; + +export default dataCleanStatusTool; diff --git a/mcp-min/data/clean.js b/mcp-min/data/clean.js new file mode 100644 index 000000000..1f38507e0 --- /dev/null +++ b/mcp-min/data/clean.js @@ -0,0 +1,121 @@ +// platformos.data.clean - start 
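+// (Hedged usage sketch, not part of the tool code in this diff: a minimal
+// polling loop a caller might run against the clean-status tool above. The
+// `callTool` helper name and the 1s interval are illustrative assumptions.)
+//
+//   let job;
+//   do {
+//     await new Promise(resolve => setTimeout(resolve, 1000));
+//     const res = await callTool('platformos.data.clean.status', { jobId });
+//     if (!res.ok) throw new Error(res.error.message);
+//     job = res.data;
+//   } while (job.pending);
+//   if (job.failed) throw new Error(`Clean job ${job.id} failed`);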
data clean operation (removes data from instance) +import log from '../log.js'; +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +const CONFIRMATION_TEXT = 'CLEAN DATA'; + +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' + token.slice(-3); +} + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +const dataCleanTool = { + description: 'Start data clean operation to remove data from a platformOS instance. DESTRUCTIVE - requires confirmation. Returns job ID for status polling.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' }, + confirmation: { + type: 'string', + description: `Confirmation text - must be exactly "${CONFIRMATION_TEXT}" to proceed` + }, + includeSchema: { + type: 'boolean', + description: 'Also remove instance files (pages, schemas, etc.). Default: false', + default: false + } + }, + required: ['confirmation'] + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:data-clean invoked', { env: params?.env, includeSchema: params?.includeSchema }); + + try { + // Validate confirmation + if (params.confirmation !== CONFIRMATION_TEXT) { + return { + ok: false, + error: { + code: 'CONFIRMATION_REQUIRED', + message: `Confirmation text must be exactly "${CONFIRMATION_TEXT}". This is a destructive operation.`, + expected: CONFIRMATION_TEXT, + received: params.confirmation + } + }; + } + + const auth = await resolveAuth(params); + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + const includeSchema = !!params.includeSchema; + + const response = await gateway.dataClean(CONFIRMATION_TEXT, includeSchema); + + return { + ok: true, + data: { + id: response.id, + status: response.status || 'pending', + includeSchema + }, + warning: includeSchema + ? 'This will remove ALL data AND schema files (pages, schemas, etc.) from the instance!' 
+ : 'This will remove ALL data from the instance!', + meta: { + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } catch (e) { + log.error('tool:data-clean error', { error: String(e) }); + + // Handle specific error cases + if (e.statusCode === 422) { + return { + ok: false, + error: { + code: 'NOT_SUPPORTED', + message: 'Data clean is either not supported by the server or has been disabled.', + statusCode: 422 + } + }; + } + + return { ok: false, error: { code: 'DATA_CLEAN_ERROR', message: String(e.message || e) } }; + } + } +}; + +export default dataCleanTool; diff --git a/mcp-min/data/export-status.js b/mcp-min/data/export-status.js new file mode 100644 index 000000000..9c9788100 --- /dev/null +++ b/mcp-min/data/export-status.js @@ -0,0 +1,105 @@ +// platformos.data.export.status - check the status of a data export job +import log from '../log.js'; +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' + token.slice(-3); +} + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +const dataExportStatusTool = { + description: 'Check the status of a data export job. 
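+// (Hedged usage sketch for the data-clean tool above; `callTool` is an
+// assumed client helper, not part of this diff.)
+//
+//   await callTool('platformos.data.clean', { env: 'staging', confirmation: 'oops' });
+//   // => { ok: false, error: { code: 'CONFIRMATION_REQUIRED', ... } }
+//
+//   await callTool('platformos.data.clean', { env: 'staging', confirmation: 'CLEAN DATA' });
+//   // => { ok: true, data: { id, status: 'pending', includeSchema: false }, warning: '...' }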
When done, returns data (JSON) or zip_file_url (ZIP).', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' }, + jobId: { type: 'string', description: 'Export job ID returned from data-export' }, + isZip: { type: 'boolean', description: 'Whether the export is a ZIP file', default: false } + }, + required: ['jobId'] + }, + handler: async (params, ctx = {}) => { + log.debug('tool:data-export-status invoked', { jobId: params?.jobId }); + + try { + const auth = await resolveAuth(params); + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + const { jobId, isZip = false } = params; + + if (!jobId) { + return { ok: false, error: { code: 'VALIDATION_ERROR', message: 'jobId is required' } }; + } + + const response = await gateway.dataExportStatus(jobId, isZip); + + // Normalize status + const status = response.status?.name || response.status; + + const result = { + ok: true, + data: { + id: jobId, + status, + done: status === 'done', + failed: status === 'failed', + pending: ['pending', 'processing', 'scheduled'].includes(status) + }, + meta: { + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + + // Include export data when done + if (status === 'done') { + if (isZip && response.zip_file_url) { + result.data.zipFileUrl = response.zip_file_url; + } else if (response.data) { + // Transform data to standard format + result.data.exportedData = { + users: response.data.users?.results || [], + transactables: response.data.transactables?.results || [], + models: response.data.models?.results || [] + }; + } + } + + return result; + } catch (e) { + log.error('tool:data-export-status error', { error: String(e) }); + return { ok: false, error: { code: 'DATA_EXPORT_STATUS_ERROR', message: String(e.message || e) } }; + } + } +}; + +export default dataExportStatusTool; diff --git a/mcp-min/data/export.js b/mcp-min/data/export.js new file mode 100644 index 000000000..349a20d2a --- /dev/null +++ b/mcp-min/data/export.js @@ -0,0 +1,103 @@ +// platformos.data.export - start data export from platformOS instance +import log from '../log.js'; +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' 
+ token.slice(-3); +} + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +const dataExportTool = { + description: 'Start data export from platformOS instance. Returns job ID for status polling. When complete, status will include data or zip_file_url.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' }, + exportInternalIds: { + type: 'boolean', + description: 'Use internal object IDs instead of external_id in exported data', + default: false + }, + zip: { + type: 'boolean', + description: 'Export as ZIP archive (returns download URL when complete)', + default: false + } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:data-export invoked', { env: params?.env, zip: params?.zip }); + + try { + const auth = await resolveAuth(params); + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + const exportInternalIds = !!params.exportInternalIds; + const isZip = !!params.zip; + + const exportTask = await gateway.dataExportStart(exportInternalIds, isZip); + + return { + ok: true, + data: { + id: exportTask.id, + status: exportTask.status || 'pending', + isZip + }, + meta: { + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } catch (e) { + log.error('tool:data-export error', { error: String(e) }); + + if (e.statusCode === 404) { + return { + ok: false, + error: { + code: 'NOT_SUPPORTED', + message: 'Data export is not supported by the server.', + statusCode: 404 + } + }; + } + + return { ok: false, error: { code: 'DATA_EXPORT_ERROR', message: String(e.message || e) } }; + } + } +}; + +export default dataExportTool; diff --git a/mcp-min/data/import-status.js b/mcp-min/data/import-status.js new file mode 100644 index 000000000..aea12b6b6 --- /dev/null +++ b/mcp-min/data/import-status.js @@ -0,0 +1,75 @@ +// platformos.data.import.status - check the status of a data import job +import log from '../log.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(env, settingsModule = settings) { + const found = await settingsModule.fetchSettings(env); + if (found) return { ...found, source: `.pos(${env})` }; + throw new 
Error(`Environment "${env}" not found in .pos config`); +} + +const dataImportStatusTool = { + description: 'Check the status of a data import job. Poll until status is "done" or "failed".', + inputSchema: { + type: 'object', + additionalProperties: false, + required: ['env', 'jobId'], + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + jobId: { type: 'string', description: 'Import job ID returned from data-import' } + } + }, + handler: async (params, ctx = {}) => { + log.debug('tool:data-import-status invoked', { jobId: params?.jobId }); + const startedAt = new Date().toISOString(); + + try { + if (!params.jobId) { + return { ok: false, error: { code: 'VALIDATION_ERROR', message: 'jobId is required' } }; + } + + const auth = await resolveAuth(params.env, ctx.settings || settings); + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + const { jobId } = params; + + // Always use ZIP status endpoint (isZip=true) since we convert JSON to ZIP + const response = await gateway.dataImportStatus(jobId, true); + + // Normalize status - it may be an object with .name or a string + const status = response.status?.name || response.status; + + return { + ok: true, + data: { + id: jobId, + status, + done: status === 'done', + failed: status === 'failed', + pending: ['pending', 'processing', 'scheduled'].includes(status), + response + }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } catch (e) { + log.error('tool:data-import-status error', { error: String(e) }); + return { + ok: false, + error: { code: 'DATA_IMPORT_STATUS_ERROR', message: String(e.message || e) }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } + } +}; + +export default dataImportStatusTool; diff --git a/mcp-min/data/import.js b/mcp-min/data/import.js new file mode 100644 index 000000000..a387b483d --- /dev/null +++ b/mcp-min/data/import.js @@ -0,0 +1,208 @@ +// platformos.data.import - start a data import from JSON or ZIP +// JSON is converted to CSV/ZIP format internally (JSON import is deprecated) +import crypto from 'crypto'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import { jsonToZipBuffer } from './json-to-csv.js'; +import { validateRecords, validateJsonStructure } from './validate.js'; +import log from '../log.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; +import isValidJSON from '../../lib/data/isValidJSON.js'; +import { presignUrl } from '../../lib/presignUrl.js'; +import { uploadFile } from '../../lib/s3UploadFile.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(env, settingsModule = settings) { + const found = await settingsModule.fetchSettings(env); + if (found) return { ...found, source: `.pos(${env})` }; + throw new Error(`Environment "${env}" not found in .pos config`); +} + +async function uploadZipBuffer(buffer, gateway, presignUrlFn, uploadFileFn) { + // Write buffer to temp file for upload + const tmpFile = path.join(os.tmpdir(), `pos-import-${crypto.randomBytes(8).toString('hex')}.zip`); + fs.writeFileSync(tmpFile, buffer); + + try { + const instanceId = (await gateway.getInstance()).id; + const s3Path = `instances/${instanceId}/data_imports/${crypto.randomBytes(32).toString('hex')}.zip`; + const { uploadUrl, accessUrl } = await presignUrlFn(s3Path, tmpFile); + await uploadFileFn(tmpFile, uploadUrl); + return accessUrl; + } 
finally { + // Cleanup temp file + if (fs.existsSync(tmpFile)) { + fs.unlinkSync(tmpFile); + } + } +} + +const dataImportTool = { + description: 'Import data to platformOS instance. Accepts JSON (converted to CSV internally) or ZIP file with CSV files.', + inputSchema: { + type: 'object', + additionalProperties: false, + required: ['env'], + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + filePath: { type: 'string', description: 'Path to JSON or ZIP file to import' }, + jsonData: { type: 'object', description: 'JSON data object to import (records, users)' }, + zipFileUrl: { type: 'string', description: 'Remote URL of ZIP archive to import' }, + validate: { type: 'boolean', description: 'Validate records before import (default: true)' }, + strictTypes: { type: 'boolean', description: 'Enforce type checking against schema (default: true)' }, + strictProperties: { type: 'boolean', description: 'Error on properties not defined in schema (default: false)' }, + appPath: { type: 'string', description: 'Path to the app directory containing schema files (default: ".")' } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:data-import invoked', { env: params.env }); + + try { + const auth = await resolveAuth(params.env, ctx.settings || settings); + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + // Set env vars needed by presignUrl + process.env.MARKETPLACE_TOKEN = auth.token; + process.env.MARKETPLACE_URL = auth.url; + + const presignUrlFn = ctx.presignUrl || presignUrl; + const uploadFileFn = ctx.uploadFile || uploadFile; + + const { + filePath, + jsonData, + zipFileUrl, + validate = true, + strictTypes = true, + strictProperties = false, + appPath = '.' 
+ } = params; + + // Validate: exactly one data source must be provided + const sources = [filePath, jsonData, zipFileUrl].filter(Boolean); + if (sources.length === 0) { + return { + ok: false, + error: { code: 'VALIDATION_ERROR', message: 'Provide one of: filePath, jsonData, or zipFileUrl' } + }; + } + if (sources.length > 1) { + return { + ok: false, + error: { code: 'VALIDATION_ERROR', message: 'Provide only one of: filePath, jsonData, or zipFileUrl' } + }; + } + + let zipUrl; + + if (zipFileUrl) { + // Remote ZIP URL provided directly + zipUrl = zipFileUrl; + } else if (filePath) { + const resolved = path.resolve(String(filePath)); + if (!fs.existsSync(resolved)) { + return { ok: false, error: { code: 'FILE_NOT_FOUND', message: `File not found: ${resolved}` } }; + } + + const ext = path.extname(resolved).toLowerCase(); + if (ext === '.zip') { + // Upload ZIP directly + const instanceId = (await gateway.getInstance()).id; + const s3Path = `instances/${instanceId}/data_imports/${crypto.randomBytes(32).toString('hex')}.zip`; + const { uploadUrl, accessUrl } = await presignUrlFn(s3Path, resolved); + await uploadFileFn(resolved, uploadUrl); + zipUrl = accessUrl; + } else { + // Assume JSON file - convert to ZIP + const data = fs.readFileSync(resolved, 'utf8'); + if (!isValidJSON(data)) { + return { + ok: false, + error: { code: 'INVALID_JSON', message: `Invalid JSON in file: ${resolved}` } + }; + } + const parsed = JSON.parse(data); + + // Validate top-level structure + if (validate) { + const structureResult = validateJsonStructure(parsed); + if (!structureResult.ok) { + return structureResult; + } + } + + // Validate records before import if enabled + if (validate && parsed.records && Array.isArray(parsed.records)) { + const validationResult = await validateRecords(parsed.records, { + appPath, + strictTypes, + strictProperties + }); + if (!validationResult.ok) { + return validationResult; + } + } + + const zipBuffer = await jsonToZipBuffer(parsed); + zipUrl = await uploadZipBuffer(zipBuffer, gateway, presignUrlFn, uploadFileFn); + } + } else if (jsonData) { + // Validate top-level structure + if (validate) { + const structureResult = validateJsonStructure(jsonData); + if (!structureResult.ok) { + return structureResult; + } + } + + // Validate records before import if enabled + if (validate && jsonData.records && Array.isArray(jsonData.records)) { + const validationResult = await validateRecords(jsonData.records, { + appPath, + strictTypes, + strictProperties + }); + if (!validationResult.ok) { + return validationResult; + } + } + + // JSON data provided directly - convert to ZIP + const zipBuffer = await jsonToZipBuffer(jsonData); + zipUrl = await uploadZipBuffer(zipBuffer, gateway, presignUrlFn, uploadFileFn); + } + + const formData = { zip_file_url: zipUrl }; + const importTask = await gateway.dataImportStart(formData); + + return { + ok: true, + data: { + id: importTask.id, + status: importTask.status + }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } catch (e) { + log.error('tool:data-import error', { error: String(e) }); + return { + ok: false, + error: { code: 'DATA_IMPORT_ERROR', message: String(e.message || e) }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } + } +}; + +export default dataImportTool; diff --git a/mcp-min/data/json-to-csv.js b/mcp-min/data/json-to-csv.js new file mode 100644 index 000000000..d8a6ad5cf --- /dev/null +++ b/mcp-min/data/json-to-csv.js @@ -0,0 +1,103 @@ +// Convert JSON import format to CSV 
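+// (Hedged usage sketch for the data-import tool above: exactly one of
+// filePath, jsonData, or zipFileUrl may be given. `callTool` is an assumed
+// client helper; the record values are illustrative and assume an
+// app/schema/car.yml exists - otherwise pass validate: false.)
+//
+//   await callTool('platformos.data.import', {
+//     env: 'staging',
+//     jsonData: {
+//       records: [{
+//         id: 1, type: 'car', properties: { color: 'red' },
+//         created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z'
+//       }]
+//     }
+//   });
+//   // => { ok: true, data: { id: '<job id>', status: 'pending' }, meta: { ... } }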
files for ZIP import +import { ZipFile } from 'yazl'; + +// CSV column definitions based on platformOS format +const RECORDS_COLUMNS = ['id', 'user_id', 'created_at', 'updated_at', 'properties', 'model_schema']; +const USERS_COLUMNS = ['id', 'email', 'encrypted_password', 'created_at', 'updated_at', 'name', 'authentication_token', 'slug', 'time_zone', 'first_name', 'middle_name', 'last_name', 'external_id', 'properties']; + +function escapeCSVValue(value) { + if (value === null || value === undefined) { + return ''; + } + + // Convert objects to JSON strings + if (typeof value === 'object') { + value = JSON.stringify(value); + } else { + value = String(value); + } + + // Escape quotes and wrap in quotes if contains comma, quote, or newline + if (value.includes(',') || value.includes('"') || value.includes('\n') || value.includes('\r')) { + return '"' + value.replace(/"/g, '""') + '"'; + } + + return value; +} + +function recordToCSVRow(record, columns) { + return columns.map(col => { + if (col === 'properties') { + // Properties should be JSON object, ensure it's stringified + const props = record.properties || record.customizations || {}; + return escapeCSVValue(props); + } + if (col === 'model_schema') { + // model_schema can be in different fields + return escapeCSVValue(record.model_schema || record.table || record.type || ''); + } + return escapeCSVValue(record[col]); + }).join(','); +} + +function userToCSVRow(user, columns) { + return columns.map(col => { + if (col === 'properties') { + const props = user.properties || {}; + return escapeCSVValue(props); + } + return escapeCSVValue(user[col]); + }).join(','); +} + +function generateRecordsCSV(records) { + if (!records || records.length === 0) return null; + + const header = RECORDS_COLUMNS.join(','); + const rows = records.map(r => recordToCSVRow(r, RECORDS_COLUMNS)); + + return header + '\n' + rows.join('\n'); +} + +function generateUsersCSV(users) { + if (!users || users.length === 0) return null; + + const header = USERS_COLUMNS.join(','); + const rows = users.map(u => userToCSVRow(u, USERS_COLUMNS)); + + return header + '\n' + rows.join('\n'); +} + +/** + * Convert JSON data to ZIP buffer containing CSV files + * @param {Object} jsonData - Data in platformOS JSON import format + * @returns {Promise} - ZIP file as buffer + */ +export async function jsonToZipBuffer(jsonData) { + return new Promise((resolve, reject) => { + const chunks = []; + const zipFile = new ZipFile(); + + zipFile.outputStream.on('data', chunk => chunks.push(chunk)); + zipFile.outputStream.on('end', () => resolve(Buffer.concat(chunks))); + zipFile.outputStream.on('error', reject); + + // Handle records (can be in 'records', 'models', or 'transactables') + const records = jsonData.records || jsonData.models || jsonData.transactables || []; + const recordsCSV = generateRecordsCSV(records); + if (recordsCSV) { + zipFile.addBuffer(Buffer.from(recordsCSV, 'utf8'), 'records.csv', { compress: true }); + } + + // Handle users + const users = jsonData.users || []; + const usersCSV = generateUsersCSV(users); + if (usersCSV) { + zipFile.addBuffer(Buffer.from(usersCSV, 'utf8'), 'users.csv', { compress: true }); + } + + zipFile.end(); + }); +} + +export default jsonToZipBuffer; diff --git a/mcp-min/data/validate-schemas.js b/mcp-min/data/validate-schemas.js new file mode 100644 index 000000000..0e93e0539 --- /dev/null +++ b/mcp-min/data/validate-schemas.js @@ -0,0 +1,211 @@ +// Schema loading utility for platformOS data validation +// Loads schema files from app/schema/ or 
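+// (Illustration of the JSON-to-CSV conversion above with assumed input; not
+// part of this diff. One record becomes one row of records.csv in the ZIP.)
+//
+//   await jsonToZipBuffer({ records: [{ id: 1, type: 'car', properties: { color: 'red' } }] });
+//   // records.csv inside the generated ZIP:
+//   //   id,user_id,created_at,updated_at,properties,model_schema
+//   //   1,,,,"{""color"":""red""}",car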
app/model_schemas/ + +import { createRequire } from 'module'; +const require = createRequire(import.meta.url); +const fs = require('fs'); +const path = require('path'); + +/** + * Simple YAML parser for platformOS schema format + * Handles the specific structure: name, properties array with name/type/options + * Does not depend on js-yaml + */ +function parseSchemaYaml(content) { + const lines = content.split('\n'); + const schema = { name: '', properties: [] }; + + let currentProperty = null; + let inProperties = false; + let inOptions = false; + let inVersions = false; + let propertyIndent = 0; + let optionsIndent = 0; + let versionsIndent = 0; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + const trimmed = line.trim(); + + // Skip empty lines and comments + if (!trimmed || trimmed.startsWith('#')) continue; + + // Calculate indentation + const indent = line.length - line.trimStart().length; + + // Parse name field at root level + if (trimmed.startsWith('name:') && indent === 0) { + schema.name = trimmed.slice(5).trim(); + continue; + } + + // Detect properties array start + if (trimmed === 'properties:' && indent === 0) { + inProperties = true; + inOptions = false; + inVersions = false; + continue; + } + + // Parse version items first (must check before property items to avoid confusion) + // Version items are deeper inside options + if (inVersions && trimmed.startsWith('- name:') && indent > versionsIndent) { + const versionName = trimmed.slice(7).trim(); + currentProperty.options.versions.push({ name: versionName }); + continue; + } + + // Parse property list items (starts with "- name:") + // Only match at the property level, not deeper inside versions + if (inProperties && trimmed.startsWith('- name:') && (!inVersions || indent <= versionsIndent)) { + // Save previous property if exists + if (currentProperty) { + schema.properties.push(currentProperty); + } + currentProperty = { + name: trimmed.slice(7).trim(), + type: 'string' // default + }; + propertyIndent = indent; + inOptions = false; + inVersions = false; + continue; + } + + // Parse property type (at property indent level + 2 or more) + if (currentProperty && !inVersions && trimmed.startsWith('type:') && indent > propertyIndent) { + currentProperty.type = trimmed.slice(5).trim(); + continue; + } + + // Detect options section + if (currentProperty && trimmed === 'options:' && indent > propertyIndent) { + currentProperty.options = {}; + inOptions = true; + optionsIndent = indent; + inVersions = false; + continue; + } + + // Detect versions array in options + if (inOptions && trimmed === 'versions:' && indent > optionsIndent) { + currentProperty.options.versions = []; + inVersions = true; + versionsIndent = indent; + continue; + } + + // Check if we've left versions section (indent went back) + if (inVersions && indent <= versionsIndent && !trimmed.startsWith('- ')) { + inVersions = false; + } + + // Check if we've left options section + if (inOptions && indent <= optionsIndent && trimmed !== 'versions:') { + // If this is a new property definition, let the next iteration handle it + if (trimmed.startsWith('- name:')) { + inOptions = false; + inVersions = false; + // Re-process this line + i--; + continue; + } + } + + // Parse content_length option (simple key: value in options) + if (inOptions && !inVersions && trimmed.includes(':') && indent > optionsIndent) { + const colonIdx = trimmed.indexOf(':'); + const key = trimmed.slice(0, colonIdx).trim(); + const value = trimmed.slice(colonIdx + 1).trim(); + 
if (key && value) { + currentProperty.options[key] = isNaN(value) ? value : Number(value); + } + } + } + + // Don't forget the last property + if (currentProperty) { + schema.properties.push(currentProperty); + } + + return schema; +} + +/** + * Load a single schema by table name + * @param {string} tableName - The schema/table name (without .yml extension) + * @param {string} appPath - Path to the app directory (default: '.') + * @returns {Object|null} Schema object or null if not found + */ +export function loadSchema(tableName, appPath = '.') { + const searchPaths = [ + path.join(appPath, 'app', 'schema', `${tableName}.yml`), + path.join(appPath, 'app', 'model_schemas', `${tableName}.yml`) + ]; + + for (const schemaPath of searchPaths) { + if (fs.existsSync(schemaPath)) { + try { + const content = fs.readFileSync(schemaPath, 'utf8'); + return parseSchemaYaml(content); + } catch (e) { + // File exists but couldn't be parsed + return null; + } + } + } + + return null; +} + +/** + * Load all schemas from the app directory + * @param {string} appPath - Path to the app directory (default: '.') + * @returns {Map} Map of schema name to schema object + */ +export function loadAllSchemas(appPath = '.') { + const schemas = new Map(); + const searchDirs = [ + path.join(appPath, 'app', 'schema'), + path.join(appPath, 'app', 'model_schemas') + ]; + + for (const dir of searchDirs) { + if (fs.existsSync(dir)) { + try { + const files = fs.readdirSync(dir); + for (const file of files) { + if (file.endsWith('.yml') || file.endsWith('.yaml')) { + const schemaPath = path.join(dir, file); + try { + const content = fs.readFileSync(schemaPath, 'utf8'); + const schema = parseSchemaYaml(content); + if (schema.name) { + schemas.set(schema.name, schema); + } + } catch (e) { + // Skip files that can't be parsed + } + } + } + } catch (e) { + // Directory exists but couldn't be read + } + } + } + + return schemas; +} + +/** + * Get version names from a schema property with upload type + * @param {Object} property - Schema property object + * @returns {string[]} Array of version names + */ +export function getUploadVersions(property) { + if (property.type !== 'upload') return []; + if (!property.options?.versions) return []; + return property.options.versions.map(v => v.name); +} + +export { parseSchemaYaml }; diff --git a/mcp-min/data/validate-tool.js b/mcp-min/data/validate-tool.js new file mode 100644 index 000000000..e51be7e2b --- /dev/null +++ b/mcp-min/data/validate-tool.js @@ -0,0 +1,169 @@ +// platformos.data.validate - Validate JSON data against platformOS schemas +import { createRequire } from 'module'; +import { validateRecords, validateJsonStructure } from './validate.js'; +import log from '../log.js'; + +const require = createRequire(import.meta.url); +const fs = require('fs'); +const path = require('path'); + +const dataValidateTool = { + description: 'Validate JSON data against platformOS schemas before import. 
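+// (Illustration for the schema loader above, using an assumed
+// app/schema/car.yml; not part of this diff.)
+//
+//   name: car
+//   properties:
+//     - name: color
+//       type: string
+//     - name: photo
+//       type: upload
+//       options:
+//         versions:
+//           - name: thumb
+//
+// parseSchemaYaml(content) returns:
+//   { name: 'car',
+//     properties: [
+//       { name: 'color', type: 'string' },
+//       { name: 'photo', type: 'upload', options: { versions: [{ name: 'thumb' }] } } ] }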
Checks required fields (id, type, properties, created_at, updated_at), verifies types match schema files in app/schema/, and validates property names and types.', + inputSchema: { + type: 'object', + additionalProperties: false, + required: ['env'], + properties: { + env: { + type: 'string', + description: 'Environment name from .pos config (used for context, validation is local)' + }, + filePath: { + type: 'string', + description: 'Path to JSON file containing records to validate' + }, + jsonData: { + type: 'object', + description: 'JSON data object to validate (with "records" array)' + }, + appPath: { + type: 'string', + description: 'Path to the app directory containing schema files (default: ".")' + }, + strictTypes: { + type: 'boolean', + description: 'Enforce type checking against schema (default: true)' + }, + strictProperties: { + type: 'boolean', + description: 'Error on properties not defined in schema (default: false)' + }, + maxErrors: { + type: 'integer', + description: 'Maximum number of errors to report (default: 100)' + } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:data-validate invoked', { env: params.env }); + + try { + const { + filePath, + jsonData, + appPath = '.', + strictTypes = true, + strictProperties = false, + maxErrors = 100 + } = params; + + // Validate: exactly one data source must be provided + const sources = [filePath, jsonData].filter(Boolean); + if (sources.length === 0) { + return { + ok: false, + error: { + code: 'VALIDATION_ERROR', + message: 'Provide one of: filePath or jsonData' + } + }; + } + if (sources.length > 1) { + return { + ok: false, + error: { + code: 'VALIDATION_ERROR', + message: 'Provide only one of: filePath or jsonData' + } + }; + } + + let data; + + if (filePath) { + const resolved = path.resolve(String(filePath)); + if (!fs.existsSync(resolved)) { + return { + ok: false, + error: { + code: 'FILE_NOT_FOUND', + message: `File not found: ${resolved}` + } + }; + } + + try { + const content = fs.readFileSync(resolved, 'utf8'); + data = JSON.parse(content); + } catch (e) { + return { + ok: false, + error: { + code: 'INVALID_JSON', + message: `Invalid JSON in file: ${e.message}` + } + }; + } + } else { + data = jsonData; + } + + // Validate top-level structure + const structureResult = validateJsonStructure(data); + if (!structureResult.ok) { + return { + ...structureResult, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } + + // Extract records array + const records = data.records || []; + + if (!Array.isArray(records)) { + return { + ok: false, + error: { + code: 'INVALID_FORMAT', + message: 'Expected "records" field to be an array' + } + }; + } + + // Run validation + const result = await validateRecords(records, { + appPath, + strictTypes, + strictProperties, + maxErrors + }); + + return { + ...result, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } catch (e) { + log.error('tool:data-validate error', { error: String(e) }); + return { + ok: false, + error: { + code: 'VALIDATION_ERROR', + message: String(e.message || e) + }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } + } +}; + +export default dataValidateTool; diff --git a/mcp-min/data/validate.js b/mcp-min/data/validate.js new file mode 100644 index 000000000..81fd99289 --- /dev/null +++ b/mcp-min/data/validate.js @@ -0,0 +1,386 @@ +// Core validation module for platformOS data import +// Validates JSON records against schema 
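definitions
+
+// (Hedged usage sketch for the data-validate tool above; `callTool` is an
+// assumed client helper. A record missing created_at/updated_at fails with
+// per-record details.)
+//
+//   const res = await callTool('platformos.data.validate', {
+//     env: 'staging',
+//     jsonData: { records: [{ id: 1, type: 'car', properties: {} }] }
+//   });
+//   // => { ok: false, error: { code: 'VALIDATION_FAILED',
+//   //        message: 'Validation failed with ... error(s) in 1 record(s)',
+//   //        details: [{ recordIndex: 0, recordId: 1, recordType: 'car', errors: [...] }] } }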
+import { loadSchema, loadAllSchemas, getUploadVersions } from './validate-schemas.js';
+
+// UUID v4 regex pattern
+const UUID_V4_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
+
+// ISO 8601 datetime regex (flexible)
+const ISO_DATETIME_REGEX = /^\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}(\.\d+)?(Z|[+-]\d{2}:?\d{2})?)?$/;
+
+/**
+ * Validate UUID v4 format
+ */
+function isValidUUID(value) {
+  return typeof value === 'string' && UUID_V4_REGEX.test(value);
+}
+
+/**
+ * Validate ISO 8601 datetime format
+ */
+function isValidDatetime(value) {
+  if (typeof value !== 'string') return false;
+  if (!ISO_DATETIME_REGEX.test(value)) return false;
+  // Also verify it parses to a valid date
+  const date = new Date(value);
+  return !isNaN(date.getTime());
+}
+
+/**
+ * Check if a JS value matches a schema type
+ */
+function matchesSchemaType(value, schemaType) {
+  if (value === null || value === undefined) {
+    // null/undefined allowed for optional fields
+    return true;
+  }
+
+  switch (schemaType) {
+    case 'string':
+    case 'text':
+      return typeof value === 'string';
+
+    case 'integer':
+      return Number.isInteger(value);
+
+    case 'float':
+      return typeof value === 'number' && !isNaN(value);
+
+    case 'boolean':
+      return typeof value === 'boolean';
+
+    case 'datetime':
+      return isValidDatetime(value);
+
+    case 'array':
+      return Array.isArray(value);
+
+    case 'upload':
+      // Upload must be an object (null was already accepted above)
+      if (typeof value !== 'object' || value === null) return false;
+      // A path, when present, must be a string; uploads without a path are tolerated
+      return typeof value.path === 'string' || value.path === undefined;
+
+    default:
+      // Unknown type - be permissive
+      return true;
+  }
+}
+
+/**
+ * Validate upload property versions against schema
+ */
+function validateUploadVersions(value, schemaProperty) {
+  const errors = [];
+  if (!value || typeof value !== 'object') return errors;
+
+  const expectedVersions = getUploadVersions(schemaProperty);
+  if (expectedVersions.length === 0) return errors;
+
+  const providedVersions = (value.versions && typeof value.versions === 'object')
+    ? Object.keys(value.versions)
+    : [];
+
+  // Check for missing versions (defined in schema but not provided)
+  for (const version of expectedVersions) {
+    if (!providedVersions.includes(version)) {
+      errors.push({
+        field: `versions.${version}`,
+        code: 'MISSING_VERSION',
+        message: `Missing required version "${version}". Expected versions: ${expectedVersions.join(', ')}`,
+        value: undefined
+      });
+    }
+  }
+
+  // Check for unknown versions (provided but not in schema)
+  for (const version of providedVersions) {
+    if (!expectedVersions.includes(version)) {
+      errors.push({
+        field: `versions.${version}`,
+        code: 'INVALID_VERSIONS',
+        message: `Unknown version "${version}".
Expected one of: ${expectedVersions.join(', ')}`, + value: version + }); + } + } + + return errors; +} + +/** + * Validate a single record against schema + */ +function validateRecord(record, index, schemas, options) { + const errors = []; + const { strictTypes = true, strictProperties = false } = options; + + // Required field: id (any non-empty value) + if (!record.id && record.id !== 0) { + errors.push({ + field: 'id', + code: 'MISSING_ID', + message: 'Record is missing required field "id"', + value: undefined + }); + } + + // Required field: type + if (!record.type) { + errors.push({ + field: 'type', + code: 'MISSING_TYPE', + message: 'Record is missing required field "type"', + value: undefined + }); + } + + // Required field: properties + if (!record.properties) { + errors.push({ + field: 'properties', + code: 'MISSING_PROPERTIES', + message: 'Record is missing required field "properties"', + value: undefined + }); + } else if (typeof record.properties !== 'object' || Array.isArray(record.properties)) { + errors.push({ + field: 'properties', + code: 'INVALID_PROPERTIES', + message: 'Field "properties" must be an object', + value: typeof record.properties + }); + } + + // Required field: created_at + if (!record.created_at) { + errors.push({ + field: 'created_at', + code: 'MISSING_CREATED_AT', + message: 'Record is missing required field "created_at"', + value: undefined + }); + } else if (!isValidDatetime(record.created_at)) { + errors.push({ + field: 'created_at', + code: 'INVALID_DATETIME', + message: `Invalid datetime format for "created_at": "${record.created_at}"`, + value: record.created_at + }); + } + + // Required field: updated_at + if (!record.updated_at) { + errors.push({ + field: 'updated_at', + code: 'MISSING_UPDATED_AT', + message: 'Record is missing required field "updated_at"', + value: undefined + }); + } else if (!isValidDatetime(record.updated_at)) { + errors.push({ + field: 'updated_at', + code: 'INVALID_DATETIME', + message: `Invalid datetime format for "updated_at": "${record.updated_at}"`, + value: record.updated_at + }); + } + + // Schema validation (only if type is present and schemas are loaded) + if (record.type && schemas) { + const schema = schemas.get(record.type); + + if (!schema) { + errors.push({ + field: 'type', + code: 'UNKNOWN_TYPE', + message: `No schema found for type "${record.type}"`, + value: record.type + }); + } else if (record.properties && typeof record.properties === 'object') { + // Build property map from schema + const schemaProps = new Map(); + for (const prop of schema.properties || []) { + schemaProps.set(prop.name, prop); + } + + // Validate each property in the record + for (const [propName, propValue] of Object.entries(record.properties)) { + const schemaProp = schemaProps.get(propName); + + if (!schemaProp) { + if (strictProperties) { + errors.push({ + field: `properties.${propName}`, + code: 'UNKNOWN_PROPERTY', + message: `Property "${propName}" is not defined in schema "${record.type}"`, + value: propValue + }); + } + continue; + } + + // Type validation + if (strictTypes && propValue !== null && propValue !== undefined) { + if (!matchesSchemaType(propValue, schemaProp.type)) { + errors.push({ + field: `properties.${propName}`, + code: 'TYPE_MISMATCH', + message: `Property "${propName}" has wrong type. 
Expected "${schemaProp.type}", got "${typeof propValue}"`, + value: propValue + }); + } + + // Upload version validation + if (schemaProp.type === 'upload' && typeof propValue === 'object') { + const versionErrors = validateUploadVersions(propValue, schemaProp); + errors.push(...versionErrors.map(e => ({ + ...e, + field: `properties.${propName}.${e.field}` + }))); + } + } + } + } + } + + return errors; +} + +/** + * Validate an array of records against platformOS schemas + * + * @param {Array} records - Array of record objects to validate + * @param {Object} options - Validation options + * @param {string} options.appPath - Path to the app directory (default: '.') + * @param {boolean} options.strictTypes - Enforce type checking (default: true) + * @param {boolean} options.strictProperties - Error on unknown properties (default: false) + * @param {number} options.maxErrors - Maximum errors to collect (default: 100) + * @param {Map} options.schemas - Pre-loaded schemas (optional, will load if not provided) + * @returns {Object} Validation result { ok, data?, error? } + */ +export async function validateRecords(records, options = {}) { + const { + appPath = '.', + strictTypes = true, + strictProperties = false, + maxErrors = 100, + schemas: preloadedSchemas + } = options; + + // Input validation + if (!Array.isArray(records)) { + return { + ok: false, + error: { + code: 'INVALID_INPUT', + message: 'Records must be an array', + details: [] + } + }; + } + + if (records.length === 0) { + return { + ok: true, + data: { + valid: true, + recordsValidated: 0 + } + }; + } + + // Load schemas + const schemas = preloadedSchemas || loadAllSchemas(appPath); + + const allErrors = []; + let totalErrors = 0; + + for (let i = 0; i < records.length; i++) { + const record = records[i]; + const recordErrors = validateRecord(record, i, schemas, { strictTypes, strictProperties }); + + if (recordErrors.length > 0) { + allErrors.push({ + recordIndex: i, + recordId: record.id || '(no id)', + recordType: record.type || '(no type)', + errors: recordErrors + }); + + totalErrors += recordErrors.length; + if (totalErrors >= maxErrors) { + break; + } + } + } + + if (allErrors.length > 0) { + return { + ok: false, + error: { + code: 'VALIDATION_FAILED', + message: `Validation failed with ${totalErrors} error(s) in ${allErrors.length} record(s)`, + details: allErrors + } + }; + } + + return { + ok: true, + data: { + valid: true, + recordsValidated: records.length + } + }; +} + +const VALID_TOP_LEVEL_KEYS = ['records', 'users']; + +/** + * Validate the top-level structure of import JSON data + * Must have at least one of: records, users + * @param {any} data - The parsed JSON data + * @returns {Object} { ok: true } or { ok: false, error: { code, message } } + */ +function validateJsonStructure(data) { + if (!data || typeof data !== 'object' || Array.isArray(data)) { + return { + ok: false, + error: { + code: 'INVALID_STRUCTURE', + message: 'Import data must be an object with "records" and/or "users" keys' + } + }; + } + + const topKeys = Object.keys(data); + const validKeys = topKeys.filter(k => VALID_TOP_LEVEL_KEYS.includes(k)); + const invalidKeys = topKeys.filter(k => !VALID_TOP_LEVEL_KEYS.includes(k)); + + if (validKeys.length === 0) { + return { + ok: false, + error: { + code: 'INVALID_STRUCTURE', + message: `Import data must contain at least one of: ${VALID_TOP_LEVEL_KEYS.join(', ')}. 
Found keys: ${topKeys.join(', ') || '(none)'}`
+      }
+    };
+  }
+
+  if (invalidKeys.length > 0) {
+    return {
+      ok: false,
+      error: {
+        code: 'INVALID_STRUCTURE',
+        message: `Unknown top-level keys: ${invalidKeys.join(', ')}. Valid keys are: ${VALID_TOP_LEVEL_KEYS.join(', ')}`
+      }
+    };
+  }
+
+  return { ok: true };
+}
+
+// Export helper functions for testing
+export { isValidUUID, isValidDatetime, matchesSchemaType, validateRecord, validateJsonStructure };
diff --git a/mcp-min/deploy/start.js b/mcp-min/deploy/start.js
new file mode 100644
index 000000000..7dda5223e
--- /dev/null
+++ b/mcp-min/deploy/start.js
@@ -0,0 +1,150 @@
+// platformos.deploy.start - create archive and deploy to platformOS instance
+import fs from 'fs';
+import log from '../log.js';
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+import { makeArchive } from '../../lib/archive.js';
+import { deployAssets } from '../../lib/assets.js';
+import dir from '../../lib/directories.js';
+
+// Aliases for backwards compatibility
+const archive = { makeArchive };
+const assets = { deployAssets };
+
+const settings = { fetchSettings };
+
+function maskToken(token) {
+  if (!token) return token;
+  return token.slice(0, 3) + '...' + token.slice(-3);
+}
+
+async function resolveAuth(params) {
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
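+// (Hedged illustration of the auth precedence implemented above; values are
+// assumed.)
+//
+//   resolveAuth({ url, email, token })  // 1. explicit params win
+//   resolveAuth({})                     // 2. MPKIT_URL/MPKIT_EMAIL/MPKIT_TOKEN env vars
+//   resolveAuth({ env: 'staging' })     // 3. named environment from .pos
+//                                       // 4. otherwise: first environment in .pos,
+//                                       //    else throws AUTH_MISSING
+
+const startDeployTool = {
+  description: 'Deploy to a platformOS instance.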
Creates archive from app/ and modules/ directories, uploads it, and deploys assets directly to S3.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' }, + partial: { type: 'boolean', description: 'Partial deploy - does not remove files missing from build', default: false } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:deploy-start invoked', { env: params?.env, partial: params?.partial }); + + try { + const auth = await resolveAuth(params); + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + + // Set env vars needed by archive and assets modules + process.env.MARKETPLACE_EMAIL = auth.email; + process.env.MARKETPLACE_TOKEN = auth.token; + process.env.MARKETPLACE_URL = auth.url; + + const partial = !!params.partial; + const archivePath = './tmp/release.zip'; + + // Check for deployable directories + const availableDirs = dir.available(); + if (availableDirs.length === 0) { + return { + ok: false, + error: { + code: 'NO_DIRECTORIES', + message: `No deployable directories found. Need at least one of: ${dir.ALLOWED.join(', ')}` + } + }; + } + + // Ensure tmp directory exists + if (!fs.existsSync('./tmp')) { + fs.mkdirSync('./tmp', { recursive: true }); + } + + // Create archive (without assets - they're uploaded directly) + const env = { TARGET: archivePath }; + const numberOfFiles = await archive.makeArchive(env, { withoutAssets: true }); + + if (numberOfFiles === 0 || numberOfFiles === false) { + return { + ok: false, + error: { code: 'EMPTY_ARCHIVE', message: 'No files to deploy. Archive would be empty.' 
} + }; + } + + // Push the archive + const formData = { + 'marketplace_builder[partial_deploy]': String(partial), + 'marketplace_builder[zip_file]': fs.createReadStream(archivePath) + }; + + const pushResponse = await gateway.push(formData); + + // Deploy assets in the background (S3 upload + CDN wait can take 90s+) + let assetsInfo = null; + try { + const assetsToDeploy = await files.getAssets(); + if (assetsToDeploy.length > 0) { + // Fire and forget - don't block the MCP response + assets.deployAssets(gateway).then(() => { + log.info('Background asset deployment completed'); + }).catch(err => { + log.error('Background asset deployment failed', { error: String(err) }); + }); + assetsInfo = { count: assetsToDeploy.length, status: 'deploying_in_background' }; + } else { + assetsInfo = { count: 0, skipped: true }; + } + } catch (assetErr) { + assetsInfo = { error: String(assetErr) }; + } + + return { + ok: true, + data: { + id: pushResponse.id, + status: pushResponse.status + }, + archive: { path: archivePath, fileCount: numberOfFiles }, + assets: assetsInfo, + meta: { + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source }, + params: { partial } + } + }; + } catch (e) { + log.error('tool:deploy-start error', { error: String(e) }); + return { ok: false, error: { code: 'DEPLOY_START_ERROR', message: String(e.message || e) } }; + } + } +}; + +export default startDeployTool; diff --git a/mcp-min/deploy/status.js b/mcp-min/deploy/status.js new file mode 100644 index 000000000..188719313 --- /dev/null +++ b/mcp-min/deploy/status.js @@ -0,0 +1,64 @@ +// platformos.deploy.status - check deployment status via Gateway.getStatus +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +const statusDeployTool = { + description: 'Get current deployment status using Gateway.getStatus(id).', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string' }, + url: { type: 'string' }, + email: { type: 'string' }, + token: { type: 'string' }, + endpoint: { type: 'string' }, + id: { type: 'string', description: 'Deployment ID returned from start' } + }, + required: ['id'] + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + try { + const auth = await resolveAuth(params); + const baseUrl = params?.endpoint ? 
params.endpoint : auth.url; + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email }); + + const resp = await gateway.getStatus(params.id); + return { + ok: true, + data: resp, + meta: { startedAt, finishedAt: new Date().toISOString(), auth: { url: baseUrl, email: auth.email, source: auth.source } } + }; + } catch (e) { + return { ok: false, error: { code: 'DEPLOY_STATUS_ERROR', message: String(e) } }; + } + } +}; + +export default statusDeployTool; diff --git a/mcp-min/deploy/wait.js b/mcp-min/deploy/wait.js new file mode 100644 index 000000000..75b2579a6 --- /dev/null +++ b/mcp-min/deploy/wait.js @@ -0,0 +1,85 @@ +// platformos.deploy.wait - poll deployment status until completion +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +function delay(ms) { return new Promise(r => setTimeout(r, ms)); } + +const waitDeployTool = { + description: 'Wait for deployment to finish. Polls Gateway.getStatus(id) every intervalMs (default 1000ms) and errors on status=error.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string' }, + url: { type: 'string' }, + email: { type: 'string' }, + token: { type: 'string' }, + endpoint: { type: 'string' }, + id: { type: 'string', description: 'Deployment ID' }, + intervalMs: { type: 'integer', minimum: 200, default: 1000 }, + maxWaitMs: { type: 'integer', minimum: 1000, description: 'Optional maximum wait in ms; if exceeded returns timeout error' } + }, + required: ['id'] + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + try { + const auth = await resolveAuth(params); + const baseUrl = params?.endpoint ? params.endpoint : auth.url; + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email }); + + const id = params.id; + const interval = Number.isFinite(params?.intervalMs) ? Number(params.intervalMs) : 1000; + const started = Date.now(); + const maxWait = Number.isFinite(params?.maxWaitMs) && params.maxWaitMs > 0 ? 
diff --git a/mcp-min/generators/help.js b/mcp-min/generators/help.js
new file mode 100644
index 000000000..0d390a695
--- /dev/null
+++ b/mcp-min/generators/help.js
@@ -0,0 +1,25 @@
+import path from 'path';
+import { showHelp } from './utils.js';
+
+const helpTool = {
+  description: 'Show detailed help for a specific generator',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      generatorPath: { type: 'string', description: 'Path like modules/core/generators/' }
+    },
+    required: ['generatorPath']
+  },
+  handler: async (params, ctx = {}) => {
+    try {
+      const { generatorPath } = params || {};
+      const info = showHelp(path.isAbsolute(generatorPath) ? generatorPath : path.join(process.cwd(), generatorPath), ctx.yeomanEnv);
+      return info;
+    } catch (e) {
+      return { success: false, error: { code: 'GENERATOR_HELP_ERROR', message: String(e?.message || e) } };
+    }
+  }
+};
+
+export default helpTool;
diff --git a/mcp-min/generators/list.js b/mcp-min/generators/list.js
new file mode 100644
index 000000000..e74beba4a
--- /dev/null
+++ b/mcp-min/generators/list.js
@@ -0,0 +1,24 @@
+import path from 'path';
+import { listGeneratorPathsSync, showHelp } from './utils.js';
+
+const listTool = {
+  description: 'List available generators discovered under **/generators/*/index.js with required and optional args',
+  inputSchema: { type: 'object', additionalProperties: false, properties: {} },
+  handler: async (_params, ctx = {}) => {
+    const gens = listGeneratorPathsSync(ctx.globSync);
+    const detailed = gens.map((g) => {
+      try {
+        const p = path.isAbsolute(g.path) ? g.path : path.join(process.cwd(), g.path);
+        const info = showHelp(p, ctx.yeomanEnv);
+        const required = Array.isArray(info?.args) ? info.args.filter(a => a.required).map(a => a.name) : [];
+        const optional = Array.isArray(info?.args) ? info.args.filter(a => !a.required).map(a => a.name) : [];
+        return { ...g, required, optional };
+      } catch {
+        return { ...g, required: [], optional: [] };
+      }
+    });
+    return { generators: detailed };
+  }
+};
+
+export default listTool;
diff --git a/mcp-min/generators/run.js b/mcp-min/generators/run.js
new file mode 100644
index 000000000..082cadd8c
--- /dev/null
+++ b/mcp-min/generators/run.js
@@ -0,0 +1,42 @@
+import path from 'path';
+import { runGenerator } from './utils.js';
+
+const runTool = {
+  description: 'Run a yeoman generator by path with arguments and options',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      generatorPath: { type: 'string' },
+      args: { type: 'array', items: { type: 'string' }, description: 'Positional arguments passed to generator (order matters)' },
+      options: { type: 'object', additionalProperties: true, description: 'Options passed to generator (like --name=value)' },
+      requireArgs: { type: 'boolean', description: 'If true, validate that required generator args are provided', default: true }
+    },
+    required: ['generatorPath', 'args']
+  },
+  handler: async (params, ctx = {}) => {
+    const { generatorPath, args = [], options = {}, requireArgs = true } = params || {};
+    const resolvedPath = path.isAbsolute(generatorPath) ? generatorPath : path.join(process.cwd(), generatorPath);
+
+    // If validation requested, introspect required args using showHelp and enforce presence
+    if (requireArgs) {
+      try {
+        const info = (await import('./help.js')).default;
+        const helpInfo = await info.handler({ generatorPath: resolvedPath }, { yeomanEnv: ctx.yeomanEnv });
+        if (helpInfo && helpInfo.args && Array.isArray(helpInfo.args)) {
+          const requiredNames = helpInfo.args.filter(a => a.required).map(a => a.name);
+          if (requiredNames.length > 0 && (!Array.isArray(args) || args.length < requiredNames.length)) {
+            return { success: false, error: { code: 'MISSING_REQUIRED_ARGUMENTS', message: `Missing required args: ${requiredNames.join(', ')}` }, required: requiredNames };
+          }
+        }
+      } catch {
+        // Ignore help errors; fall back to running
+      }
+    }
+
+    const result = await runGenerator(resolvedPath, args, options, ctx.yeomanEnv);
+    return { success: true, result };
+  }
+};
+
+export default runTool;
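A minimal sketch (hypothetical generator path and arguments) of the discover-then-run flow these two tools are designed for:

```js
// Minimal sketch: list generators, then run one with positional args and options.
import listTool from './mcp-min/generators/list.js';
import runTool from './mcp-min/generators/run.js';

const { generators } = await listTool.handler({});
console.log(generators); // e.g. [{ name: 'page', path: 'modules/core/generators/page', required: [...], optional: [...] }]

const out = await runTool.handler({
  generatorPath: 'modules/core/generators/page', // hypothetical path
  args: ['contact'],                             // positional args, order matters
  options: { force: true }                       // forwarded like --force
});
console.log(out.success ? 'generated' : out.error);
```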
diff --git a/mcp-min/generators/utils.js b/mcp-min/generators/utils.js
new file mode 100644
index 000000000..c7698f0b5
--- /dev/null
+++ b/mcp-min/generators/utils.js
@@ -0,0 +1,94 @@
+import path from 'path';
+import table from 'text-table';
+import { execaSync } from 'execa';
+import fastGlob from 'fast-glob';
+import yeoman from 'yeoman-environment';
+
+function listGeneratorPathsSync(globSync) {
+  const glob = globSync || fastGlob.sync;
+  const files = glob('**/generators/*/index.js', { dot: false, ignore: ['**/node_modules/**', '**/.git/**'] });
+  const entries = files.map((f) => {
+    const generatorPath = f.replace(/\/index\.js$/, '');
+    const name = generatorPath.split('/').pop();
+    return { name, path: generatorPath };
+  });
+  return entries;
+}
+
+function ensureEnv(yeomanEnv) {
+  if (yeomanEnv) return yeomanEnv;
+  return yeoman.createEnv();
+}
+
+function registerGenerator(generatorPath, yeomanEnv) {
+  const env = ensureEnv(yeomanEnv);
+  const generatorName = path.basename(generatorPath);
+
+  // Resolve absolute path to the generator's index.js (support absolute and relative inputs)
+  const fullIndexPath = path.isAbsolute(generatorPath)
+    ? path.join(generatorPath, 'index.js')
+    : path.join(process.cwd(), generatorPath, 'index.js');
+
+  env.register(fullIndexPath, generatorName);
+  try {
+    env.get(generatorName);
+  } catch (e) {
+    if (String(e.message || e).includes('Cannot find module')) {
+      // Attempt dependency install in likely directories, then retry
+      const candidates = [];
+      // If path matches modules/, try installing in that module root
+      const m = generatorPath.match(/modules\/[^/]+/);
+      if (m) candidates.push(path.isAbsolute(m[0]) ? m[0] : path.join(process.cwd(), m[0]));
+      // Also try the generator root directory
+      candidates.push(path.dirname(fullIndexPath));
+
+      for (const dir of candidates) {
+        try {
+          execaSync('npm', ['install'], { stdio: 'inherit', cwd: dir });
+        } catch {}
+      }
+      // Retry loading the generator
+      env.get(generatorName);
+    } else {
+      throw e;
+    }
+  }
+  return { name: generatorName, env };
+}
+
+function showHelp(generatorPath, yeomanEnv) {
+  const { name: generatorName, env } = registerGenerator(generatorPath, yeomanEnv);
+  const generator = env.get(generatorName);
+  const instance = env.instantiate(generator, ['']);
+  const usage = instance._arguments.map((a) => `<${a.name}>`).join(' ');
+  const argsHelp = instance.argumentsHelp();
+  const rawOpts = instance._options || [];
+  const optsArr = Array.isArray(rawOpts) ? rawOpts : Object.values(rawOpts);
+  // Show options unless explicitly hidden (hide === true or 'yes')
+  const rows = (optsArr || [])
+    .filter((opt) => opt && !(opt.hide === true || String(opt.hide).toLowerCase() === 'yes'))
+    .map((opt) => [ '', opt.alias ? `-${opt.alias}, ` : '', `--${opt.name}`, opt.description ? `# ${opt.description}` : '', (opt.default !== undefined && opt.default !== '') ? `Default: ${opt.default}` : '' ]);
+  const optionsText = table(rows);
+  const argsDetailed = (instance._arguments || []).map(a => ({ name: a.name, required: !!a.required }));
+  const requiredArgs = argsDetailed.filter(a => a.required).map(a => a.name);
+  const optionalArgs = argsDetailed.filter(a => !a.required).map(a => a.name);
+  return {
+    name: generatorName,
+    description: instance.description || '',
+    usage: `pos-cli generate ${generatorPath} ${usage}`.trim(),
+    arguments: argsHelp,
+    optionsTable: optionsText,
+    args: argsDetailed,
+    requiredArgs,
+    optionalArgs
+  };
+}
+
+async function runGenerator(generatorPath, attributes = [], options = {}, yeomanEnv) {
+  const { name: generatorName, env } = registerGenerator(generatorPath, yeomanEnv);
+  const args = [generatorName].concat(attributes || []);
+  await env.run(args, options);
+  return { ok: true };
+}
+
+export { listGeneratorPathsSync, showHelp, runGenerator };
diff --git a/mcp-min/graphql/exec.js b/mcp-min/graphql/exec.js
new file mode 100644
index 000000000..c73afb1cd
--- /dev/null
+++ b/mcp-min/graphql/exec.js
@@ -0,0 +1,99 @@
+// platformos.graphql.exec tool - execute GraphQL via Gateway.graph
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+
+const settings = { fetchSettings };
+
+function maskToken(token) {
+  if (!token) return token;
+  return token.slice(0, 3) + '...' + token.slice(-3);
+}
+
+async function resolveAuth(params) {
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
+
+const execGraphqlTool = {
+  description: 'Execute a GraphQL query or mutation on a platformOS instance via /api/graph. Returns JSON data and errors from the instance. Auth resolved from: explicit params > MPKIT_* env vars > .pos config. Use variables to pass dynamic values safely instead of string interpolation.',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      env: { type: 'string', description: 'Environment name from .pos config (e.g., staging, production). Used to resolve auth when url/email/token are not provided.' },
+      url: { type: 'string', description: 'Instance URL (e.g., https://my-app.staging.oregon.platform-os.com). Requires email and token.' },
+      email: { type: 'string', description: 'Email for instance authentication. Required with url and token.' },
+      token: { type: 'string', description: 'API token for instance authentication. Required with url and email.' },
+      endpoint: { type: 'string', description: 'Override the base URL for the GraphQL endpoint. Defaults to the resolved instance URL.' },
+      query: { type: 'string', description: 'GraphQL query or mutation string (e.g., "{ users { results { id email } } }").' },
+      variables: { type: 'object', additionalProperties: true, description: 'Variables to pass to the GraphQL query/mutation. Preferred over string interpolation for dynamic values.' }
+    },
+    required: ['query']
+  },
+  handler: async (params, ctx = {}) => {
+    const startedAt = new Date().toISOString();
+    const auth = await resolveAuth(params);
+    const baseUrl = params?.endpoint ? params.endpoint : auth.url;
+    const GatewayCtor = ctx.Gateway || Gateway;
+    const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email });
+
+    const body = { query: params.query, variables: params.variables || {} };
+
+    try {
+      const resp = await gateway.graph(body);
+
+      if (resp && Array.isArray(resp.errors) && resp.errors.length > 0) {
+        // Return error object but do not throw (keep HTTP 200 at MCP layer)
+        const firstMsg = resp.errors[0]?.message || 'GraphQL execution error';
+        return {
+          success: false,
+          error: {
+            code: 'GRAPHQL_EXEC_ERROR',
+            message: `GraphQLError: ${firstMsg}`,
+            details: { errors: resp.errors, data: resp.data ?? null }
+          },
+          meta: {
+            startedAt,
+            finishedAt: new Date().toISOString(),
+            auth: { url: baseUrl, email: auth.email, token: maskToken(auth.token), source: auth.source }
+          }
+        };
+      }
+
+      return {
+        success: true,
+        result: resp,
+        meta: {
+          startedAt,
+          finishedAt: new Date().toISOString(),
+          auth: { url: baseUrl, email: auth.email, token: maskToken(auth.token), source: auth.source }
+        }
+      };
+    } catch (e) {
+      // Return error instead of throwing
+      return {
+        success: false,
+        error: { code: 'GRAPHQL_EXEC_ERROR', message: String(e) }
+      };
+    }
+  }
+};
+
+export default execGraphqlTool;
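A minimal sketch of passing dynamic values through `variables` rather than interpolating them into the query string; the query shape and values here are hypothetical, not a confirmed platformOS schema:

```js
// Minimal sketch: GraphQL errors come back as success: false, not as a thrown exception.
import execGraphqlTool from './mcp-min/graphql/exec.js';

const res = await execGraphqlTool.handler({
  env: 'staging',
  query: 'query($email: String) { users(per_page: 10) { results { id email } } }', // hypothetical query
  variables: { email: 'dev@example.com' }
});
if (res.success) {
  console.log(res.result.data);
} else {
  console.error(res.error.message, res.error.details);
}
```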
diff --git a/mcp-min/http-server.js b/mcp-min/http-server.js
new file mode 100644
index 000000000..f4c4c7da7
--- /dev/null
+++ b/mcp-min/http-server.js
@@ -0,0 +1,264 @@
+import express from 'express';
+import bodyParser from 'body-parser';
+import tools from './tools.js';
+import { sseHandler, writeSSE } from './sse.js';
+import { DEBUG } from './config.js';
+import log from './log.js';
+
+let currentSSE = null; // minimal session: last SSE connection
+
+export default async function startHttp({ port = 5910 } = {}) {
+  const app = express();
+
+  const router = express.Router();
+
+  // Request logging middleware (replaces morgan)
+  app.use((req, res, next) => {
+    const start = process.hrtime.bigint();
+    res.on('finish', () => {
+      const durationMs = Number(process.hrtime.bigint() - start) / 1e6;
+      log.info(`${req.method} ${req.originalUrl || req.url} ${res.statusCode} ${durationMs.toFixed(1)}ms`);
+    });
+    log.debug('HTTP request', {
+      method: req.method,
+      url: req.originalUrl || req.url,
+      remoteAddress: req.ip || req.connection?.remoteAddress,
+      headers: req.headers
+    });
+    next();
+  });
+
+  app.use(bodyParser.json({ limit: '1mb' }));
+
+  // Root route for basic info and discovery
+  const handleBaseRoot = (req, res) => {
+    const acceptHeader = req.get('accept') || '';
+    const wantsSSE = /text\/event-stream/i.test(acceptHeader) || (typeof req.accepts === 'function' && !!req.accepts(['text/event-stream']));
+    if (wantsSSE) {
+      // SSE handshake on base URL for clients that only know base url + transport=sse
+      sseHandler(req, res);
+      currentSSE = res;
+      req.on('close', () => {
+        if (currentSSE === res) currentSSE = null;
+      });
+      // minimal required event (plain text)
+      const endpointPath = '/call-stream';
+      writeSSE(res, { event: 'endpoint', data: endpointPath });
+      // extended info (optional)
+      const baseUrl = `${req.protocol}://${req.get('host')}`;
+      writeSSE(res, { event: 'endpoint_info', data: JSON.stringify({ base_url: baseUrl, transport: 'sse', path: '/call-stream' }) });
+      return; // keep connection open for client to proceed as designed
+    }
+
+    res.json({
+      name: 'mcp-min',
+      status: 'ok',
+      endpoints: {
+        health: { method: 'GET', path: `/health` },
+        tools: { method: 'GET', path: `/tools` },
+        call: { method: 'POST', path: `/call` },
+        call_stream: { method: 'POST', path: `/call-stream`, transport: 'sse' }
+      }
+    });
+  };
+
+  router.get('/', handleBaseRoot);
+
+  router.get('/health', (req, res) => res.json({ status: 'ok' }));
+
+  router.get('/tools', (req, res) => {
+    const list = Object.keys(tools).map((k) => ({ id: k, description: tools[k].description || '' }));
+    res.json({ tools: list });
+  });
+
+  router.post('/call', async (req, res) => {
+    const body = req.body || {};
+    const tool = body.tool || body.name || body.id;
+    const params = body.params ?? body.input ?? body.data ?? {};
+    if (!tool) return res.status(400).json({ error: 'tool required (expected body.tool/name/id)' });
+    const entry = tools[tool];
+    if (!entry) return res.status(404).json({ error: `tool not found: ${tool}` });
+
+    try {
+      log.debug('HTTP /call', { tool, params, rawBodyKeys: Object.keys(body) });
+      const result = await entry.handler(params || {}, { transport: 'http', debug: DEBUG });
+      log.debug('HTTP /call result', { tool, result });
+      res.json({ result });
+    } catch (err) {
+      log.debug('HTTP /call error', { tool, err: String(err), details: err && err._pos });
+      const payload = { error: String(err) };
+      if (err && err._pos) payload.details = err._pos;
+      res.status(500).json(payload);
+    }
+  });
+
+  // Streaming call with SSE
+  const callStreamHandler = async (req, res) => {
+    const body = req.body || {};
+
+    // JSON-RPC compatibility path (e.g., cagent initialize, tools/list)
+    if (body && body.jsonrpc === '2.0') {
+      const id = body.id ?? null;
+      const method = body.method;
+      const params = body.params || {};
+
+      const respond = (payload) => {
+        const responsePayload = { jsonrpc: '2.0', id, ...payload };
+        // If this is a notification (no id), acknowledge with 202
+        if (id == null) {
+          try { res.set('Mcp-Protocol-Version', responsePayload.result?.protocolVersion || '2025-06-18'); } catch {}
+          try { res.set('Mcp-Session-Id', 'mcpmin-1'); } catch {}
+          log.debug('JSON-RPC notify -> 202 Accepted');
+          res.status(202).end();
+          return true;
+        }
+        // For requests with id: emit on SSE (if present) and also return JSON
+        if (currentSSE) {
+          log.debug('JSON-RPC respond on SSE channel', { method, id });
+          writeSSE(currentSSE, { event: 'message', data: JSON.stringify(responsePayload) });
+        }
+        try { res.set('Mcp-Protocol-Version', responsePayload.result?.protocolVersion || '2025-06-18'); } catch {}
+        try { res.set('Mcp-Session-Id', 'mcpmin-1'); } catch {}
+        // Always return 200 for JSON-RPC responses; include errors in payload per protocol expectations
+        const status = 200;
+        log.debug(`JSON-RPC respond ${status} JSON`, { method, id, response: responsePayload });
+        res.status(status).json(responsePayload);
+        return true;
+      };
+
+      // Methods
+      if (method === 'initialize') {
+        const result = {
+          protocolVersion: params.protocolVersion || '2025-06-18',
+          capabilities: {
+            roots: { listChanged: true },
+            prompts: {},
+            tools: {}
+          },
+          serverInfo: { name: 'mcp-min', version: '0.1.0' }
+        };
+        try { res.set('Mcp-Protocol-Version', result.protocolVersion); } catch {}
+        try { res.set('Mcp-Session-Id', 'mcpmin-1'); } catch {}
+        respond({ result });
+        return;
+      }
+
+      if (method === 'tools/list') {
+        const list = Object.keys(tools).map((name) => ({
+          name,
+          description: tools[name].description || '',
+          inputSchema: tools[name].inputSchema || { type: 'object', additionalProperties: true }
+        }));
+        respond({ result: { tools: list } });
+        return;
+      }
+
+      if (method === 'tools/call') {
+        try {
+          const name = params?.name || params?.tool || params?.id;
+          const args = params?.arguments || params?.params || params?.input || {};
+          if (!name) {
+            respond({ error: { code: -32602, message: 'Invalid params: name required' } });
+            return;
+          }
+          const entry = tools[name];
+          if (!entry || typeof entry.handler !== 'function') {
+            respond({ error: { code: -32601, message: `Tool not found: ${name}` } });
+            return;
+          }
+          const result = await entry.handler(args, { transport: 'jsonrpc', debug: DEBUG });
+          // Wrap result as text content for broad client compatibility
+          const text = (() => { try { return JSON.stringify(result); } catch { return String(result); } })();
+          respond({ result: { content: [{ type: 'text', text }] } });
+          return;
+        } catch (e) {
+          respond({ error: { code: -32603, message: `Internal error: ${String(e)}` } });
+          return;
+        }
+      }
+
+      if (method === 'roots/list') {
+        respond({ result: { roots: [] } });
+        return;
+      }
+
+      // Unknown method -> JSON-RPC error
+      const error = { code: -32601, message: `Method not found: ${method}` };
+      respond({ error });
+      return;
+    }
+
+    // Legacy tool streaming path
+    const tool = body.tool || body.name || body.id;
+    const params = body.params ?? body.input ?? body.data ?? {};
+    if (!tool) return res.status(400).json({ error: 'tool required (expected body.tool/name/id)' });
+    const entry = tools[tool];
+    if (!entry) return res.status(404).json({ error: `tool not found: ${tool}` });
+
+    // Prepare SSE response
+    sseHandler(req, res);
+
+    // Emit initial endpoint event required by some clients (legacy pattern)
+    try {
+      const baseUrl = `${req.protocol}://${req.get('host')}`;
+      // Plain string first event
+      writeSSE(res, { event: 'endpoint', data: 'call-stream' });
+      // Extended info (optional secondary event)
+      writeSSE(res, { event: 'endpoint_info', data: JSON.stringify({ base_url: baseUrl, transport: 'sse', path: '/call-stream' }) });
+      log.debug('SSE initial endpoint event(s) sent', { endpoint: 'call-stream' });
+    } catch (e) {
+      log.debug('Failed to send initial endpoint event', String(e));
+    }
+
+    let closed = false;
+    req.on('close', () => { closed = true; log.debug('SSE connection closed', { tool }); });
+
+    // Provide a simple writer function to the tool
+    const writer = (event) => {
+      if (closed) return;
+      log.debug('SSE write', { tool, event });
+      writeSSE(res, event);
+    };
+
+    // Call the tool's stream handler if present
+    if (typeof entry.streamHandler === 'function') {
+      try {
+        log.debug('HTTP /call-stream start', { tool, params });
+        entry.streamHandler(params || {}, { transport: 'http', writer, debug: DEBUG })
+          .then(() => {
+            writeSSE(res, { event: 'done', data: '' });
+            res.end();
+            log.debug('HTTP /call-stream done', { tool });
+          })
+          .catch((err) => {
+            writeSSE(res, { event: 'error', data: String(err) });
+            res.end();
+            log.debug('HTTP /call-stream error', { tool, err: String(err) });
+          });
+      } catch (err) {
+        writeSSE(res, { event: 'error', data: String(err) });
+        res.end();
+        log.debug('HTTP /call-stream exception', { tool, err: String(err) });
+      }
+    } else {
+      writeSSE(res, { event: 'error', data: 'tool has no streamHandler' });
+      res.end();
+      log.debug('HTTP /call-stream missing streamHandler', { tool });
+    }
+  };
+
+  router.post('/call-stream', callStreamHandler);
+
+  // Mount only at root
+  app.use('/', router);
+
+  // Ensure POST /call-stream is available at root
+  app.post('/call-stream', callStreamHandler);
+
+  return new Promise((resolve) => {
+    const server = app.listen(port, () => {
+      log.info('HTTP server listening', { port });
+      resolve(server);
+    });
+  });
+}
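A minimal sketch of invoking a tool through the plain HTTP transport; the tool id below is hypothetical, the real ids are whatever `GET /tools` returns:

```js
// Minimal sketch: POST /call with { tool, params } on the default port 5910.
const resp = await fetch('http://localhost:5910/call', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    tool: 'platformos.deploy.status', // hypothetical id; check GET /tools for the real list
    params: { env: 'staging', id: '42' }
  })
});
console.log(await resp.json()); // { result: ... } on success, { error: ... } otherwise
```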
diff --git a/mcp-min/index.js b/mcp-min/index.js
new file mode 100644
index 000000000..1e52a5b30
--- /dev/null
+++ b/mcp-min/index.js
@@ -0,0 +1,35 @@
+import startStdio from './stdio-server.js';
+import startHttp from './http-server.js';
+import log from './log.js';
+
+const PORT = process.env.MCP_MIN_PORT || 5910;
+
+// Global handlers - exit cleanly on EPIPE (client disconnected)
+process.on('uncaughtException', (err) => {
+  if (err.code === 'EPIPE' || err.code === 'ERR_STREAM_DESTROYED') {
+    log.debug('Pipe closed, exiting');
+    process.exit(0);
+  }
+  log.error('Uncaught exception', err.message);
+});
+
+process.on('unhandledRejection', (reason) => {
+  log.error('Unhandled rejection', String(reason));
+});
+
+async function main() {
+  log.info('mcp-min: starting MCP minimal server...');
+
+  // Start stdio transport (MCP over stdio)
+  startStdio();
+
+  // Start HTTP server (includes SSE streaming endpoint)
+  await startHttp({ port: PORT });
+
+  log.info(`mcp-min: HTTP server listening on http://localhost:${PORT}`);
+}
+
+main().catch(err => {
+  log.error('Fatal error during startup', String(err));
+  process.exit(1);
+});
diff --git a/mcp-min/liquid/exec.js b/mcp-min/liquid/exec.js
new file mode 100644
index 000000000..43df0e55c
--- /dev/null
+++ b/mcp-min/liquid/exec.js
@@ -0,0 +1,103 @@
+// platformos.liquid.exec tool - execute Liquid on remote instance via Gateway.liquid
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+
+const settings = { fetchSettings };
+
+function maskToken(token) {
+  if (!token) return token;
+  return token.slice(0, 3) + '...' + token.slice(-3);
+}
+
+async function resolveAuth(params) {
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
+
+const execLiquidTool = {
+  description: 'Render a Liquid template on a platformOS instance server-side via /api/app_builder/liquid_exec. Returns the rendered output. Useful for testing Liquid code, running one-off queries via {% graphql %}, or inspecting instance state. Auth resolved from: explicit params > MPKIT_* env vars > .pos config.',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      env: { type: 'string', description: 'Environment name from .pos config (e.g., staging, production). Used to resolve auth when url/email/token are not provided.' },
+      url: { type: 'string', description: 'Instance URL (e.g., https://my-app.staging.oregon.platform-os.com). Requires email and token.' },
+      email: { type: 'string', description: 'Email for instance authentication. Required with url and token.' },
+      token: { type: 'string', description: 'API token for instance authentication. Required with url and email.' },
+      endpoint: { type: 'string', description: 'Override the base URL for the Liquid exec endpoint. Defaults to the resolved instance URL.' },
+      template: { type: 'string', description: 'Liquid template string to render server-side (e.g., "Hello {{ name }}", "{% graphql g = \'users/search\' %}").' },
+      locals: { type: 'object', additionalProperties: true, description: 'Variables available inside the template as top-level Liquid variables (e.g., { "name": "World" } makes {{ name }} render "World").' }
+    },
+    required: ['template']
+  },
+  handler: async (params, ctx = {}) => {
+    const startedAt = new Date().toISOString();
+    const auth = await resolveAuth(params);
+    const baseUrl = params?.endpoint ? params.endpoint : auth.url;
+
+    const GatewayCtor = ctx.Gateway || Gateway;
+    const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email });
+
+    const body = {
+      content: params.template,
+      locals: params.locals || {}
+    };
+
+    try {
+      const resp = await gateway.liquid(body);
+      const finishedAt = new Date().toISOString();
+
+      // Detect logical errors returned by the endpoint (HTTP 200 but Liquid error payload)
+      const respError = resp && (resp.error || resp.errors);
+      const resultStr = typeof resp?.result === 'string' ? resp.result.toLowerCase() : '';
+      const looksLikeError = resultStr.includes('error');
+
+      if (respError || looksLikeError) {
+        const message = String(resp?.error || resp?.errors || resp?.result || 'Liquid execution failed');
+        return {
+          success: false,
+          error: { code: 'LIQUID_EXEC_ERROR', message, details: resp },
+          meta: {
+            startedAt,
+            finishedAt,
+            auth: { url: baseUrl, email: auth.email, token: maskToken(auth.token), source: auth.source }
+          }
+        };
+      }
+
+      return {
+        success: true,
+        result: resp,
+        meta: {
+          startedAt,
+          finishedAt,
+          auth: { url: baseUrl, email: auth.email, token: maskToken(auth.token), source: auth.source }
+        }
+      };
+    } catch (e) {
+      return {
+        success: false,
+        error: { code: 'LIQUID_EXEC_ERROR', message: String(e) }
+      };
+    }
+  }
+};
+
+export default execLiquidTool;
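Note that the `looksLikeError` heuristic flags any rendered output containing the word "error" as a failure, so templates whose legitimate output includes that word will be reported as errors. A minimal sketch (hypothetical values) of rendering with `locals`:

```js
// Minimal sketch: locals become top-level Liquid variables in the template.
import execLiquidTool from './mcp-min/liquid/exec.js';

const res = await execLiquidTool.handler({
  env: 'staging',
  template: 'Hello {{ name }}! Today is {{ "now" | date: "%Y-%m-%d" }}.',
  locals: { name: 'World' }
});
console.log(res.success ? res.result : res.error);
```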
diff --git a/mcp-min/log.js b/mcp-min/log.js
new file mode 100644
index 000000000..6949aead4
--- /dev/null
+++ b/mcp-min/log.js
@@ -0,0 +1,64 @@
+// Unified logging for mcp-min
+// - Never writes to stdout (safe for stdio JSON-RPC transport)
+// - Writes to file + stderr
+// - debug() gated on DEBUG/MCP_MIN_DEBUG flags
+import { mkdirSync, appendFileSync } from 'fs';
+import { homedir } from 'os';
+import path from 'path';
+import { DEBUG } from './config.js';
+
+const LOG_DIR = path.join(homedir(), '.pos-cli', 'logs');
+const LOG_FILE = process.env.MCP_MIN_LOG_FILE || path.join(LOG_DIR, 'mcp-min.log');
+
+let logReady = false;
+
+function init() {
+  if (logReady) return;
+  try {
+    mkdirSync(path.dirname(LOG_FILE), { recursive: true });
+    logReady = true;
+  } catch {
+    // ignore - logging is best-effort
+  }
+}
+
+function write(level, message, data) {
+  const ts = new Date().toISOString();
+  const suffix = data !== undefined ? ` ${JSON.stringify(data)}` : '';
+  const line = `[${level} ${ts}] ${message}${suffix}\n`;
+
+  // Always write to stderr (never stdout)
+  try {
+    process.stderr.write(line);
+  } catch {
+    // best-effort
+  }
+
+  // Write to log file
+  init();
+  if (logReady) {
+    try {
+      appendFileSync(LOG_FILE, line);
+    } catch {
+      // best-effort
+    }
+  }
+}
+
+const log = {
+  debug(message, data) {
+    if (DEBUG) write('DEBUG', message, data);
+  },
+  info(message, data) {
+    write('INFO', message, data);
+  },
+  warn(message, data) {
+    write('WARN', message, data);
+  },
+  error(message, data) {
+    write('ERROR', message, data);
+  },
+  LOG_FILE
+};
+
+export default log;
diff --git a/mcp-min/logs/fetch.js b/mcp-min/logs/fetch.js
new file mode 100644
index 000000000..da0a9c030
--- /dev/null
+++ b/mcp-min/logs/fetch.js
@@ -0,0 +1,105 @@
+// platformos.logs.fetch tool - batch fetch logs based on pos-cli fetch-logs
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+
+const settings = { fetchSettings };
+
+function maskToken(token) {
+  if (!token) return token;
+  return token.slice(0, 3) + '...' + token.slice(-3);
+}
+
+async function resolveAuth(params) {
+  // precedence: explicit params -> MPKIT_* env -> .pos by env -> first .pos
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
+
+const fetchLogsTool = {
+  description: 'Fetch recent logs in batches (NDJSON semantics, returns JSON array here). Mirrors pos-cli fetch-logs.',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      env: { type: 'string' },
+      url: { type: 'string' },
+      email: { type: 'string' },
+      token: { type: 'string' },
+      lastId: { type: 'string' },
+      endpoint: { type: 'string', description: 'Override API base url' },
+      limit: { type: 'integer', minimum: 1, maximum: 10000 }
+    }
+  },
+  handler: async (params, ctx = {}) => {
+    const startedAt = new Date().toISOString();
+    const auth = await resolveAuth(params);
+
+    // Allow endpoint override (CLI option --endpoint)
+    const baseUrl = params?.endpoint ? params.endpoint : auth.url;
+
+    const GatewayCtor = ctx.Gateway || Gateway;
+    const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email });
+
+    // lastId handling similar to CLI: default '0' if not provided
+    let latestId = params?.lastId != null ? String(params.lastId) : '0';
+    const seen = new Set();
+    const out = [];
+    const maxCount = params?.limit && Number.isFinite(params.limit) ? Number(params.limit) : Infinity;
+
+    while (true) {
+      const prevId = latestId;
+      const response = await gateway.logs({ lastId: latestId });
+      const logs = response && response.logs;
+      if (!logs || logs.length === 0) break;
+
+      let maxId = latestId;
+      for (let i = 0; i < logs.length; i++) {
+        const row = logs[i];
+        if (seen.has(row.id)) continue;
+        seen.add(row.id);
+        out.push(row);
+        // numeric-only comparison like CLI for safety
+        const curr = Number(row.id);
+        if (!Number.isNaN(curr)) {
+          const prev = Number(maxId);
+          if (Number.isNaN(prev) || curr > prev) maxId = String(row.id);
+        }
+        if (out.length >= maxCount) break;
+      }
+
+      if (maxId === prevId) break; // no progress
+      latestId = maxId;
+      if (out.length >= maxCount) break;
+    }
+
+    return {
+      logs: out,
+      lastId: latestId,
+      meta: {
+        startedAt,
+        finishedAt: new Date().toISOString(),
+        count: out.length,
+        auth: { url: baseUrl, email: auth.email, token: maskToken(auth.token), source: auth.source }
+      }
+    };
+  }
+};
+
+export default fetchLogsTool;
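A minimal sketch of paging through logs with the returned `lastId` cursor; the env name, limit, and any log fields beyond `id` are assumptions:

```js
// Minimal sketch: resume each batch from the highest id seen so far.
import fetchLogsTool from './mcp-min/logs/fetch.js';

let lastId = '0';
for (let page = 0; page < 3; page++) {
  const { logs, lastId: nextId } = await fetchLogsTool.handler({ env: 'staging', lastId, limit: 500 });
  if (logs.length === 0) break;
  for (const row of logs) console.log(row.id, row);
  lastId = nextId; // cursor for the next batch
}
```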
diff --git a/mcp-min/logs/stream.js b/mcp-min/logs/stream.js
new file mode 100644
index 000000000..503284446
--- /dev/null
+++ b/mcp-min/logs/stream.js
@@ -0,0 +1,114 @@
+// platformos.logs.stream - streaming logs via SSE with polling
+import log from '../log.js';
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+
+const settings = { fetchSettings };
+
+async function resolveAuth(params) {
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
+
+function matchesFilter(row, filter) {
+  if (!filter) return true;
+  const type = (row.error_type || row.type || '').toString().toLowerCase();
+  return type.includes(String(filter).toLowerCase());
+}
+
+const streamTool = {
+  description: 'Real-time log streaming using polling and SSE. Optional filter by type.',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      env: { type: 'string' },
+      url: { type: 'string' },
+      email: { type: 'string' },
+      token: { type: 'string' },
+      endpoint: { type: 'string' },
+      interval: { type: 'integer', minimum: 250 },
+      filter: { type: 'string' },
+      startLastId: { type: 'string', description: 'Starting last id (default 0)' },
+      maxDuration: { type: 'integer', description: 'Optional max duration ms' }
+    }
+  },
+  streamHandler: async (params, { writer, Gateway: GatewayOverride } = {}) => {
+    const auth = await resolveAuth(params);
+    const baseUrl = params?.endpoint ? params.endpoint : auth.url;
+    const GatewayCtor = GatewayOverride || Gateway;
+    const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email });
+
+    const interval = Number(params?.interval) || 3000;
+    const filter = params?.filter;
+    let lastId = (params && params.startLastId != null) ? String(params.startLastId) : '0';
+    const seen = new Set();
+    const started = Date.now();
+    const maxDuration = params?.maxDuration && Number(params.maxDuration) > 0 ? Number(params.maxDuration) : null;
+
+    writer({ event: 'data', data: JSON.stringify({ type: 'info', message: 'logs.stream started', env: params?.env || auth.source, interval }) });
+
+    const tick = async () => {
+      try {
+        const resp = await gateway.logs({ lastId });
+        const list = (resp && resp.logs) || [];
+        if (list.length > 0) {
+          let maxId = lastId;
+          for (const row of list) {
+            if (seen.has(row.id)) continue;
+            seen.add(row.id);
+            if (matchesFilter(row, filter)) {
+              writer({ event: 'data', data: JSON.stringify(row) });
+            }
+            const curr = Number(row.id);
+            if (!Number.isNaN(curr)) {
+              const prev = Number(maxId);
+              if (Number.isNaN(prev) || curr > prev) maxId = String(row.id);
+            }
+          }
+          lastId = maxId;
+        }
+      } catch (e) {
+        writer({ event: 'error', data: String(e) });
+      }
+    };
+
+    // Single timeout mechanism: emit 'done' and stop polling once maxDuration (wall clock) has elapsed
+    const schedule = () => {
+      setTimeout(async function run() {
+        await tick();
+        if (maxDuration && (Date.now() - started) >= maxDuration) {
+          writer({ event: 'done', data: '' });
+          return;
+        }
+        schedule();
+      }, interval);
+    };
+
+    // initial tick immediately
+    await tick();
+    schedule();
+
+    // return a promise that never resolves; http-server will end when streamHandler resolves/rejects
+    return new Promise(() => {});
+  }
+};
+
+export default streamTool;
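A minimal sketch of consuming the stream through the legacy `/call-stream` endpoint; the tool id is hypothetical (see `GET /tools`), and the raw SSE handling is deliberately simplified, a real client should use a proper SSE parser:

```js
// Minimal sketch: POST a streaming tool call and dump the raw SSE frames.
const resp = await fetch('http://localhost:5910/call-stream', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    tool: 'platformos.logs.stream', // hypothetical id
    params: { env: 'staging', interval: 3000, filter: 'error', maxDuration: 60000 }
  })
});
const decoder = new TextDecoder();
for await (const chunk of resp.body) {
  process.stdout.write(decoder.decode(chunk)); // frames like: event: data\ndata: {...}\n\n
}
```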
diff --git a/mcp-min/migrations/generate.js b/mcp-min/migrations/generate.js
new file mode 100644
index 000000000..760e9a3b9
--- /dev/null
+++ b/mcp-min/migrations/generate.js
@@ -0,0 +1,86 @@
+// platformos.migrations.generate - create a new migration via Gateway and optionally write the file locally
+import fs from 'fs';
+import path from 'path';
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+import dir from '../../lib/directories.js';
+
+const settings = { fetchSettings };
+
+async function resolveAuth(params) {
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
+
+function ensureMigrationsDir() {
+  const appDirectory = fs.existsSync(dir.APP) ? dir.APP : dir.LEGACY_APP;
+  const migrationsDir = path.posix.join(appDirectory, 'migrations');
+  return migrationsDir;
+}
+
+const generateMigrationTool = {
+  description: 'Generate a migration on server and write local file unless skipWrite=true.',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      env: { type: 'string' },
+      url: { type: 'string' },
+      email: { type: 'string' },
+      token: { type: 'string' },
+      name: { type: 'string', description: 'Base name of the migration, without timestamp' },
+      skipWrite: { type: 'boolean', description: 'When true, do not create local file', default: false },
+      endpoint: { type: 'string', description: 'Override API base URL' }
+    },
+    required: ['name']
+  },
+  handler: async (params = {}, ctx = {}) => {
+    try {
+      const auth = await resolveAuth(params);
+      const baseUrl = params?.endpoint ? params.endpoint : auth.url;
+      const GatewayCtor = ctx.Gateway || Gateway;
+      const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email });
+
+      const formData = { name: params.name };
+      const raw = await gateway.generateMigration(formData);
+
+      let filePath = null;
+      if (!params.skipWrite) {
+        const migrationsDir = ensureMigrationsDir();
+        // Ensure directory exists
+        fs.mkdirSync(migrationsDir, { recursive: true });
+        filePath = path.posix.join(migrationsDir, `${raw.name}.liquid`);
+        fs.writeFileSync(filePath, raw.body);
+      }
+
+      const data = {
+        name: raw.name,
+        bodyLength: typeof raw.body === 'string' ? raw.body.length : null,
+        filePath
+      };
+
+      return { status: 'ok', data, raw };
+    } catch (e) {
+      return { status: 'error', error: { code: 'MIGRATIONS_GENERATE_ERROR', message: String(e?.message || e) } };
+    }
+  }
+};
+
+export default generateMigrationTool;
diff --git a/mcp-min/migrations/list.js b/mcp-min/migrations/list.js
new file mode 100644
index 000000000..60937b744
--- /dev/null
+++ b/mcp-min/migrations/list.js
@@ -0,0 +1,65 @@
+// platformos.migrations.list - list migrations and their statuses via Gateway
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+
+const settings = { fetchSettings };
+
+async function resolveAuth(params) {
+  // precedence: explicit params -> env (MPKIT_*) -> .pos by env -> first .pos
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
+
+const listMigrationsTool = {
+  description: 'List migrations deployed to the server with their current status.',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      env: { type: 'string' },
+      url: { type: 'string' },
+      email: { type: 'string' },
+      token: { type: 'string' },
+      endpoint: { type: 'string', description: 'Override API base URL' }
+    }
+  },
+  handler: async (params = {}, ctx = {}) => {
+    try {
+      const auth = await resolveAuth(params);
+      const baseUrl = params?.endpoint ? params.endpoint : auth.url;
+      const GatewayCtor = ctx.Gateway || Gateway;
+      const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email });
+
+      const raw = await gateway.listMigrations();
+      const migrations = Array.isArray(raw?.migrations) ? raw.migrations.map((m) => ({
+        id: m.id,
+        name: m.name,
+        state: m.state,
+        error_messages: m.error_messages || null
+      })) : [];
+
+      return { status: 'ok', data: { migrations }, raw };
+    } catch (e) {
+      return { status: 'error', error: { code: 'MIGRATIONS_LIST_ERROR', message: String(e?.message || e) } };
+    }
+  }
+};
+
+export default listMigrationsTool;
diff --git a/mcp-min/migrations/run.js b/mcp-min/migrations/run.js
new file mode 100644
index 000000000..3c9078737
--- /dev/null
+++ b/mcp-min/migrations/run.js
@@ -0,0 +1,66 @@
+// platformos.migrations.run - run a specific migration by name or timestamp via Gateway
+import files from '../../lib/files.js';
+import { fetchSettings } from '../../lib/settings.js';
+import Gateway from '../../lib/proxy.js';
+
+const settings = { fetchSettings };
+
+async function resolveAuth(params) {
+  if (params?.url && params?.email && params?.token) {
+    return { url: params.url, email: params.email, token: params.token, source: 'params' };
+  }
+  const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env;
+  if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) {
+    return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' };
+  }
+  if (params?.env) {
+    const found = await settings.fetchSettings(params.env);
+    if (found) return { ...found, source: `.pos(${params.env})` };
+  }
+  const conf = files.getConfig();
+  const firstEnv = conf && Object.keys(conf)[0];
+  if (firstEnv) {
+    const found = conf[firstEnv];
+    if (found) return { ...found, source: `.pos(${firstEnv})` };
+  }
+  throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars');
+}
+
+function buildFormData({ timestamp, name }) {
+  if (!timestamp && !name) throw new Error('INVALID_INPUT: Provide timestamp or name');
+  // API expects { timestamp } and supports value being either full name or just numeric timestamp
+  return { timestamp: timestamp || name };
+}
+
+const runMigrationTool = {
+  description: 'Run a specific migration identified by timestamp or full name.',
+  inputSchema: {
+    type: 'object',
+    additionalProperties: false,
+    properties: {
+      env: { type: 'string' },
+      url: { type: 'string' },
+      email: { type: 'string' },
+      token: { type: 'string' },
+      timestamp: { type: 'string', description: 'Numeric timestamp' },
+      name: { type: 'string', description: 'Alias for timestamp; full migration name without .liquid' },
+      endpoint: { type: 'string', description: 'Override API base URL' }
+    }
+  },
+  handler: async (params = {}, ctx = {}) => {
+    try {
+      const auth = await resolveAuth(params);
+      const baseUrl = params?.endpoint ? params.endpoint : auth.url;
+      const GatewayCtor = ctx.Gateway || Gateway;
+      const gateway = new GatewayCtor({ url: baseUrl, token: auth.token, email: auth.email });
+
+      const raw = await gateway.runMigration(buildFormData(params));
+      const data = { name: raw?.name || null, status: 'executed' };
+      return { status: 'ok', data, raw };
+    } catch (e) {
+      return { status: 'error', error: { code: 'MIGRATIONS_RUN_ERROR', message: String(e?.message || e) } };
+    }
+  }
+};
+
+export default runMigrationTool;
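A minimal sketch (hypothetical migration name) chaining the three migration tools; it assumes the timestamped name returned by `generate` is accepted by `run`, per the buildFormData comment above:

```js
// Minimal sketch: generate a migration on the server, then run it by name.
import generateMigrationTool from './mcp-min/migrations/generate.js';
import runMigrationTool from './mcp-min/migrations/run.js';

const gen = await generateMigrationTool.handler({ env: 'staging', name: 'add_admin_user' });
if (gen.status === 'ok') {
  // Either the full timestamped name or the bare numeric timestamp works here.
  const run = await runMigrationTool.handler({ env: 'staging', name: gen.data.name });
  console.log(run.status, run.data);
}
```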
diff --git a/mcp-min/package-lock.json b/mcp-min/package-lock.json
new file mode 100644
index 000000000..ea19ff0c1
--- /dev/null
+++ b/mcp-min/package-lock.json
@@ -0,0 +1,811 @@
+{
+  "name": "mcp-min",
+  "version": "0.1.0",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "mcp-min",
+      "version": "0.1.0",
+      "dependencies": {
+        "body-parser": "^1.20.2",
+        "express": "^4.18.2",
+        "morgan": "^1.10.0"
+      }
+    },
+    "node_modules/accepts": {
+      "version": "1.3.8",
+      "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+      "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+      "dependencies": {
+        "mime-types": "~2.1.34",
+        "negotiator": "0.6.3"
+      },
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/array-flatten": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+      "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
+    },
+    "node_modules/basic-auth": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
+      "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==",
+      "dependencies": {
+        "safe-buffer": "5.1.2"
+      },
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/basic-auth/node_modules/safe-buffer": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+      "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+    },
+    "node_modules/body-parser": {
+      "version": "1.20.4",
+      "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz",
+      "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==",
+      "dependencies": {
+        "bytes": "~3.1.2",
+        "content-type": "~1.0.5",
+        "debug": "2.6.9",
+        "depd": "2.0.0",
+        "destroy": "~1.2.0",
+        "http-errors": "~2.0.1",
+        "iconv-lite": "~0.4.24",
+        "on-finished": "~2.4.1",
+        "qs": "~6.14.0",
+        "raw-body": "~2.5.3",
+        "type-is": "~1.6.18",
+        "unpipe": "~1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.8",
+        "npm": "1.2.8000 || >= 1.4.16"
+      }
+    },
+    "node_modules/bytes": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+      "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/call-bind-apply-helpers": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
+      "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
+      "dependencies": {
+        "es-errors": "^1.3.0",
+        "function-bind": "^1.1.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/call-bound": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
+      "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.2",
+        "get-intrinsic": "^1.3.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/content-disposition": {
+      "version": "0.5.4",
+      "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+      "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
+      "dependencies": {
+        "safe-buffer": "5.2.1"
+      },
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/content-type": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+      "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/cookie": {
+      "version": "0.7.2",
+      "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+      "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/cookie-signature": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz",
+      "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA=="
+    },
+    "node_modules/debug": {
+      "version": "2.6.9",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+      "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+      "dependencies": {
+        "ms": "2.0.0"
+      }
+    },
+    "node_modules/depd": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+      "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/destroy": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+      "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
+      "engines": {
+        "node": ">= 0.8",
+        "npm": "1.2.8000 || >= 1.4.16"
+      }
+    },
+    "node_modules/dunder-proto": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
+      "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "gopd": "^1.2.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/ee-first": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+      "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="
+    },
+    "node_modules/encodeurl": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+      "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/es-define-property": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
+      "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-errors": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+      "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/es-object-atoms": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
+      "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
+      "dependencies": {
+        "es-errors": "^1.3.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/escape-html": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+      "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="
+    },
+    "node_modules/etag": {
+      "version": "1.8.1",
+      "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+      "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/express": {
+      "version": "4.22.1",
+      "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz",
+      "integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==",
+      "dependencies": {
+        "accepts": "~1.3.8",
+        "array-flatten": "1.1.1",
+        "body-parser": "~1.20.3",
+        "content-disposition": "~0.5.4",
+        "content-type": "~1.0.4",
+        "cookie": "~0.7.1",
+        "cookie-signature": "~1.0.6",
+        "debug": "2.6.9",
+        "depd": "2.0.0",
+        "encodeurl": "~2.0.0",
+        "escape-html": "~1.0.3",
+        "etag": "~1.8.1",
+        "finalhandler": "~1.3.1",
+        "fresh": "~0.5.2",
+        "http-errors": "~2.0.0",
+        "merge-descriptors": "1.0.3",
+        "methods": "~1.1.2",
+        "on-finished": "~2.4.1",
+        "parseurl": "~1.3.3",
+        "path-to-regexp": "~0.1.12",
+        "proxy-addr": "~2.0.7",
+        "qs": "~6.14.0",
+        "range-parser": "~1.2.1",
+        "safe-buffer": "5.2.1",
+        "send": "~0.19.0",
+        "serve-static": "~1.16.2",
+        "setprototypeof": "1.2.0",
+        "statuses": "~2.0.1",
+        "type-is": "~1.6.18",
+        "utils-merge": "1.0.1",
+        "vary": "~1.1.2"
+      },
+      "engines": {
+        "node": ">= 0.10.0"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/express"
+      }
+    },
+    "node_modules/finalhandler": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz",
+      "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==",
+      "dependencies": {
+        "debug": "2.6.9",
+        "encodeurl": "~2.0.0",
+        "escape-html": "~1.0.3",
+        "on-finished": "~2.4.1",
+        "parseurl": "~1.3.3",
+        "statuses": "~2.0.2",
+        "unpipe": "~1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/forwarded": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+      "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/fresh": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+      "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/function-bind": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+      "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/get-intrinsic": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
+      "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
+      "dependencies": {
+        "call-bind-apply-helpers": "^1.0.2",
+        "es-define-property": "^1.0.1",
+        "es-errors": "^1.3.0",
+        "es-object-atoms": "^1.1.1",
+        "function-bind": "^1.1.2",
+        "get-proto": "^1.0.1",
+        "gopd": "^1.2.0",
+        "has-symbols": "^1.1.0",
+        "hasown": "^2.0.2",
+        "math-intrinsics": "^1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/get-proto": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
+      "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
+      "dependencies": {
+        "dunder-proto": "^1.0.1",
+        "es-object-atoms": "^1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/gopd": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
+      "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/has-symbols": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
+      "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/hasown": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+      "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+      "dependencies": {
+        "function-bind": "^1.1.2"
+      },
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/http-errors": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
+      "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
+      "dependencies": {
+        "depd": "~2.0.0",
+        "inherits": "~2.0.4",
+        "setprototypeof": "~1.2.0",
+        "statuses": "~2.0.2",
+        "toidentifier": "~1.0.1"
+      },
+      "engines": {
+        "node": ">= 0.8"
+      },
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/express"
+      }
+    },
+    "node_modules/iconv-lite": {
+      "version": "0.4.24",
+      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+      "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+      "dependencies": {
+        "safer-buffer": ">= 2.1.2 < 3"
+      },
+      "engines": {
+        "node": ">=0.10.0"
+      }
+    },
+    "node_modules/inherits": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+      "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+    },
+    "node_modules/ipaddr.js": {
+      "version": "1.9.1",
+      "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+      "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+      "engines": {
+        "node": ">= 0.10"
+      }
+    },
+    "node_modules/math-intrinsics": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
+      "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
+      "engines": {
+        "node": ">= 0.4"
+      }
+    },
+    "node_modules/media-typer": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+      "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/merge-descriptors": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
+      "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
+      "funding": {
+        "url": "https://github.com/sponsors/sindresorhus"
+      }
+    },
+    "node_modules/methods": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+      "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/mime": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+      "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
+      "bin": {
+        "mime": "cli.js"
+      },
+      "engines": {
+        "node": ">=4"
+      }
+    },
+    "node_modules/mime-db": {
+      "version": "1.52.0",
+      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+      "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/mime-types": {
+      "version": "2.1.35",
+      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+      "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+      "dependencies": {
+        "mime-db": "1.52.0"
+      },
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/morgan": {
+      "version": "1.10.1",
+      "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.10.1.tgz",
+      "integrity": "sha512-223dMRJtI/l25dJKWpgij2cMtywuG/WiUKXdvwfbhGKBhy1puASqXwFzmWZ7+K73vUPoR7SS2Qz2cI/g9MKw0A==",
+      "dependencies": {
+        "basic-auth": "~2.0.1",
+        "debug": "2.6.9",
+        "depd": "~2.0.0",
+        "on-finished": "~2.3.0",
+        "on-headers": "~1.1.0"
+      },
+      "engines": {
+        "node": ">= 0.8.0"
+      }
+    },
+    "node_modules/morgan/node_modules/on-finished": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+      "integrity": "sha512-ikqdkGAAyf/X/gPhXGvfgAytDZtDbr+bkNUJ0N9h5MI/dmdgCs3l6hoHrcUv41sRKew3jIwrp4qQDXiK99Utww==",
+      "dependencies": {
+        "ee-first": "1.1.1"
+      },
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/ms": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+      "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+    },
+    "node_modules/negotiator": {
+      "version": "0.6.3",
+      "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+      "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/object-inspect": {
+      "version": "1.13.4",
+      "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
+      "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==",
+      "engines": {
+        "node": ">= 0.4"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/on-finished": {
+      "version": "2.4.1",
+      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+      "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+      "dependencies": {
+        "ee-first": "1.1.1"
+      },
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/on-headers": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz",
+      "integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==",
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/parseurl": {
+      "version": "1.3.3",
+      "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+      "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/path-to-regexp": {
+      "version": "0.1.12",
+      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
+      "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ=="
+    },
+    "node_modules/proxy-addr": {
+      "version": "2.0.7",
+      "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+      "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+      "dependencies": {
+        "forwarded": "0.2.0",
+        "ipaddr.js": "1.9.1"
+      },
+      "engines": {
+        "node": ">= 0.10"
+      }
+    },
+    "node_modules/qs": {
+      "version": "6.14.1",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz",
+      "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==",
+      "dependencies": {
+        "side-channel": "^1.1.0"
+      },
+      "engines": {
+        "node": ">=0.6"
+      },
+      "funding": {
+        "url": "https://github.com/sponsors/ljharb"
+      }
+    },
+    "node_modules/range-parser": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+      "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+      "engines": {
+        "node": ">= 0.6"
+      }
+    },
+    "node_modules/raw-body": {
+      "version": "2.5.3",
+      "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz",
+      "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==",
+      "dependencies": {
+        "bytes": "~3.1.2",
+        "http-errors": "~2.0.1",
+        "iconv-lite": "~0.4.24",
+        "unpipe": "~1.0.0"
+      },
+      "engines": {
+        "node": ">= 0.8"
+      }
+    },
+    "node_modules/safe-buffer": {
+      "version": "5.2.1",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+      "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+      "funding": [
+        {
+          "type": "github",
+          "url": "https://github.com/sponsors/feross"
+        },
+        {
+          "type": "patreon",
+          "url": "https://www.patreon.com/feross"
+        },
+        {
+          "type":
"consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "node_modules/send": { + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz", + "integrity": "sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "~0.5.2", + "http-errors": "~2.0.1", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "~2.4.1", + "range-parser": "~1.2.1", + "statuses": "~2.0.2" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/serve-static": { + "version": "1.16.3", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz", + "integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==", + "dependencies": { + "encodeurl": "~2.0.0", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "~0.19.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==" + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", + "integrity": "sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": 
"sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "engines": { + "node": ">= 0.8" + } + } + } +} diff --git a/mcp-min/package.json b/mcp-min/package.json new file mode 100644 index 000000000..f384c9d28 --- /dev/null +++ b/mcp-min/package.json @@ -0,0 +1,16 @@ +{ + "name": "mcp-min", + "version": "0.2.0", + "description": "Minimal MCP server (stdio + HTTP with SSE) as a subpackage", + "type": "module", + "main": "index.js", + "scripts": { + "start": "node index.js", + "start:stdio": "node stdio-server.js", + "start:debug": "MCP_MIN_DEBUG=1 node index.js" + }, + "dependencies": { + "express": "^4.18.2", + "body-parser": "^1.20.2" + } +} diff --git a/mcp-min/portal/endpoints-list.js b/mcp-min/portal/endpoints-list.js new file mode 100644 index 000000000..df56a6e9c --- /dev/null +++ b/mcp-min/portal/endpoints-list.js @@ -0,0 +1,59 @@ +// endpoints-list tool - List available regions/endpoints from Partner Portal API +import log from '../log.js'; +import { getPortalConfig, portalRequest } from './portal-client.js'; + +const endpointsListTool = { + description: 'List available regions/endpoints for instance creation from Partner Portal.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: {}, + required: [] + }, + + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:endpoints-list invoked'); + + try { + const configFn = ctx.getPortalConfig || getPortalConfig; + const requestFn = ctx.portalRequest || 
portalRequest; + const config = ctx.portalConfig || configFn(); + + log.debug('endpoints-list: fetching endpoints'); + const response = await requestFn({ + method: 'GET', + path: '/api/endpoints', + config + }); + + const endpoints = Array.isArray(response) ? response : (response.endpoints || []); + + return { + ok: true, + data: { + endpoints: endpoints.map(e => ({ + id: e.id, + name: e.name, + url: e.url, + region: e.region + })), + count: endpoints.length + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } catch (e) { + log.error('endpoints-list: error', { error: e.message, status: e.status }); + return { + ok: false, + error: { + code: 'ENDPOINTS_LIST_ERROR', + message: String(e.message || e) + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + } +}; + +export default endpointsListTool; diff --git a/mcp-min/portal/env-add.js b/mcp-min/portal/env-add.js new file mode 100644 index 000000000..941d13c1d --- /dev/null +++ b/mcp-min/portal/env-add.js @@ -0,0 +1,376 @@ +// env-add tool - Add environment using device authorization flow +import log from '../log.js'; +import { getPortalConfig, portalRequest } from './portal-client.js'; +import fs from 'fs'; +import path from 'path'; + +// Default Partner Portal URL +const DEFAULT_PORTAL_URL = 'https://partners.platformos.com'; + +// Track active background waiters +const activeWaiters = new Map(); + +// Cleanup on process exit +process.on('exit', () => { + log.debug('Process exit, clearing', { waiterCount: activeWaiters.size }); + activeWaiters.clear(); +}); + +/** + * Get Portal URL with priority: + * 1. override parameter + * 2. PARTNER_PORTAL_URL env var + * 3. ~/.config/pos-cli/config.json partner_portal_url + * 4. default (partners.platformos.com) + */ +function getPortalUrl(override) { + if (override) return override; + + if (process.env.PARTNER_PORTAL_URL) { + return process.env.PARTNER_PORTAL_URL; + } + + try { + const config = getPortalConfig(); + if (config.partner_portal_url) { + return config.partner_portal_url; + } + } catch { + // Config not found, use default + } + return DEFAULT_PORTAL_URL; +} + +const envAddTool = { + description: 'Add environment to .pos config. 
Returns verification URL immediately, spawns background waiter (60s) that saves token when user authorizes.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + environment: { + type: 'string', + description: 'Environment name (e.g., staging, production)' + }, + url: { + type: 'string', + description: 'Instance URL (e.g., https://my-app.staging.oregon.platform-os.com)' + }, + token: { + type: 'string', + description: 'Optional: Direct API token (skips device authorization if provided)' + }, + email: { + type: 'string', + description: 'Optional: Email associated with the account' + }, + partner_portal_url: { + type: 'string', + description: 'Optional: Partner Portal URL (reads from ~/.config/pos-cli/config.json if not provided)' + }, + timeout_seconds: { + type: 'number', + description: 'Optional: Max seconds to wait for authorization (default: 60, max: 120)' + } + }, + required: ['environment', 'url'] + }, + + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.info('handler:START', { environment: params.environment, url: params.url, params }); + + try { + const portalUrl = ctx.portalUrl || getPortalUrl(params.partner_portal_url); + const timeoutSeconds = Math.min(params.timeout_seconds || 60, 120); + log.debug('handler:config', { portalUrl, timeoutSeconds }); + + // Normalize URL (ensure trailing slash) + let instanceUrl = params.url; + if (!instanceUrl.endsWith('/')) { + instanceUrl = instanceUrl + '/'; + } + + // Validate URL format + let instanceDomain; + try { + instanceDomain = new URL(instanceUrl).hostname; + } catch { + return { + ok: false, + error: { code: 'INVALID_URL', message: `Invalid URL format: ${params.url}` }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + + const fetchFn = ctx.fetch || fetch; + + // Direct token provided - skip device auth + if (params.token) { + log.info('handler:usingProvidedToken'); + + const storeEnvFn = ctx.storeEnvironment || storeEnvironment; + storeEnvFn({ + environment: params.environment, + url: instanceUrl, + token: params.token, + email: params.email, + partner_portal_url: portalUrl + }); + + return { + ok: true, + data: { + environment: params.environment, + url: instanceUrl, + message: `Environment "${params.environment}" added successfully.` + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + + // Device authorization flow - get verification URL + log.info('handler:startingDeviceAuth', { instanceDomain, portalUrl }); + + let deviceAuthResponse; + try { + const authUrl = `${portalUrl}/oauth/authorize_device`; + log.debug('handler:requestingDeviceAuth', { authUrl }); + + const response = await fetchFn(authUrl, { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: `domain=${encodeURIComponent(instanceDomain)}` + }); + + if (response.status === 404) { + return { + ok: false, + error: { + code: 'INSTANCE_NOT_REGISTERED', + message: `Instance ${instanceUrl} is not registered in the Partner Portal. 
Verify the URL is correct.` + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + + if (!response.ok) { + const text = await response.text(); + throw new Error(`Device authorization failed: ${response.status} ${text}`); + } + + deviceAuthResponse = await response.json(); + } catch (e) { + return { + ok: false, + error: { code: 'DEVICE_AUTH_FAILED', message: String(e.message || e) }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + + const verificationUrl = deviceAuthResponse.verification_uri_complete; + const deviceCode = deviceAuthResponse.device_code; + const pollInterval = (deviceAuthResponse.interval || 5) * 1000; + const waiterId = `${params.environment}-${Date.now()}`; + + log.info('handler:deviceAuthSuccess', { verificationUrl, waiterId, pollInterval }); + + // Spawn background waiter + const waiterPromise = spawnBackgroundWaiter({ + waiterId, + deviceCode, + portalUrl, + pollInterval, + timeoutSeconds, + environment: params.environment, + instanceUrl, + email: params.email, + fetchFn, + storeEnvFn: ctx.storeEnvironment || storeEnvironment + }); + + // Store waiter reference + activeWaiters.set(waiterId, waiterPromise); + + // Log waiter completion (success or failure) + waiterPromise.then(result => { + log.info('handler:waiterComplete', { waiterId, result }); + activeWaiters.delete(waiterId); + }).catch(err => { + log.error('handler:waiterError', { waiterId, error: err.message }); + activeWaiters.delete(waiterId); + }); + + // Return immediately with verification URL + return { + ok: true, + data: { + status: 'awaiting_authorization', + message: `Open the URL below to authorize. Background waiter active for ${timeoutSeconds}s - will save credentials automatically when you authorize.`, + verification_url: verificationUrl, + waiter_id: waiterId, + timeout_seconds: timeoutSeconds + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + + } catch (e) { + log.error('handler:error', { error: e.message }); + return { + ok: false, + error: { code: 'ENV_ADD_ERROR', message: String(e.message || e) }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + } +}; + +/** + * Background waiter that polls for authorization + */ +async function spawnBackgroundWaiter({ + waiterId, + deviceCode, + portalUrl, + pollInterval, + timeoutSeconds, + environment, + instanceUrl, + email, + fetchFn, + storeEnvFn +}) { + log.info('waiter:start', { waiterId, timeoutSeconds, pollInterval, environment, instanceUrl }); + + const pollEndTime = Date.now() + (timeoutSeconds * 1000); + + try { + let pollCount = 0; + while (Date.now() < pollEndTime) { + pollCount++; + const remaining = Math.ceil((pollEndTime - Date.now()) / 1000); + log.debug('waiter:poll', { pollCount, remaining, waiterId }); + await sleep(pollInterval); + + // Check if waiter was cancelled + if (!activeWaiters.has(waiterId)) { + log.info('waiter:cancelled', { waiterId }); + return { status: 'cancelled' }; + } + + try { + const tokenUrl = `${portalUrl}/oauth/device_token`; + log.debug('waiter:fetchingToken', { tokenUrl }); + const tokenResponse = await fetchFn(tokenUrl, { + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: `grant_type=urn:ietf:params:oauth:grant-type:device_code&device_code=${encodeURIComponent(deviceCode)}` + }); + + log.debug('waiter:tokenResponseStatus', { status: tokenResponse.status }); + const tokenData = await tokenResponse.json(); + log.debug('waiter:tokenData', tokenData); + + if (tokenData.access_token) { + 
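+          // Success path of the OAuth 2.0 Device Authorization Grant (RFC 8628, per the
+          // grant_type string above). Illustrative token response, exact payload may vary:
+          //   { "access_token": "abc123", "token_type": "Bearer" }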
log.info('waiter:accessTokenReceived'); + + try { + log.debug('waiter:callingStoreEnvironment'); + storeEnvFn({ + environment, + url: instanceUrl, + token: tokenData.access_token, + email, + partner_portal_url: portalUrl + }); + log.info('waiter:storeEnvironmentSuccess'); + } catch (e) { + log.error('waiter:storeEnvironmentError', { error: e.message }); + activeWaiters.delete(waiterId); + return { status: 'error', error: `Failed to save environment: ${e.message}` }; + } + + activeWaiters.delete(waiterId); + log.info('waiter:success', { environment }); + return { status: 'success', environment }; + } + + if (tokenData.error === 'authorization_pending') { + log.debug('waiter:authorizationPending'); + continue; + } + + if (tokenData.error === 'slow_down') { + log.warn('waiter:slowDown'); + await sleep(pollInterval); + continue; + } + + if (tokenData.error === 'expired_token' || tokenData.error === 'access_denied') { + log.warn('waiter:authFailed', { error: tokenData.error }); + activeWaiters.delete(waiterId); + return { status: 'failed', error: tokenData.error }; + } + + log.warn('waiter:unknownError', tokenData.error); + + } catch (e) { + log.error('waiter:pollError', { error: e.message }); + // Continue polling on network errors + } + } + + // Timeout + log.warn('waiter:timeout', { waiterId }); + activeWaiters.delete(waiterId); + return { status: 'timeout' }; + + } catch (e) { + log.error('waiter:exception', { error: e.message }); + activeWaiters.delete(waiterId); + return { status: 'error', error: e.message }; + } +} + +function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +function storeEnvironment(settings) { + log.debug('storeEnvironment:start', settings); + + // Use .pos file in the directory where MCP server was started + const configPath = path.join(process.cwd(), '.pos'); + log.debug('storeEnvironment:configPath', { configPath, cwd: process.cwd() }); + + let config = {}; + if (fs.existsSync(configPath)) { + log.debug('storeEnvironment:existingFileFound'); + try { + config = JSON.parse(fs.readFileSync(configPath, 'utf8')); + log.debug('storeEnvironment:existingConfig', config); + } catch (e) { + log.warn('storeEnvironment:parseError', { error: e.message }); + } + } else { + log.debug('storeEnvironment:creatingNew'); + } + + config[settings.environment] = { + url: settings.url, + token: settings.token, + email: settings.email, + partner_portal_url: settings.partner_portal_url + }; + + log.debug('storeEnvironment:newConfig', config); + + try { + fs.writeFileSync(configPath, JSON.stringify(config, null, 2)); + log.info('storeEnvironment:success', { configPath }); + } catch (e) { + log.error('storeEnvironment:writeError', { error: e.message }); + throw new Error(`Failed to write .pos file: ${e.message}`); + } +} + +export default envAddTool; diff --git a/mcp-min/portal/instance-create.js b/mcp-min/portal/instance-create.js new file mode 100644 index 000000000..c5d66131c --- /dev/null +++ b/mcp-min/portal/instance-create.js @@ -0,0 +1,106 @@ +// instance-create tool - Create a new platformOS instance via Partner Portal API +import log from '../log.js'; +import { getPortalConfig, portalRequest } from './portal-client.js'; + +const instanceCreateTool = { + description: 'Create a new platformOS instance via Partner Portal API. Returns job acknowledgment.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + name: { + type: 'string', + description: 'Instance name (subdomain). Will be validated for availability.' 
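+          // Note: the handler below first checks availability via
+          // GET /api/instance_name_checks/:name and returns NAME_UNAVAILABLE if taken.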
+ }, + partner_id: { + type: 'number', + description: 'Partner ID (use partners-list tool to find)' + }, + endpoint_id: { + type: 'number', + description: 'Region/endpoint ID (use endpoints-list tool to find)' + }, + billing_plan_id: { + type: 'number', + description: 'Billing plan ID (use partners-list to see available plans)' + }, + tags: { + type: 'array', + items: { type: 'string' }, + description: 'Optional tags for the instance' + } + }, + required: ['name', 'partner_id', 'endpoint_id', 'billing_plan_id'] + }, + + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:instance-create invoked', { name: params.name }); + + try { + // Allow injection for testing + const configFn = ctx.getPortalConfig || getPortalConfig; + const requestFn = ctx.portalRequest || portalRequest; + const config = ctx.portalConfig || configFn(); + + // 1. Validate instance name availability + log.debug('instance-create: checking name availability', { name: params.name }); + const nameCheck = await requestFn({ + method: 'GET', + path: `/api/instance_name_checks/${encodeURIComponent(params.name)}`, + config + }); + + if (!nameCheck.available) { + return { + ok: false, + error: { + code: 'NAME_UNAVAILABLE', + message: `Instance name "${params.name}" is not available` + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + + // 2. Create instance + log.debug('instance-create: creating instance', { name: params.name, partner_id: params.partner_id }); + const response = await requestFn({ + method: 'POST', + path: '/api/tasks/instance/create', + body: { + instance_billing_plan_type_id: params.billing_plan_id, + partner_id: params.partner_id, + instance_params: { + endpoint_id: params.endpoint_id, + name: params.name, + tag_list: params.tags || [] + } + }, + config + }); + + return { + ok: true, + data: { + acknowledged: response.acknowledged, + name: params.name, + message: 'Instance creation started. It may take a few minutes to complete.' + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } catch (e) { + log.error('instance-create: error', { error: e.message, status: e.status }); + return { + ok: false, + error: { + code: e.status === 422 ? 
'VALIDATION_ERROR' : 'INSTANCE_CREATE_ERROR', + message: String(e.message || e), + details: e.data + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + } +}; + +export default instanceCreateTool; diff --git a/mcp-min/portal/partner-get.js b/mcp-min/portal/partner-get.js new file mode 100644 index 000000000..e5a37346a --- /dev/null +++ b/mcp-min/portal/partner-get.js @@ -0,0 +1,73 @@ +// partner-get tool - Get partner details and billing plans from Partner Portal API +import log from '../log.js'; +import { getPortalConfig, portalRequest } from './portal-client.js'; + +const partnerGetTool = { + description: 'Get partner details including available billing plans from Partner Portal.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + partner_id: { + type: 'number', + description: 'Partner ID to fetch details for' + } + }, + required: ['partner_id'] + }, + + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:partner-get invoked', { partner_id: params.partner_id }); + + try { + const configFn = ctx.getPortalConfig || getPortalConfig; + const requestFn = ctx.portalRequest || portalRequest; + const config = ctx.portalConfig || configFn(); + + log.debug('partner-get: fetching partner details', { partner_id: params.partner_id }); + const partner = await requestFn({ + method: 'GET', + path: `/api/partners/${params.partner_id}`, + config + }); + + // Extract billing plans with full details + const billingPlans = (partner.instance_billing_plan_types || []).map(plan => ({ + id: plan.id, + name: plan.name, + code: plan.code, + description: plan.description, + price: plan.price, + currency: plan.currency + })); + + return { + ok: true, + data: { + partner: { + id: partner.id, + name: partner.name, + email: partner.email, + created_at: partner.created_at + }, + billing_plans: billingPlans, + billing_plans_count: billingPlans.length + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } catch (e) { + log.error('partner-get: error', { error: e.message, status: e.status }); + return { + ok: false, + error: { + code: e.status === 404 ? 'PARTNER_NOT_FOUND' : 'PARTNER_GET_ERROR', + message: String(e.message || e) + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + } +}; + +export default partnerGetTool; diff --git a/mcp-min/portal/partners-list.js b/mcp-min/portal/partners-list.js new file mode 100644 index 000000000..7412e6257 --- /dev/null +++ b/mcp-min/portal/partners-list.js @@ -0,0 +1,89 @@ +// partners-list tool - List partners and their billing plans from Partner Portal API +import log from '../log.js'; +import { getPortalConfig, portalRequest } from './portal-client.js'; + +const partnersListTool = { + description: 'List partners from Partner Portal. 
Optionally fetch billing plans for a specific partner.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + partner_id: { + type: 'number', + description: 'Optional: fetch details and billing plans for a specific partner' + } + }, + required: [] + }, + + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:partners-list invoked', { partner_id: params.partner_id }); + + try { + const configFn = ctx.getPortalConfig || getPortalConfig; + const requestFn = ctx.portalRequest || portalRequest; + const config = ctx.portalConfig || configFn(); + + if (params.partner_id) { + // Fetch specific partner with billing plans + log.debug('partners-list: fetching partner details', { partner_id: params.partner_id }); + const partner = await requestFn({ + method: 'GET', + path: `/api/partners/${params.partner_id}`, + config + }); + + return { + ok: true, + data: { + partner: { + id: partner.id, + name: partner.name, + billing_plans: (partner.instance_billing_plan_types || []).map(plan => ({ + id: plan.id, + name: plan.name, + code: plan.code + })) + } + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + + // List all partners + log.debug('partners-list: fetching all partners'); + const response = await requestFn({ + method: 'GET', + path: '/api/partners', + config + }); + + const partners = Array.isArray(response) ? response : (response.partners || []); + + return { + ok: true, + data: { + partners: partners.map(p => ({ + id: p.id, + name: p.name + })), + count: partners.length + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } catch (e) { + log.error('partners-list: error', { error: e.message, status: e.status }); + return { + ok: false, + error: { + code: e.status === 404 ? 'PARTNER_NOT_FOUND' : 'PARTNERS_LIST_ERROR', + message: String(e.message || e) + }, + meta: { startedAt, finishedAt: new Date().toISOString() } + }; + } + } +}; + +export default partnersListTool; diff --git a/mcp-min/portal/portal-client.js b/mcp-min/portal/portal-client.js new file mode 100644 index 000000000..670fb285a --- /dev/null +++ b/mcp-min/portal/portal-client.js @@ -0,0 +1,84 @@ +// Shared Portal API client for Partner Portal interactions +// Reads configuration from ~/.config/pos-cli/config.json +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import log from '../log.js'; + +/** + * Get Portal configuration from ~/.config/pos-cli/config.json + * @returns {{ master_token: string, partner_portal_url: string }} + */ +function getPortalConfig() { + const configPath = path.join(os.homedir(), '.config', 'pos-cli', 'config.json'); + if (!fs.existsSync(configPath)) { + throw new Error(`Portal config not found at ${configPath}. 
Run 'pos-cli env add' to configure.`); + } + const config = JSON.parse(fs.readFileSync(configPath, 'utf8')); + if (!config.master_token) { + throw new Error('master_token not found in Portal config'); + } + if (!config.partner_portal_url) { + throw new Error('partner_portal_url not found in Portal config'); + } + return config; +} + +/** + * Make an authenticated request to the Partner Portal API + * @param {Object} options + * @param {string} [options.method='GET'] - HTTP method + * @param {string} options.path - API path (e.g., '/api/partners') + * @param {Object} [options.body] - Request body (will be JSON stringified) + * @param {string} [options.token] - Override token (defaults to master_token from config) + * @param {string} [options.baseUrl] - Override base URL (defaults to partner_portal_url from config) + * @param {Object} [options.config] - Override full config (for testing) + * @returns {Promise} - Parsed JSON response + */ +async function portalRequest({ method = 'GET', path: apiPath, body, token, baseUrl, config }) { + const cfg = config || getPortalConfig(); + const url = `${baseUrl || cfg.partner_portal_url}${apiPath}`; + const authToken = token || cfg.master_token; + + log.debug('portal:request', { method, url, hasBody: !!body }); + + const headers = { + 'Authorization': `Bearer ${authToken}`, + 'Content-Type': 'application/json', + 'Accept': 'application/json' + }; + + const response = await fetch(url, { + method, + headers, + body: body ? JSON.stringify(body) : undefined + }); + + log.debug('portal:response', { status: response.status, statusText: response.statusText }); + + // Handle non-JSON responses + const contentType = response.headers.get('content-type') || ''; + if (!contentType.includes('application/json')) { + const text = await response.text(); + if (!response.ok) { + const error = new Error(`Portal API error: ${response.status} ${response.statusText}`); + error.status = response.status; + error.body = text; + throw error; + } + return { raw: text }; + } + + const data = await response.json(); + + if (!response.ok) { + const error = new Error(data.error || data.message || `Portal API error: ${response.status}`); + error.status = response.status; + error.data = data; + throw error; + } + + return data; +} + +export { getPortalConfig, portalRequest }; diff --git a/mcp-min/sse.js b/mcp-min/sse.js new file mode 100644 index 000000000..be3dd0778 --- /dev/null +++ b/mcp-min/sse.js @@ -0,0 +1,41 @@ +// Minimal SSE utilities: framing and heartbeat +import log from './log.js'; + +export const HEARTBEAT_INTERVAL_MS = 15000; // 15s + +export function sseHandler(req, res) { + res.writeHead(200, { + 'Content-Type': 'text/event-stream; charset=utf-8', + 'Cache-Control': 'no-cache, no-transform', + Connection: 'keep-alive' + }); + // initial comment to establish stream + res.write(': connected\n\n'); + log.debug('SSE connection established'); + + // heartbeat + const interval = setInterval(() => { + try { + res.write(': heartbeat\n\n'); + log.debug('SSE heartbeat'); + } catch (e) { + // ignore + } + }, HEARTBEAT_INTERVAL_MS); + + req.on('close', () => { + clearInterval(interval); + log.debug('SSE connection cleaned up'); + }); +} + +export function writeSSE(res, { event, data }) { + // data is a string; escape newlines per SSE spec by sending multiple data: lines + if (event) res.write(`event: ${event}\n`); + if (data == null) data = ''; + const lines = String(data).split('\n'); + for (const line of lines) { + res.write(`data: ${line}\n`); + } + res.write('\n'); +} diff 
--git a/mcp-min/stdio-server.js b/mcp-min/stdio-server.js new file mode 100644 index 000000000..5951fec4f --- /dev/null +++ b/mcp-min/stdio-server.js @@ -0,0 +1,215 @@ +import { createInterface } from 'readline'; +import { fileURLToPath } from 'url'; +import path from 'path'; +import tools from './tools.js'; +import { DEBUG } from './config.js'; +import log from './log.js'; + +// MCP stdio server implementing JSON-RPC 2.0 protocol +// Supports: initialize, notifications/initialized, tools/list, tools/call + +const rl = createInterface({ input: process.stdin, output: process.stdout, terminal: false }); + +const SERVER_INFO = { + name: 'pos-cli-mcp', + version: '0.1.0' +}; + +const SERVER_CAPABILITIES = { + tools: {} +}; + +function send(obj) { + if (!process.stdout.writable) return; + const line = JSON.stringify(obj); + try { + process.stdout.write(line + '\n'); + } catch (err) { + if (err.code === 'EPIPE' || err.code === 'ERR_STREAM_DESTROYED') { + log.debug('stdout closed, exiting'); + process.exit(0); + } + throw err; + } +} + +function sendResult(id, result) { + send({ jsonrpc: '2.0', id, result }); +} + +function sendError(id, code, message, data) { + const error = { code, message }; + if (data !== undefined) error.data = data; + send({ jsonrpc: '2.0', id, error }); +} + +// Build tools list for MCP tools/list response +function getToolsList() { + return Object.entries(tools).map(([name, tool]) => ({ + name, + description: tool.description || '', + inputSchema: tool.inputSchema || { type: 'object', properties: {} } + })); +} + +// MCP protocol handlers +const mcpHandlers = { + 'initialize': async (params, id) => { + log.debug('MCP initialize', { params }); + sendResult(id, { + protocolVersion: '2024-11-05', + serverInfo: SERVER_INFO, + capabilities: SERVER_CAPABILITIES + }); + }, + + 'notifications/initialized': async () => { + // Notification - no response needed + log.debug('MCP initialized notification received'); + }, + + 'tools/list': async (params, id) => { + log.debug('MCP tools/list'); + sendResult(id, { tools: getToolsList() }); + }, + + 'tools/call': async (params, id) => { + const { name, arguments: args, _meta } = params || {}; + const progressToken = _meta?.progressToken; + log.debug('MCP tools/call', { name, args, progressToken }); + + const tool = tools[name]; + if (!tool) { + sendError(id, -32601, `Unknown tool: ${name}`); + return; + } + + // Send progress notification (keeps connection alive, prevents client timeout) + let progressCounter = 0; + function sendProgress(current, total, message) { + if (!progressToken) return; + const notification = { + jsonrpc: '2.0', + method: 'notifications/progress', + params: { progressToken, progress: current } + }; + if (total != null) notification.params.total = total; + if (message) notification.params.message = message; + send(notification); + } + + // Heartbeat: send periodic progress while tool runs to prevent timeout + const heartbeat = progressToken + ? 
setInterval(() => { sendProgress(++progressCounter, undefined, 'working'); }, 5000) + : null; + + try { + const result = await tool.handler(args || {}, { + transport: 'stdio', + debug: DEBUG, + log: log.info.bind(log), + sendProgress + }); + sendResult(id, { + content: [{ type: 'text', text: JSON.stringify(result, null, 2) }] + }); + } catch (err) { + sendError(id, -32000, String(err)); + } finally { + if (heartbeat) clearInterval(heartbeat); + } + } +}; + +export default function startStdio() { + log.info('stdio transport started (MCP protocol)'); + + // Exit cleanly when the MCP client disconnects (closes the pipe) + process.stdout.on('error', (err) => { + if (err.code === 'EPIPE' || err.code === 'ERR_STREAM_DESTROYED') { + log.debug('stdout pipe closed, exiting'); + process.exit(0); + } + log.error('stdout error', err.message); + }); + + rl.on('line', async (line) => { + const raw = line; + line = line.trim(); + if (!line) return; + + let msg; + try { + msg = JSON.parse(line); + } catch (e) { + log.debug('STDIO received invalid JSON', { raw }); + sendError(null, -32700, 'Parse error'); + return; + } + + const { jsonrpc, id, method, params } = msg; + log.debug('STDIO request', { id, method, params }); + + // Handle MCP protocol methods + const mcpHandler = mcpHandlers[method]; + if (mcpHandler) { + await mcpHandler(params, id); + return; + } + + // Fallback: direct tool invocation (legacy/custom protocol) + const tool = tools[method]; + if (tool) { + try { + const result = await tool.handler(params || {}, { transport: 'stdio', debug: DEBUG, log: log.info.bind(log) }); + if (jsonrpc === '2.0') { + sendResult(id, result); + } else { + send({ id, result }); + } + log.debug('STDIO response', { id, method, result }); + } catch (err) { + if (jsonrpc === '2.0') { + sendError(id, -32000, String(err)); + } else { + send({ id, error: String(err) }); + } + log.debug('STDIO error', { id, method, err: String(err) }); + } + return; + } + + // Unknown method + if (jsonrpc === '2.0') { + sendError(id, -32601, `Method not found: ${method}`); + } else { + send({ id, error: `unknown_method: ${method}` }); + } + log.debug('STDIO unknown method', { id, method }); + }); +} + +// Parse --cwd or -C argument +function parseCwd(argv) { + for (let i = 2; i < argv.length; i++) { + if ((argv[i] === '--cwd' || argv[i] === '-C') && argv[i + 1]) { + return argv[i + 1]; + } + if (argv[i].startsWith('--cwd=')) { + return argv[i].slice(6); + } + } + return null; +} + +// Auto-start when executed directly (node mcp-min/stdio-server.js) +const __filename = fileURLToPath(import.meta.url); +if (process.argv[1] && path.resolve(process.argv[1]) === __filename) { + const cwd = parseCwd(process.argv); + if (cwd) { + process.chdir(path.resolve(cwd)); + log.info(`working directory set to ${process.cwd()}`); + } + log.info(`log file: ${log.LOG_FILE}`); + startStdio(); +} diff --git a/mcp-min/sync/single-file.js b/mcp-min/sync/single-file.js new file mode 100644 index 000000000..136714b39 --- /dev/null +++ b/mcp-min/sync/single-file.js @@ -0,0 +1,289 @@ +// sync.singleFile tool extracted from tools.js for maintainability +import fs from 'fs'; +import path from 'path'; + +// Reuse pos-cli internals (ESM) +import files from '../../lib/files.js'; +import { fetchSettings, loadSettingsFileForModule } from '../../lib/settings.js'; +import shouldBeSynced from '../../lib/shouldBeSynced.js'; +import Gateway from '../../lib/proxy.js'; +import { presignDirectory } from '../../lib/presignUrl.js'; +import { uploadFileFormData } from 
'../../lib/s3UploadFile.js'; +import { manifestGenerateForAssets } from '../../lib/assets/manifest.js'; +import { fillInTemplateValues } from '../../lib/templates.js'; +import dir from '../../lib/directories.js'; +import log from '../log.js'; + +// Alias for backwards compatibility +const settings = { fetchSettings }; +const templates = { fillInTemplateValues }; + +// Helpers (kept local to this module) +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' + token.slice(-3); +} + +function toPosix(p) { + return p.replace(/\\/g, '/'); +} + +async function resolveAuth(params) { + // precedence: explicit params -> env (MPKIT_*) -> .pos + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + // fallback: first env from .pos if present + const conf = files.getConfig(); + const firstEnv = Object.keys(conf || {})[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +function normalizeLocalPath(filePathParam) { + const abs = path.resolve(filePathParam); + const rel = path.relative(process.cwd(), abs); + return toPosix(rel); +} + +function computeRemotePath(relPath) { + // remove leading app/ or marketplace_builder/ like pos-cli watch.filePathUnixified + const posix = toPosix(relPath); + const reApp = new RegExp(`^${dir.APP}/`); + const reLegacy = new RegExp(`^${dir.LEGACY_APP}/`); + return posix.replace(reApp, '').replace(reLegacy, ''); +} + +function isAssetsPath(relPath) { + return relPath.startsWith('app/assets') || /^modules\/\w+\/public\/assets/.test(relPath); +} + +async function uploadAsset({ gateway, relPath, log }) { + // Prepare direct upload data + const instance = await gateway.getInstance(); + const remoteAssetsDir = `instances/${instance.id}/assets`; + const data = await presignDirectory(remoteAssetsDir); + + const dirname = path.posix.dirname(relPath); + const fileSubdir = relPath.startsWith('app/assets') + ? 
dirname.replace('app/assets', '') + : '/' + dirname.replace('/public/assets', ''); + const key = data.fields.key.replace('assets/\${filename}', `assets${fileSubdir}/\${filename}`); + data.fields.key = key; + + log?.(`[sync-file] Uploading asset to S3: ${relPath}`); + log?.(`[sync-file] Presigned URL: ${data.url}`); + log?.(`[sync-file] FormData fields: ${JSON.stringify(Object.keys(data.fields))}`); + + await uploadFileFormData(relPath, data); + const manifest = manifestGenerateForAssets([relPath]); + await gateway.sendManifest(manifest); + return { ok: true }; +} + +async function uploadNonAsset({ gateway, relPath, log }) { + const remotePath = computeRemotePath(relPath); + const processTemplate = remotePath.startsWith('modules'); + let body; + if (processTemplate) { + const moduleName = relPath.split('/')[1]; + const moduleData = loadSettingsFileForModule(moduleName); + body = templates.fillInTemplateValues(relPath, moduleData); + log?.(`[sync-file] Processing template for module: ${moduleName}`); + } else { + body = fs.createReadStream(relPath); + log?.(`[sync-file] Streaming file: ${relPath}`); + } + const formData = { path: remotePath, marketplace_builder_file_body: body }; + log?.(`[sync-file] Sync formData: path=${remotePath}, body type=${processTemplate ? 'template' : 'stream'}`); + const resp = await gateway.sync(formData); + return { ok: true, response: resp }; +} + +async function deleteRemote({ gateway, relPath }) { + const remotePath = computeRemotePath(relPath); + const formData = { path: remotePath, primary_key: remotePath }; + const resp = await gateway.delete(formData); + return { ok: true, response: resp }; +} + +const singleFileTool = { + description: 'Sync a single file to a platformOS instance (upload or delete). Handles assets (direct S3 upload + manifest) and non-assets (gateway sync) automatically. Respects .posignore rules. Auth resolved from: explicit params > MPKIT_* env vars > .pos config. Use dryRun to validate without sending.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + filePath: { type: 'string', description: 'Absolute or relative path to the file to sync. Must be inside app/, marketplace_builder/, or modules/.' }, + env: { type: 'string', description: 'Environment name from .pos config (e.g., staging, production). Used to resolve auth when url/email/token are not provided.' }, + url: { type: 'string', description: 'Instance URL (e.g., https://my-app.staging.oregon.platform-os.com). Requires email and token.' }, + email: { type: 'string', description: 'Email for instance authentication. Required with url and token.' }, + token: { type: 'string', description: 'API token for instance authentication. Required with url and email.' }, + op: { type: 'string', enum: ['upload', 'delete'], description: 'Operation: "upload" to push file, "delete" to remove from instance. Auto-detected from file existence if omitted.' }, + dryRun: { type: 'boolean', description: 'Validate file path, auth, and sync rules without actually uploading. Default: false.' }, + confirmDelete: { type: 'boolean', description: 'Safety flag — must be true to execute delete operations. Default: false.' 
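+        // Note: the .posignore check in the handler only blocks uploads; a delete
+        // for an ignored path still proceeds once confirmDelete is true.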
} + }, + required: ['filePath'] + }, +handler: async (params, ctx) => { + const startedAt = new Date().toISOString(); + const logFn = ctx?.log || log.info.bind(log); + const { filePath, op: opParam, dryRun = false, confirmDelete = false } = params || {}; + if (!filePath || typeof filePath !== 'string') { + throw new Error('INVALID_PARAM: filePath is required'); + } + + const relPath = normalizeLocalPath(filePath); + const absPath = path.resolve(filePath); + + logFn(`[sync-file] Processing file: ${filePath} (normalized: ${relPath})`); + + // Validate location + const allowedPrefixes = [dir.APP + '/', dir.LEGACY_APP + '/', dir.MODULES + '/']; + const inAllowedDir = allowedPrefixes.some((p) => toPosix(relPath).startsWith(p)); + if (!inAllowedDir) { + logFn(`[sync-file] File outside allowed directories: ${relPath}`); + return { + success: false, + operation: 'noop', + error: { code: 'FILE_OUTSIDE_ALLOWED_DIRECTORIES', message: `File must be inside ${allowedPrefixes.join(', ')}` }, + file: { localPath: filePath, normalizedPath: relPath } + }; + } + + const ignoreList = files.getIgnoreList(); + const should = shouldBeSynced(relPath, ignoreList); + logFn(`[sync-file] Sync check for ${relPath}: shouldSync=${should}, ignoreList rules=${ignoreList.length}`); + if (!should && opParam !== 'delete') { + return { + success: false, + operation: 'noop', + error: { code: 'IGNORED_BY_RULES', message: 'File is ignored by .posignore or rules' }, + file: { localPath: filePath, normalizedPath: relPath } + }; + } + + const exists = fs.existsSync(absPath); + const op = opParam || (exists ? 'upload' : 'delete'); + logFn(`[sync-file] Operation determined: ${op} (file exists: ${exists})`); + + // Resolve auth and prepare Gateway + const auth = await resolveAuth(params); + logFn(`[sync-file] Auth resolved from: ${auth.source}, URL: ${auth.url}`); + // set env vars expected by pos-cli internals (presignDirectory) + process.env.MARKETPLACE_EMAIL = auth.email; + process.env.MARKETPLACE_TOKEN = auth.token; + process.env.MARKETPLACE_URL = auth.url; + process.env.SYNC_SINGLE = 'true'; + + if (dryRun) { + return { + success: true, + operation: op, + file: { + localPath: filePath, + normalizedPath: relPath, + isAsset: isAssetsPath(relPath), + size: exists ? 
fs.statSync(absPath).size : null + }, + server: { responseCode: null, method: null }, + timings: { startedAt, finishedAt: new Date().toISOString(), durationMs: 0 }, + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + }; + } + + const gateway = new Gateway({ url: auth.url, token: auth.token, email: auth.email }); + + try { + if (op === 'delete') { + logFn(`[sync-file] Starting delete operation for: ${relPath}`); + if (!confirmDelete) { + return { + success: false, + operation: 'delete', + error: { code: 'DELETE_PROTECTED', message: 'confirmDelete=true is required to delete' }, + file: { localPath: filePath, normalizedPath: relPath } + }; + } + const result = await deleteRemote({ gateway, relPath }); + logFn(`[sync-file] Delete completed for: ${relPath}`); + return { + success: true, + operation: 'delete', + file: { localPath: filePath, normalizedPath: computeRemotePath(relPath) }, + server: { responseCode: 200, method: 'gateway.delete', gatewayResponse: result.response || null }, + timings: { startedAt, finishedAt: new Date().toISOString() } + }; + } + + if (!exists) { + return { + success: false, + operation: 'upload', + error: { code: 'FILE_NOT_FOUND', message: `Local file not found: ${filePath}` }, + file: { localPath: filePath, normalizedPath: relPath } + }; + } + + if (isAssetsPath(relPath)) { + logFn(`[sync-file] Uploading asset: ${relPath}`); + await uploadAsset({ gateway, relPath, log: logFn }); + logFn(`[sync-file] Asset upload completed: ${relPath}`); + return { + success: true, + operation: 'update', + file: { localPath: filePath, normalizedPath: relPath, isAsset: true, size: fs.statSync(absPath).size }, + // server: { responseCode: 200, method: 'asset.directUpload+manifest' }, + timings: { startedAt, finishedAt: new Date().toISOString() } + }; + } else { + logFn(`[sync-file] Uploading non-asset: ${relPath}`); + const res = await uploadNonAsset({ gateway, relPath, log: logFn }); + logFn(`[sync-file] Non-asset upload completed: ${relPath}, response status: ${res.response?.status || 'unknown'}`); + return { + success: true, + operation: 'update', + file: { localPath: filePath, normalizedPath: computeRemotePath(relPath), isAsset: false, size: fs.statSync(absPath).size }, + // server: { responseCode: 200, method: 'gateway.sync', gatewayResponse: res.response || null }, + timings: { startedAt, finishedAt: new Date().toISOString() } + }; + } + } catch (e) { + // Extract response body details (422 validation errors, etc.) 
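+      // Illustrative body shapes handled below (actual fields vary by endpoint):
+      //   { "error": "..." }  or  { "errors": ["...", "..."] }, optionally with "details"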
+ const body = e?.response?.body; + const serverError = body?.error || (Array.isArray(body?.errors) && body.errors.join(', ')) || null; + const serverDetails = body?.details || null; + const statusCode = e?.statusCode || e?.response?.statusCode || null; + + const detail = serverError || String(e?.message || e); + logFn(`[sync-file] Error during ${op} for ${relPath} (${statusCode}): ${detail}`); + + const errPayload = { + code: 'GATEWAY_ERROR', + message: detail, + statusCode, + details: { + operation: op, + file: { localPath: filePath, normalizedPath: relPath }, + ...(serverDetails && { server: serverDetails }) + } + }; + const err = new Error(`${errPayload.code}: ${detail}`); + err._pos = errPayload; + throw err; + } + } +}; + +export default singleFileTool; +export { computeRemotePath, normalizeLocalPath, toPosix }; diff --git a/mcp-min/tests/run-async-result.js b/mcp-min/tests/run-async-result.js new file mode 100644 index 000000000..dcff28907 --- /dev/null +++ b/mcp-min/tests/run-async-result.js @@ -0,0 +1,155 @@ +// platformos.tests.run-async-result - check result of an async test run via /_tests/results/:id +import log from '../log.js'; +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; + +const settings = { fetchSettings }; + +async function makeRequest(options) { + const { uri, method = 'GET', headers = {} } = options; + const response = await fetch(uri, { method, headers }); + const body = await response.text(); + return { statusCode: response.status, body }; +} + +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' + token.slice(-3); +} + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +function normalizeResult(result) { + const status = result.status; + const data = { + id: result.id, + status, + test_name: result.test_name, + total_assertions: parseInt(result.total_assertions, 10) || 0, + total_errors: parseInt(result.total_errors, 10) || 0, + total_duration: parseInt(result.total_duration, 10) || 0, + error_message: result.error_message || '', + tests: result.tests || [], + pending: status === 'pending', + passed: status === 'success', + done: status !== 'pending' + }; + return data; +} + +const testsRunAsyncResultTool = { + description: 'Check the result of an async test run by ID via /_tests/results/:id. Returns current status: pending (still running), success (all passed), failed (assertion failures), or error (runner crashed). 
Poll until done=true.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' }, + id: { type: 'string', description: 'Test run ID returned by tests-run-async' } + }, + required: ['id'] + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:tests-run-async-result invoked', { id: params?.id, env: params?.env }); + + try { + const auth = await resolveAuth(params); + const requestFn = ctx.request || makeRequest; + const authHeaders = { + 'Authorization': `Token ${auth.token}`, + 'UserTemporaryToken': auth.token + }; + + const runId = params.id; + const resultsUrl = `${auth.url}/_tests/results/${runId}`; + + log.debug('Fetching test results', { url: resultsUrl }); + + const response = await requestFn({ + method: 'GET', + uri: resultsUrl, + headers: authHeaders + }); + + const authMeta = { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source }; + + if (response.statusCode >= 400) { + return { + ok: false, + error: { + code: 'HTTP_ERROR', + message: `Results request failed with status ${response.statusCode}`, + statusCode: response.statusCode, + body: response.body + }, + meta: { url: resultsUrl, startedAt, finishedAt: new Date().toISOString(), auth: authMeta } + }; + } + + let result; + try { + result = JSON.parse(response.body); + } catch { + return { + ok: false, + error: { + code: 'INVALID_RESPONSE', + message: 'Failed to parse results response as JSON', + body: response.body + }, + meta: { url: resultsUrl, startedAt, finishedAt: new Date().toISOString(), auth: authMeta } + }; + } + + if (result.error === 'not_found') { + return { + ok: false, + error: { + code: 'NOT_FOUND', + message: `Test result ${runId} not found`, + data: result + }, + meta: { url: resultsUrl, startedAt, finishedAt: new Date().toISOString(), auth: authMeta } + }; + } + + const data = normalizeResult(result); + + return { + ok: true, + data, + meta: { url: resultsUrl, startedAt, finishedAt: new Date().toISOString(), auth: authMeta } + }; + } catch (e) { + log.error('tool:tests-run-async-result error', { error: String(e) }); + return { + ok: false, + error: { code: 'TESTS_RESULT_ERROR', message: String(e.message || e) } + }; + } + } +}; + +export default testsRunAsyncResultTool; diff --git a/mcp-min/tests/run-async.js b/mcp-min/tests/run-async.js new file mode 100644 index 000000000..dc99da143 --- /dev/null +++ b/mcp-min/tests/run-async.js @@ -0,0 +1,157 @@ +// platformos.tests.run-async - trigger tests via /_tests/run_async (returns immediately) +import log from '../log.js'; +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; + +const settings = { fetchSettings }; + +async function makeRequest(options) { + const { uri, method = 'GET', headers = {} } = options; + const response = await fetch(uri, { method, headers }); + const body = await response.text(); + return { statusCode: response.status, body }; +} + +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' 
+ token.slice(-3); +} + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +const testsRunAsyncTool = { + description: 'Trigger a background platformOS test run via /_tests/run_async. Returns immediately with a run ID. Use tests-run-async-result to poll for completion.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:tests-run-async invoked', { env: params?.env }); + + try { + const auth = await resolveAuth(params); + const requestFn = ctx.request || makeRequest; + const authHeaders = { + 'Authorization': `Token ${auth.token}`, + 'UserTemporaryToken': auth.token + }; + + const triggerUrl = `${auth.url}/_tests/run_async`; + log.debug('Triggering async test run', { url: triggerUrl }); + + const triggerResponse = await requestFn({ + method: 'GET', + uri: triggerUrl, + headers: authHeaders + }); + + if (triggerResponse.statusCode >= 400) { + return { + ok: false, + error: { + code: 'HTTP_ERROR', + message: `Trigger request failed with status ${triggerResponse.statusCode}`, + statusCode: triggerResponse.statusCode, + body: triggerResponse.body + }, + meta: { + url: triggerUrl, + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } + + let runInfo; + try { + runInfo = JSON.parse(triggerResponse.body); + } catch { + return { + ok: false, + error: { + code: 'INVALID_RESPONSE', + message: 'Failed to parse run_async response as JSON', + body: triggerResponse.body + }, + meta: { + url: triggerUrl, + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } + + const runId = runInfo.id; + if (!runId) { + return { + ok: false, + error: { + code: 'MISSING_ID', + message: 'run_async response did not contain an id', + data: runInfo + }, + meta: { + url: triggerUrl, + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } + + log.debug('Test run triggered', { id: runId, test_name: runInfo.test_name }); + + return { + ok: true, + data: { + id: runId, + test_name: runInfo.test_name, + status: runInfo.status || 'pending', + result_url: runInfo.result_url || 
`/_tests/results/${runId}` + }, + meta: { + url: triggerUrl, + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } catch (e) { + log.error('tool:tests-run-async error', { error: String(e) }); + return { + ok: false, + error: { code: 'TESTS_RUN_ASYNC_ERROR', message: String(e.message || e) } + }; + } + } +}; + +export default testsRunAsyncTool; diff --git a/mcp-min/tests/run.js b/mcp-min/tests/run.js new file mode 100644 index 000000000..57087f683 --- /dev/null +++ b/mcp-min/tests/run.js @@ -0,0 +1,371 @@ +// platformos.tests.run - execute tests via /_tests/run?formatter=text +import log from '../log.js'; +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; + +const settings = { fetchSettings }; + +// Helper to make HTTP requests (replaces request-promise) +async function makeRequest(options) { + const { uri, method = 'GET', headers = {} } = options; + const response = await fetch(uri, { method, headers }); + const body = await response.text(); + return { statusCode: response.status, body }; +} + +function maskToken(token) { + if (!token) return token; + return token.slice(0, 3) + '...' + token.slice(-3); +} + +async function resolveAuth(params) { + if (params?.url && params?.email && params?.token) { + return { url: params.url, email: params.email, token: params.token, source: 'params' }; + } + const { MPKIT_URL, MPKIT_EMAIL, MPKIT_TOKEN } = process.env; + if (MPKIT_URL && MPKIT_EMAIL && MPKIT_TOKEN) { + return { url: MPKIT_URL, email: MPKIT_EMAIL, token: MPKIT_TOKEN, source: 'env' }; + } + if (params?.env) { + const found = await settings.fetchSettings(params.env); + if (found) return { ...found, source: `.pos(${params.env})` }; + } + const conf = files.getConfig(); + const firstEnv = conf && Object.keys(conf)[0]; + if (firstEnv) { + const found = conf[firstEnv]; + if (found) return { ...found, source: `.pos(${firstEnv})` }; + } + throw new Error('AUTH_MISSING: Provide url,email,token or configure .pos / MPKIT_* env vars'); +} + +/** + * Parse the text response from /_tests/run?formatter=text + * + * Supports two formats: + * + * Format 1 (JSON): + * {"path":"tests/example_test"}{"class_name":"...","message":"..."} + * ------------------------ + * Assertions: 5. Failed: 1. Time: 123ms + * + * Format 2 (Text/Indented): + * ------------------------ + * commands/questions/create_test + * build_valid should be valid: + * errors_populated translation missing: en.test.should.be_true + * ------------------------ + * Failed_ + * Total errors: 4 + * Assertions: 11. Failed: 4. Time: 267ms + */ +function parseTestResponse(text) { + const lines = text.split('\n'); + const tests = []; + let summary = { assertions: 0, failed: 0, timeMs: 0, totalErrors: 0 }; + + let currentTestPath = null; + let currentTestCases = []; + let inFailedSection = false; + + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + const trimmed = line.trim(); + + // Skip empty lines + if (!trimmed) { + continue; + } + + // Skip separator lines + if (/^-+$/.test(trimmed)) { + // If we have a current test, save it before moving on + if (currentTestPath) { + tests.push({ + path: currentTestPath, + cases: currentTestCases, + passed: currentTestCases.every(c => c.passed) + }); + currentTestPath = null; + currentTestCases = []; + } + continue; + } + + // Check for summary line: "Assertions: X. Failed: Y. 
Time: Zms" + const summaryMatch = trimmed.match(/Assertions:\s*(\d+)\.\s*Failed:\s*(\d+)\.\s*Time:\s*(\d+)ms/i); + if (summaryMatch) { + summary.assertions = parseInt(summaryMatch[1], 10); + summary.failed = parseInt(summaryMatch[2], 10); + summary.timeMs = parseInt(summaryMatch[3], 10); + continue; + } + + // Check for "Total errors: X" line + const totalErrorsMatch = trimmed.match(/Total errors:\s*(\d+)/i); + if (totalErrorsMatch) { + summary.totalErrors = parseInt(totalErrorsMatch[1], 10); + continue; + } + + // Check for "Failed_" section marker + if (trimmed === 'Failed_') { + inFailedSection = true; + continue; + } + + // Skip lines in Failed_ section (we already have the info) + if (inFailedSection) { + continue; + } + + // Check for "SYNTAX ERROR:" prefix - strip it and parse JSON + let lineToParse = trimmed; + let isSyntaxError = false; + if (trimmed.startsWith('SYNTAX ERROR:')) { + lineToParse = trimmed.slice('SYNTAX ERROR:'.length); + isSyntaxError = true; + } + + // Try to parse JSON objects from the line (Format 1) + const jsonObjects = extractJsonObjects(lineToParse); + if (jsonObjects.length > 0) { + const testResult = { raw: jsonObjects }; + + if (isSyntaxError) { + testResult.syntaxError = true; + } + + for (const obj of jsonObjects) { + if (obj.path) { + testResult.path = obj.path; + } + if (obj.class_name) { + testResult.error = { + className: obj.class_name, + message: obj.message || '' + }; + } + if (obj.status) testResult.status = obj.status; + if (obj.name) testResult.name = obj.name; + if (obj.assertions !== undefined) testResult.assertions = obj.assertions; + if (obj.failures !== undefined) testResult.failures = obj.failures; + } + + tests.push(testResult); + continue; + } + + // Format 2: Check if this is an indented test case (starts with spaces) + if (line.startsWith(' ') && currentTestPath) { + // This is a test case line + // Formats: + // - " build_valid should be valid:" - pass (ends with colon, describing expected state) + // - " result.results should not be blank" - pass (assertion description, no error) + // - " errors_populated translation missing: en.test..." 
- fail (has error message) + + const caseMatch = trimmed.match(/^(\S+)\s+(.*)$/); + if (caseMatch) { + const caseName = caseMatch[1]; + const rest = caseMatch[2]; + + // Check for failure patterns - error messages typically contain these patterns + const failurePatterns = [ + /translation missing:/i, + /error:/i, + /failed:/i, + /exception:/i, + /undefined method/i, + /cannot find/i, + /not found/i + ]; + + const isFailure = failurePatterns.some(pattern => pattern.test(rest)); + + if (isFailure) { + // Has error content - this is a failure + currentTestCases.push({ + name: caseName, + passed: false, + error: rest + }); + } else if (rest.match(/^[^:]+:$/)) { + // Ends with ":" and nothing after - this is a pass with description + // e.g., "should be valid:" + const description = rest.slice(0, -1).trim(); + currentTestCases.push({ + name: caseName, + description, + passed: true + }); + } else { + // No error pattern and doesn't end with colon - treat as pass + // e.g., "should not be blank" + currentTestCases.push({ + name: caseName, + description: rest, + passed: true + }); + } + } + continue; + } + + // Format 2: Non-indented line that's not a separator or summary - likely a test path + if (!line.startsWith(' ') && !trimmed.startsWith('{')) { + // Save previous test if exists + if (currentTestPath) { + tests.push({ + path: currentTestPath, + cases: currentTestCases, + passed: currentTestCases.every(c => c.passed) + }); + } + currentTestPath = trimmed; + currentTestCases = []; + } + } + + // Don't forget the last test if we ended without a separator + if (currentTestPath) { + tests.push({ + path: currentTestPath, + cases: currentTestCases, + passed: currentTestCases.every(c => c.passed) + }); + } + + return { tests, summary }; +} + +/** + * Extract JSON objects from a string that may contain multiple concatenated JSON objects + */ +function extractJsonObjects(str) { + const objects = []; + let depth = 0; + let start = -1; + + for (let i = 0; i < str.length; i++) { + const char = str[i]; + + if (char === '{') { + if (depth === 0) { + start = i; + } + depth++; + } else if (char === '}') { + depth--; + if (depth === 0 && start !== -1) { + const jsonStr = str.slice(start, i + 1); + try { + const parsed = JSON.parse(jsonStr); + objects.push(parsed); + } catch (e) { + // Invalid JSON, skip + log.debug('Failed to parse JSON object', { jsonStr, error: e.message }); + } + start = -1; + } + } + } + + return objects; +} + +const testsRunTool = { + description: 'Run platformOS tests via /_tests/run endpoint. Returns parsed test results with assertions count, failures, and timing.', + inputSchema: { + type: 'object', + additionalProperties: false, + properties: { + env: { type: 'string', description: 'Environment name from .pos config' }, + url: { type: 'string', description: 'Instance URL (alternative to env)' }, + email: { type: 'string', description: 'Account email (alternative to env)' }, + token: { type: 'string', description: 'API token (alternative to env)' }, + path: { type: 'string', description: 'Optional test path filter (e.g., "tests/users")' }, + name: { type: 'string', description: 'Test name filter (e.g., "create_user_test"). Required to avoid running all tests which causes timeouts.' 
} + }, + required: ['env', 'name'] + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + log.debug('tool:unit-tests-run invoked', { env: params?.env, path: params?.path }); + + try { + const auth = await resolveAuth(params); + + // Build the URL with query parameters + let testUrl = `${auth.url}/_tests/run?formatter=text`; + if (params?.path) { + testUrl += `&path=${encodeURIComponent(params.path)}`; + } + if (params?.name) { + testUrl += `&name=${encodeURIComponent(params.name)}`; + } + + log.debug('Requesting tests', { url: testUrl }); + + // Make the request + const requestFn = ctx.request || makeRequest; + const response = await requestFn({ + method: 'GET', + uri: testUrl, + headers: { + 'Authorization': `Token ${auth.token}`, + 'UserTemporaryToken': auth.token + } + }); + + const statusCode = response.statusCode; + const body = response.body; + + if (statusCode >= 400) { + return { + ok: false, + error: { + code: 'HTTP_ERROR', + message: `Request failed with status ${statusCode}`, + statusCode, + body + }, + meta: { + url: testUrl, + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } + + // Parse the response + const parsed = parseTestResponse(body); + + return { + ok: true, + data: { + tests: parsed.tests, + summary: parsed.summary, + passed: parsed.summary.failed === 0, + totalTests: parsed.tests.length + }, + raw: body, + meta: { + url: testUrl, + startedAt, + finishedAt: new Date().toISOString(), + auth: { url: auth.url, email: auth.email, token: maskToken(auth.token), source: auth.source } + } + }; + } catch (e) { + log.error('tool:unit-tests-run error', { error: String(e) }); + return { + ok: false, + error: { code: 'TESTS_RUN_ERROR', message: String(e.message || e) } + }; + } + } +}; + +export default testsRunTool; +export { parseTestResponse, extractJsonObjects }; diff --git a/mcp-min/tools.config.json b/mcp-min/tools.config.json new file mode 100644 index 000000000..6de244a31 --- /dev/null +++ b/mcp-min/tools.config.json @@ -0,0 +1,146 @@ +{ + "$schema": "./tools.config.schema.json", + "_comment": "Tool configuration for mcp-min. Edit descriptions and enable/disable tools without touching JS code.", + "tools": { + "envs-list": { + "enabled": true, + "description": "List configured environments from .pos (name and url)" + }, + "logs-fetch": { + "enabled": true, + "description": "Fetch recent logs in batches (NDJSON semantics, returns JSON array here). Mirrors pos-cli fetch-logs." + }, + "liquid-exec": { + "enabled": true, + "description": "Render a Liquid template on a platformOS instance (pass-through to /api/app_builder/liquid_exec)." + }, + "graphql-exec": { + "enabled": true, + "description": "Execute GraphQL query/mutation on a platformOS instance via /api/graph." + }, + "generators-list": { + "enabled": true, + "description": "List available generators discovered under **/generators/*/index.js with required and optional args" + }, + "generators-help": { + "enabled": true, + "description": "Show detailed help for a specific generator" + }, + "generators-run": { + "enabled": true, + "description": "Run a yeoman generator by path with arguments and options" + }, + "migrations-list": { + "enabled": true, + "description": "List migrations deployed to the server with their current status." 
+ }, + "migrations-generate": { + "enabled": true, + "description": "Generate a migration on server and write local file unless skipWrite=true." + }, + "migrations-run": { + "enabled": true, + "description": "Run a specific migration identified by timestamp or full name." + }, + "deploy-start": { + "enabled": true, + "description": "Deploy to platformOS instance. Creates archive from app/ and modules/ directories, uploads it, and deploys assets directly to S3." + }, + "deploy-status": { + "enabled": true, + "description": "Get current deployment status using Gateway.getStatus(id)." + }, + "deploy-wait": { + "enabled": true, + "description": "Wait for deployment to finish. Polls Gateway.getStatus(id) every intervalMs (default 1000ms) and errors on status=error." + }, + "data-import": { + "enabled": true, + "description": "Import data to platformOS instance. Accepts JSON (converted to CSV internally) or ZIP file with CSV files." + }, + "data-import-status": { + "enabled": true, + "description": "Check the status of a data import job. Poll until status is \"done\" or \"failed\"." + }, + "data-export": { + "enabled": true, + "description": "Start data export from platformOS instance. Returns job ID for status polling. When complete, status will include data or zip_file_url." + }, + "data-export-status": { + "enabled": true, + "description": "Check the status of a data export job. When done, returns data (JSON) or zip_file_url (ZIP)." + }, + "data-clean": { + "enabled": true, + "description": "Start data clean operation to remove data from a platformOS instance. DESTRUCTIVE - requires confirmation. Returns job ID for status polling." + }, + "data-clean-status": { + "enabled": true, + "description": "Check the status of a data clean job. Poll until status is \"done\" or \"failed\"." + }, + "data-validate": { + "enabled": true, + "description": "Validate JSON data against platformOS schemas before import. Checks required fields (id, type, properties, created_at, updated_at), verifies types match schema files in app/schema/, and validates property names and types." + }, + "unit-tests-run": { + "enabled": true, + "description": "Run platformOS tests via /_tests/run endpoint. Returns parsed test results with assertions count, failures, and timing." + }, + "tests-run-async": { + "enabled": true, + "description": "Trigger a background platformOS test run via /_tests/run_async. Returns immediately with a run ID. Use tests-run-async-result to poll for completion." + }, + "tests-run-async-result": { + "enabled": true, + "description": "Check the result of an async test run by ID via /_tests/results/:id. Returns status: pending, success, failed, or error. Poll until done=true." + }, + "check": { + "enabled": false, + "description": "Run platformos-check linter to analyze the app for best practice violations. Checks Liquid and JSON files. Requires Ruby gem. Superseded by check-run." + }, + "check-run": { + "enabled": true, + "description": "Run platformos-check Node.js linter on the app. Analyzes Liquid/JSON files for violations. No Ruby required. Requires @platformos/platformos-check-node." + }, + "sync-file": { + "enabled": true, + "description": "Sync a file with a platformOS instance (upload or delete)." + }, + "uploads-push": { + "enabled": true, + "description": "Upload a ZIP file containing property uploads to platformOS instance. The ZIP should contain files referenced by upload-type properties." 
+ }, + "constants-list": { + "enabled": true, + "description": "List all constants configured on a platformOS instance." + }, + "constants-set": { + "enabled": true, + "description": "Set a constant on a platformOS instance. Creates or updates the constant." + }, + "constants-unset": { + "enabled": true, + "description": "Delete a constant from a platformOS instance." + }, + "instance-create": { + "enabled": true, + "description": "Create a new platformOS instance via Partner Portal API. Returns job acknowledgment." + }, + "partners-list": { + "enabled": true, + "description": "List partners from Partner Portal. Optionally fetch billing plans for a specific partner." + }, + "partner-get": { + "enabled": true, + "description": "Get partner details including available billing plans from Partner Portal." + }, + "endpoints-list": { + "enabled": true, + "description": "List available regions/endpoints for instance creation from Partner Portal." + }, + "env-add": { + "enabled": true, + "description": "Add environment to .pos config. Streams verification URL for user to open, polls for authorization (up to 60s), then saves token." + } + } +} \ No newline at end of file diff --git a/mcp-min/tools.config.schema.json b/mcp-min/tools.config.schema.json new file mode 100644 index 000000000..5866e262a --- /dev/null +++ b/mcp-min/tools.config.schema.json @@ -0,0 +1,37 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "MCP-Min Tools Configuration", + "description": "Configuration for mcp-min tools: descriptions and enabled/disabled state", + "type": "object", + "properties": { + "$schema": { + "type": "string", + "description": "JSON Schema reference" + }, + "_comment": { + "type": "string", + "description": "Optional comment field" + }, + "tools": { + "type": "object", + "description": "Tool configurations keyed by tool name", + "additionalProperties": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "description": "Whether this tool is available to MCP clients", + "default": true + }, + "description": { + "type": "string", + "description": "Human-readable description shown to MCP clients" + } + }, + "additionalProperties": false + } + } + }, + "required": ["tools"], + "additionalProperties": false +} diff --git a/mcp-min/tools.js b/mcp-min/tools.js new file mode 100644 index 000000000..5bd6b96fa --- /dev/null +++ b/mcp-min/tools.js @@ -0,0 +1,175 @@ +// Define tools for the minimal MCP server +import log from './log.js'; +import { readFileSync } from 'fs'; +import { fileURLToPath } from 'url'; +import { dirname, join } from 'path'; +import files from '../lib/files.js'; + +// Load tool configuration (descriptions and enabled/disabled state) +// MCP_TOOLS_CONFIG env var overrides the bundled config +const __dirname = dirname(fileURLToPath(import.meta.url)); +let toolsConfig = { tools: {} }; +const configPath = process.env.MCP_TOOLS_CONFIG || join(__dirname, 'tools.config.json'); +try { + toolsConfig = JSON.parse(readFileSync(configPath, 'utf-8')); + log.debug('tools config loaded', { path: configPath, tools: Object.keys(toolsConfig.tools || {}).length }); +} catch (err) { + log.debug('tools config not found or invalid, using defaults', { path: configPath, error: String(err) }); +} + +// Keep tools.js lean by extracting complex tools into modules +import singleFileTool from './sync/single-file.js'; +import fetchLogsTool from './logs/fetch.js'; +import execLiquidTool from './liquid/exec.js'; +import streamLogsTool from './logs/stream.js'; +import 
execGraphqlTool from './graphql/exec.js'; +import generatorsListTool from './generators/list.js'; +import generatorsHelpTool from './generators/help.js'; +import generatorsRunTool from './generators/run.js'; + +// migrations tools +import migrationsListTool from './migrations/list.js'; +import migrationsGenerateTool from './migrations/generate.js'; +import migrationsRunTool from './migrations/run.js'; + +// deploy tools +import deployStartTool from './deploy/start.js'; +import deployStatusTool from './deploy/status.js'; +import deployWaitTool from './deploy/wait.js'; + +// data tools +import dataImportTool from './data/import.js'; +import dataImportStatusTool from './data/import-status.js'; +import dataExportTool from './data/export.js'; +import dataExportStatusTool from './data/export-status.js'; +import dataCleanTool from './data/clean.js'; +import dataCleanStatusTool from './data/clean-status.js'; +import dataValidateTool from './data/validate-tool.js'; + +// tests tools +import testsRunTool from './tests/run.js'; +import testsRunAsyncTool from './tests/run-async.js'; +import testsRunAsyncResultTool from './tests/run-async-result.js'; + +// check tools +import checkTool from './check/index.js'; +import checkRunTool from './check/run.js'; + +// uploads tool +import uploadsPushTool from './uploads/push.js'; + +// constants tools +import constantsListTool from './constants/list.js'; +import constantsSetTool from './constants/set.js'; +import constantsUnsetTool from './constants/unset.js'; + +// portal tools +import instanceCreateTool from './portal/instance-create.js'; +import partnersListTool from './portal/partners-list.js'; +import partnerGetTool from './portal/partner-get.js'; +import endpointsListTool from './portal/endpoints-list.js'; +import envAddTool from './portal/env-add.js'; + +const tools = { + + // list-envs tool based on pos-cli-env list + 'envs-list': { + description: 'List configured environments from .pos (name and url)', + handler: async (_params, ctx) => { + log.debug('tool:list-envs invoked', { transport: ctx.transport }); + const settingsMap = Object(files.getConfig()); + const names = Object.keys(settingsMap); + const environments = names.map((name) => ({ name, url: settingsMap[name]?.url })); + return { environments }; + } + }, + + // logs.fetch: one-shot batch fetch of logs (uses Gateway.logs) + 'logs-fetch': fetchLogsTool, + + // liquid.exec: render Liquid template remotely via /liquid_exec + 'liquid-exec': execLiquidTool, + + // logs.stream: real-time streaming via polling + // 'logs-stream': streamLogsTool, + + // graphql.exec: run GraphQL query/mutation via /api/graph + 'graphql-exec': execGraphqlTool, + + // generators + 'generators-list': generatorsListTool, + 'generators-help': generatorsHelpTool, + 'generators-run': generatorsRunTool, + + // migrations + 'migrations-list': migrationsListTool, + 'migrations-generate': migrationsGenerateTool, + 'migrations-run': migrationsRunTool, + + // deploy + 'deploy-start': deployStartTool, + 'deploy-status': deployStatusTool, + 'deploy-wait': deployWaitTool, + + // data + 'data-import': dataImportTool, + 'data-import-status': dataImportStatusTool, + 'data-export': dataExportTool, + 'data-export-status': dataExportStatusTool, + 'data-clean': dataCleanTool, + 'data-clean-status': dataCleanStatusTool, + 'data-validate': dataValidateTool, + + // tests + 'unit-tests-run': testsRunTool, + 'tests-run-async': testsRunAsyncTool, + 'tests-run-async-result': testsRunAsyncResultTool, + + // check: run platformos-check linter + 
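+  // 'check' requires the platformos-check Ruby gem and ships disabled in
+  // tools.config.json; prefer 'check-run', which uses the Node.js
+  // @platformos/platformos-check-node package instead.
+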
'check': checkTool, + 'check-run': checkRunTool, + + // sync.singleFile: upload or delete a single file to platformOS instance + 'sync-file': singleFileTool, + + // uploads: push property uploads ZIP to instance + 'uploads-push': uploadsPushTool, + + // constants: manage instance constants + 'constants-list': constantsListTool, + 'constants-set': constantsSetTool, + 'constants-unset': constantsUnsetTool, + + // portal: Partner Portal instance management + 'instance-create': instanceCreateTool, + 'partners-list': partnersListTool, + 'partner-get': partnerGetTool, + 'endpoints-list': endpointsListTool, + 'env-add': envAddTool +}; + +// Apply configuration: override descriptions and filter disabled tools +function applyConfig(allTools, config) { + const result = {}; + for (const [name, tool] of Object.entries(allTools)) { + const cfg = config.tools?.[name]; + + // Skip disabled tools + if (cfg && cfg.enabled === false) { + log.debug('tool disabled by config', { name }); + continue; + } + + // Override description from config if present + if (cfg && cfg.description) { + result[name] = { ...tool, description: cfg.description }; + } else { + result[name] = tool; + } + } + return result; +} + +const configuredTools = applyConfig(tools, toolsConfig); + +export default configuredTools; diff --git a/mcp-min/uploads/push.js b/mcp-min/uploads/push.js new file mode 100644 index 000000000..ac894daa9 --- /dev/null +++ b/mcp-min/uploads/push.js @@ -0,0 +1,90 @@ +// platformos.uploads.push tool - upload property uploads ZIP to S3 +import fs from 'fs'; +import path from 'path'; + +import files from '../../lib/files.js'; +import { fetchSettings } from '../../lib/settings.js'; +import Gateway from '../../lib/proxy.js'; +import { presignUrl } from '../../lib/presignUrl.js'; +import { uploadFile } from '../../lib/s3UploadFile.js'; + +const settings = { fetchSettings }; + +async function resolveAuth(env, settingsModule = settings) { + const found = await settingsModule.fetchSettings(env); + if (found) return { ...found, source: `.pos(${env})` }; + throw new Error(`Environment "${env}" not found in .pos config`); +} + +const uploadsPushTool = { + description: 'Upload a ZIP file containing property uploads to platformOS instance. 
The ZIP should contain files referenced by upload-type properties.', + inputSchema: { + type: 'object', + additionalProperties: false, + required: ['env', 'filePath'], + properties: { + env: { type: 'string', description: 'Environment name' }, + filePath: { type: 'string', description: 'Path to ZIP file with uploads' } + } + }, + handler: async (params, ctx = {}) => { + const startedAt = new Date().toISOString(); + + try { + const auth = await resolveAuth(params.env, ctx.settings || settings); + + // Set env vars required by presignUrl + process.env.MARKETPLACE_TOKEN = auth.token; + process.env.MARKETPLACE_URL = auth.url; + + // Resolve file path + const filePath = path.resolve(params.filePath); + + if (!fs.existsSync(filePath)) { + return { + ok: false, + error: { code: 'FILE_NOT_FOUND', message: `File not found: ${filePath}` } + }; + } + + // Get instance ID + const GatewayCtor = ctx.Gateway || Gateway; + const gateway = new GatewayCtor({ url: auth.url, token: auth.token, email: auth.email }); + const instance = await gateway.getInstance(); + const instanceId = instance.id; + + // Build S3 path for property uploads + const s3Path = `instances/${instanceId}/property_uploads/data.public_property_upload_import.zip`; + + // Get presigned URL and upload (allow injection for testing) + const presignUrlFn = ctx.presignUrl || presignUrl; + const uploadFileFn = ctx.uploadFile || uploadFile; + const { uploadUrl, accessUrl } = await presignUrlFn(s3Path, filePath); + await uploadFileFn(filePath, uploadUrl); + + return { + ok: true, + data: { + instanceId, + filePath, + accessUrl + }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } catch (e) { + return { + ok: false, + error: { code: 'UPLOAD_FAILED', message: String(e.message || e) }, + meta: { + startedAt, + finishedAt: new Date().toISOString() + } + }; + } + } +}; + +export default uploadsPushTool; diff --git a/mcp-v2.md b/mcp-v2.md new file mode 100644 index 000000000..8e29ce443 --- /dev/null +++ b/mcp-v2.md @@ -0,0 +1,415 @@ +# PlatformOS MCP Server Implementation Plan + +## Overview + +This document outlines the implementation plan for building an MCP (Model Context Protocol) server that provides comprehensive access to platformOS API functionality. The server will leverage the existing pos-cli codebase as a foundation, exposing platformOS operations as MCP tools for use by AI assistants and other MCP clients. + +## Current State Analysis + +### Existing CLI Structure +The pos-cli tool is a comprehensive Node.js CLI application with the following architecture: + +- **Main Entry Point**: `bin/pos-cli.js` - Uses Commander.js for command structure +- **Core Library**: `lib/` directory containing modular functionality: + - `proxy.js`: Main Gateway class for API communication + - `apiRequest.js`: HTTP request abstraction + - `environments.js`: Environment management and OAuth flow + - `portal.js`: Partner Portal API integration + - `graph/queries.js`: GraphQL query definitions + - Various domain-specific modules (data, deploy, modules, etc.) + +### Key Capabilities to Expose +1. **Environment Management**: Add, list, authenticate environments +2. **Data Operations**: Import, export, clean data +3. **Deployment**: Deploy code, sync changes, pull app data +4. **Constants Management**: Set, unset, list environment constants +5. **Modules**: Install, remove, list, publish modules +6. **Migrations**: Generate, run, list migrations +7. **Logs**: Fetch and monitor application logs (v1 and v2) +8. 
**GraphQL**: Execute custom GraphQL queries
+9. **Liquid**: Execute Liquid templates
+10. **File Operations**: Upload files, manage assets
+11. **Audit**: Code quality and deprecation checks
+
+## MCP Server Architecture
+
+### Project Structure
+```
+platformos-mcp-server/
+├── package.json
+├── README.md
+├── src/
+│   ├── index.js              # MCP server entry point
+│   ├── server.js             # MCP server implementation
+│   ├── tools/                # MCP tool definitions
+│   │   ├── index.js
+│   │   ├── environment.js    # Environment management tools
+│   │   ├── data.js           # Data import/export tools
+│   │   ├── deploy.js         # Deployment tools
+│   │   ├── constants.js      # Constants management tools
+│   │   ├── modules.js        # Module management tools
+│   │   ├── migrations.js     # Migration tools
+│   │   ├── logs.js           # Logging tools
+│   │   ├── graphql.js        # GraphQL execution tools
+│   │   └── files.js          # File management tools
+│   ├── lib/                  # Core library (adapted from pos-cli)
+│   │   ├── gateway.js        # API communication
+│   │   ├── auth.js           # Authentication handling
+│   │   ├── config.js         # Configuration management
+│   │   └── utils.js          # Utility functions
+│   └── types/                # TypeScript definitions
+│       └── tools.ts
+├── tests/
+└── examples/
+```
+
+### Core Components
+
+#### 1. MCP Server Implementation (`src/server.js`)
+```javascript
+import { Server } from '@modelcontextprotocol/sdk/server/index.js';
+import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
+import { ListToolsRequestSchema, CallToolRequestSchema } from '@modelcontextprotocol/sdk/types.js';
+import { tools } from './tools/index.js';
+
+class PlatformOSMCPServer {
+  constructor() {
+    this.server = new Server({
+      name: "platformos-server",
+      version: "1.0.0"
+    }, {
+      capabilities: {
+        tools: {}
+      }
+    });
+  }
+
+  async initialize() {
+    // Advertise every tool from a single ListTools handler.
+    this.server.setRequestHandler(ListToolsRequestSchema, async () => ({
+      tools: tools.map(tool => tool.definition)
+    }));
+
+    // Dispatch tool calls by name from a single CallTool handler;
+    // registering one handler per tool would overwrite the previous one.
+    this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
+      const tool = tools.find(t => t.name === request.params.name);
+      if (!tool) {
+        throw new Error(`Unknown tool: ${request.params.name}`);
+      }
+      return await tool.handler(request.params.arguments);
+    });
+  }
+}
+```
+
+#### 2. Tool Definitions Structure
+Each tool category will follow this pattern:
+
+```javascript
+// src/tools/environment.js
+export const environmentTools = [
+  {
+    name: "platformos_env_add",
+    description: "Add a new platformOS environment",
+    inputSchema: {
+      type: "object",
+      properties: {
+        environment: { type: "string", description: "Environment name" },
+        url: { type: "string", description: "Instance URL" },
+        email: { type: "string", description: "Partner Portal email" },
+        partner_portal_url: { type: "string", description: "Partner Portal URL" }
+      },
+      required: ["environment", "url"]
+    },
+    handler: async (args) => {
+      // Implement environment addition logic
+      const gateway = new Gateway();
+      return await gateway.addEnvironment(args);
+    }
+  },
+  // Additional environment tools...
+];
+```
+
+## Implementation Phases
+
+### Phase 1: Foundation (Week 1-2)
+**Goal**: Set up basic MCP server infrastructure and core authentication
+
+**Tasks**:
+1. Initialize Node.js project with MCP SDK dependencies
+2. Create basic MCP server structure
+3. Port essential components from pos-cli:
+   - Gateway class (`lib/proxy.js` → `src/lib/gateway.js`)
+   - Authentication handling (`lib/environments.js` → `src/lib/auth.js`)
+   - Configuration management (`lib/settings.js` → `src/lib/config.js`)
+4. Implement environment management tools:
+   - `platformos_env_add`
+   - `platformos_env_list`
+   - `platformos_env_auth`
+5. Create basic test suite
+6.
Set up development and build scripts + +**Deliverables**: +- Working MCP server that can authenticate with platformOS +- Environment management functionality +- Basic documentation + +### Phase 2: Core Data Operations (Week 3-4) +**Goal**: Implement data import/export and basic deployment tools + +**Tasks**: +1. Port data operation logic from pos-cli +2. Implement data management tools: + - `platformos_data_export` + - `platformos_data_import` + - `platformos_data_clean` +3. Implement basic deployment tools: + - `platformos_deploy` + - `platformos_sync_start` + - `platformos_sync_stop` +4. Add file upload capabilities +5. Implement status monitoring for long-running operations +6. Add comprehensive error handling + +**Deliverables**: +- Data import/export functionality +- Basic deployment capabilities +- Robust error handling and status reporting + +### Phase 3: Advanced Features (Week 5-6) +**Goal**: Add GraphQL, constants, modules, and migration support + +**Tasks**: +1. Implement GraphQL execution tools: + - `platformos_graphql_query` + - `platformos_graphql_mutation` +2. Add constants management: + - `platformos_constants_set` + - `platformos_constants_unset` + - `platformos_constants_list` +3. Implement module management: + - `platformos_modules_list` + - `platformos_modules_install` + - `platformos_modules_remove` +4. Add migration support: + - `platformos_migrations_generate` + - `platformos_migrations_run` + - `platformos_migrations_list` +5. Implement Liquid template execution +6. Add comprehensive logging and debugging + +**Deliverables**: +- Full GraphQL integration +- Complete constants and modules management +- Migration system support + +### Phase 4: Monitoring and Advanced Operations (Week 7-8) +**Goal**: Complete the feature set with logging, audit, and advanced operations + +**Tasks**: +1. Implement logging tools: + - `platformos_logs_fetch` + - `platformos_logs_search` + - `platformos_logs_monitor` +2. Add audit functionality: + - `platformos_audit_run` + - `platformos_audit_report` +3. Implement file and asset management: + - `platformos_files_upload` + - `platformos_files_list` + - `platformos_assets_sync` +4. Add instance cloning capabilities +5. Implement GUI server integration (if needed) +6. Performance optimization and caching +7. Comprehensive documentation and examples + +**Deliverables**: +- Complete logging and monitoring system +- Audit functionality +- Full file management capabilities +- Production-ready server + +## Technical Specifications + +### MCP Tool Categories + +#### 1. Environment Management +- **platformos_env_add**: Add new environment with authentication +- **platformos_env_list**: List all configured environments +- **platformos_env_auth**: Refresh authentication for environment +- **platformos_env_remove**: Remove environment configuration + +#### 2. Data Operations +- **platformos_data_export**: Export instance data to JSON/CSV +- **platformos_data_import**: Import data from JSON/CSV +- **platformos_data_clean**: Clean all data from instance (staging only) +- **platformos_data_update**: Update specific data records + +#### 3. Deployment and Sync +- **platformos_deploy**: Full deployment to environment +- **platformos_sync_start**: Start file synchronization +- **platformos_sync_stop**: Stop active synchronization +- **platformos_pull**: Export app configuration to local files + +#### 4. 
Constants Management
+- **platformos_constants_list**: List all environment constants
+- **platformos_constants_set**: Set constant value
+- **platformos_constants_unset**: Remove constant
+
+#### 5. Module Management
+- **platformos_modules_list**: List installed modules
+- **platformos_modules_install**: Install module from marketplace
+- **platformos_modules_remove**: Uninstall module
+- **platformos_modules_update**: Update module to latest version
+
+#### 6. Migration System
+- **platformos_migrations_list**: List all migrations and status
+- **platformos_migrations_generate**: Create new migration file
+- **platformos_migrations_run**: Execute specific migration
+
+#### 7. GraphQL Operations
+- **platformos_graphql_query**: Execute GraphQL query
+- **platformos_graphql_mutation**: Execute GraphQL mutation
+- **platformos_graphql_schema**: Fetch GraphQL schema information
+
+#### 8. Logging and Monitoring
+- **platformos_logs_fetch**: Retrieve recent logs
+- **platformos_logs_search**: Search logs with filters
+- **platformos_logs_monitor**: Start real-time log monitoring
+- **platformos_logs_alerts**: Manage log alerts
+
+#### 9. Development Tools
+- **platformos_liquid_exec**: Execute Liquid templates
+- **platformos_audit_run**: Run code audit and quality checks
+- **platformos_generate**: Generate boilerplate code
+
+### Authentication Strategy
+
+The MCP server will support multiple authentication methods:
+
+1. **Environment Variables**: For CI/CD integration
+   ```bash
+   MPKIT_URL=https://example.com
+   MPKIT_EMAIL=user@example.com
+   MPKIT_TOKEN=token_here
+   ```
+
+2. **Configuration File**: `.pos` file compatible with existing pos-cli
+   ```json
+   {
+     "staging": {
+       "url": "https://staging.example.com",
+       "email": "user@example.com",
+       "token": "token_here"
+     }
+   }
+   ```
+
+3. **OAuth Flow**: Device authorization flow for interactive authentication
+   - Implement same flow as pos-cli for new environment setup
+   - Store tokens securely in configuration
+
+### Error Handling Strategy
+
+1. **Structured Error Responses**: All tools return consistent error format
+   ```json
+   {
+     "success": false,
+     "error": {
+       "code": "AUTH_FAILED",
+       "message": "Authentication failed for environment 'staging'",
+       "details": { /* additional context */ }
+     }
+   }
+   ```
+
+2. **Network Error Recovery**: Retry logic for transient failures
+3. **Validation**: Input validation with clear error messages
+4. **Logging**: Comprehensive logging for debugging
+
+### Configuration Management
+
+The server will use a layered configuration approach (sketched below):
+1. Command-line arguments (highest priority)
+2. Environment variables
+3. Configuration files (.pos, package.json)
+4. Default values (lowest priority)
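+
+A minimal sketch of that precedence; `resolveConfig` and `cliArgs` are illustrative names, not existing pos-cli APIs:
+
+```javascript
+import fs from 'fs';
+
+// Lowest priority first; each later spread overrides the previous layer.
+function resolveConfig(env, cliArgs = {}) {
+  const defaults = {};
+  const posFile = fs.existsSync('.pos')
+    ? (JSON.parse(fs.readFileSync('.pos', 'utf-8'))[env] || {})
+    : {};
+  const envVars = {
+    ...(process.env.MPKIT_URL && { url: process.env.MPKIT_URL }),
+    ...(process.env.MPKIT_EMAIL && { email: process.env.MPKIT_EMAIL }),
+    ...(process.env.MPKIT_TOKEN && { token: process.env.MPKIT_TOKEN })
+  };
+  return { ...defaults, ...posFile, ...envVars, ...cliArgs };
+}
+
+// resolveConfig('staging', { url: 'https://override.example.com' })
+// -> the CLI argument wins over MPKIT_URL and the .pos entry.
+```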
+
+### Performance Considerations
+
+1. **Connection Pooling**: Reuse HTTP connections for API requests
+2. **Caching**: Cache authentication tokens and instance metadata
+3. **Streaming**: Stream large file uploads/downloads
+4. **Concurrency**: Parallel processing for bulk operations
+5. **Rate Limiting**: Respect API rate limits
+
+## Development Guidelines
+
+### Code Standards
+- **Language**: Node.js with ESM modules
+- **TypeScript**: Use for type safety where beneficial
+- **Testing**: Vitest for unit tests (matching pos-cli's test runner), integration tests for API operations
+- **Linting**: ESLint with standardized configuration
+- **Documentation**: JSDoc for functions, comprehensive README
+
+### Dependencies
+- `@modelcontextprotocol/sdk`: MCP server implementation
+- `commander`: CLI argument parsing (if needed)
+- Native `fetch`: HTTP requests (pos-cli has already replaced the deprecated `request-promise`)
+- `lodash`: Utility functions
+- `chalk`: Console output formatting
+- `fs-extra`: Enhanced file system operations
+
+### Testing Strategy
+1. **Unit Tests**: Test individual tool functions
+2. **Integration Tests**: Test API communication
+3. **End-to-End Tests**: Test complete workflows
+4. **Mock Server**: Mock platformOS API for testing (see the sketch below)
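+
+A minimal Vitest sketch of the mock-server approach, reusing the injectable `ctx.request` seam the mcp-min tools in this repo already expose; the file location and import path are illustrative:
+
+```javascript
+// mcp-min/tests-example.test.js (illustrative location)
+import { describe, expect, test } from 'vitest';
+import testsRunTool from './tests/run.js';
+
+describe('unit-tests-run against a mocked platformOS API', () => {
+  test('parses the text formatter summary', async () => {
+    // Same { uri, method, headers } -> { statusCode, body } contract the
+    // tools use internally, so no real instance is contacted.
+    const request = async () => ({
+      statusCode: 200,
+      body: 'Assertions: 5. Failed: 0. Time: 12ms'
+    });
+
+    const res = await testsRunTool.handler(
+      { url: 'https://example.com', email: 'dev@example.com', token: 'secret', name: 'example_test' },
+      { request }
+    );
+
+    expect(res.ok).toBe(true);
+    expect(res.data.passed).toBe(true);
+    expect(res.data.summary.assertions).toBe(5);
+  });
+});
+```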
+
+### Documentation Requirements
+1. **API Documentation**: Complete tool reference
+2. **Setup Guide**: Installation and configuration
+3. **Examples**: Common usage patterns
+4. **Troubleshooting**: Common issues and solutions
+
+## Success Metrics
+
+### Functional Completeness
+- [ ] All major pos-cli commands available as MCP tools
+- [ ] Feature parity with existing CLI functionality
+- [ ] Comprehensive error handling and user feedback
+
+### Usability
+- [ ] Clear, consistent tool naming and parameters
+- [ ] Helpful error messages and validation
+- [ ] Comprehensive documentation with examples
+
+### Reliability
+- [ ] Robust authentication handling
+- [ ] Network error recovery
+- [ ] Graceful handling of API limitations
+
+### Performance
+- [ ] Response times comparable to direct pos-cli usage
+- [ ] Efficient handling of large file operations
+- [ ] Minimal memory footprint
+
+## Future Enhancements
+
+### Phase 5: Advanced Features (Future)
+- **Real-time Collaboration**: Multi-user synchronization
+- **Advanced Caching**: Intelligent caching strategies
+- **Plugin System**: Extensible tool system
+- **GUI Integration**: Web-based management interface
+- **CLI Compatibility**: Drop-in replacement for pos-cli
+
+### Phase 6: Enterprise Features (Future)
+- **Team Management**: Multi-user environment sharing
+- **Audit Logging**: Comprehensive operation logging
+- **Compliance Tools**: Security and compliance checking
+- **Advanced Deployment**: Blue/green deployments, rollbacks
+
+## Conclusion
+
+This implementation plan provides a comprehensive roadmap for creating a fully-featured MCP server for platformOS. By leveraging the existing pos-cli codebase and following a phased approach, we can deliver a robust, reliable, and user-friendly MCP server that exposes the full power of the platformOS API to AI assistants and other MCP clients.
+
+The phased approach ensures that core functionality is available early while allowing for iterative improvement and feature additions. The modular architecture will support future enhancements and maintain compatibility with the existing platformOS ecosystem.
\ No newline at end of file
diff --git a/mcp/__tests__/http.test.js b/mcp/__tests__/http.test.js
new file mode 100644
index 000000000..46940ce57
--- /dev/null
+++ b/mcp/__tests__/http.test.js
@@ -0,0 +1,79 @@
+let request;
+try {
+  request = require('supertest');
+} catch (e) {
+  request = null;
+}
+const { createServer } = require('../server');
+
+let app;
+beforeAll(async () => {
+  if (!request) {
+    request = (await import('supertest')).default;
+  }
+  app = createServer({ storagePath: require.resolve('../../test/fixtures/template-values.json') });
+  // Supertest treats non-JSON responses as text; ensure JSON parse only for JSON endpoints.
+
+}, 15000);
+
+describe('/health and /tools', () => {
+  test('GET /health returns ok', async () => {
+    const res = await request(app).get('/health').expect(200);
+    expect(res.body).toHaveProperty('ok', true);
+    expect(res.body).toHaveProperty('serverInfo');
+  });
+
+  test('GET /tools returns a list', async () => {
+    const res = await request(app).get('/tools').expect(200);
+    expect(res.body).toHaveProperty('tools');
+    expect(Array.isArray(res.body.tools)).toBe(true);
+  });
+});
+
+describe('/call', () => {
+  test('POST /call unauthorized without key', async () => {
+    await request(app).post('/call').send({ tool: 'echo', args: 'x' }).expect(401);
+  });
+
+  test('POST /call with key returns echo result', async () => {
+    const res = await request(app)
+      .post('/call')
+      .set('X-Api-Key', 'testkey')
+      .send({ tool: 'echo', args: 'hello' })
+      .expect(200);
+    expect(res.body.ok).toBe(true);
+    expect(res.body.result).toHaveProperty('echoed');
+    expect(res.body.result.echoed).toBe('hello');
+  });
+
+  test('POST /call with tool error returns 500', async () => {
+    const res = await request(app)
+      .post('/call')
+      .set('X-Api-Key', 'testkey')
+      .send({ tool: 'error' })
+      .expect(500);
+    expect(res.body.ok).toBe(false);
+    expect(res.body).toHaveProperty('error');
+  });
+});
+
+describe('/call-stream', () => {
+  test('POST /call-stream streams chunks', async () => {
+    const res = await request(app)
+      .post('/call-stream')
+      .set('X-Api-Key', 'testkey')
+      .buffer(true)
+      .parse((res, cb) => {
+        let data = '';
+        res.setEncoding('utf8');
+        res.on('data', (chunk) => { data += chunk; });
+        res.on('end', () => cb(null, data));
+      })
+      .send({ tool: 'echo', args: 'stream' })
+      .expect(200);
+
+    const chunks = String(res.body).trim().split('\n').filter(Boolean).map(l => JSON.parse(l));
+    expect(chunks.length).toBeGreaterThanOrEqual(1);
+    expect(chunks[0]).toHaveProperty('seq');
+  });
+});
diff --git a/mcp/__tests__/setup-jest.js b/mcp/__tests__/setup-jest.js
new file mode 100644
index 000000000..17760005f
--- /dev/null
+++ b/mcp/__tests__/setup-jest.js
@@ -0,0 +1,18 @@
+// This file is referenced by jest.setupFiles and must not contain tests (Jest would otherwise collect it as a suite).
+// Setup for ESM/experimental-vm-modules environment compatibility.
+// Provide global require shim used by some older tests
+if (typeof global.require === 'undefined') {
+  try {
+    const { createRequire } = require('module');
+    global.require = createRequire(typeof __filename !== 'undefined' ?
__filename : process.cwd()); + } catch (e) { + // fallback to normal require + global.require = require; + } +} + +// Provide default envs to stabilize legacy tests that expect MPKIT_* to be defined +process.env.MPKIT_URL = process.env.MPKIT_URL || 'https://example.com'; +process.env.MPKIT_EMAIL = process.env.MPKIT_EMAIL || 'pos-cli@example.com'; +process.env.MPKIT_TOKEN = process.env.MPKIT_TOKEN || 'test-token'; + diff --git a/mcp/__tests__/unit.auth.test.js b/mcp/__tests__/unit.auth.test.js new file mode 100644 index 000000000..84701de0b --- /dev/null +++ b/mcp/__tests__/unit.auth.test.js @@ -0,0 +1,22 @@ +const Auth = require('../auth'); + +describe('Auth middleware', () => { + test('middleware allows with correct key', () => { + const auth = new Auth(); + const mw = auth.middleware(); + const req = { get: () => 'testkey', query: {} }; + const res = { status: () => ({ json: () => {} }) }; + let called = false; + mw(req, res, () => { called = true; }); + expect(called).toBe(true); + }); + + test('middleware denies without key', () => { + const auth = new Auth(); + const mw = auth.middleware(); + const req = { get: () => null, query: {} }; + const res = { status: jest.fn(() => ({ json: jest.fn() })) }; + mw(req, res, () => {}); + expect(res.status).toHaveBeenCalledWith(401); + }); +}); diff --git a/mcp/__tests__/unit.proxy.test.js b/mcp/__tests__/unit.proxy.test.js new file mode 100644 index 000000000..2da464650 --- /dev/null +++ b/mcp/__tests__/unit.proxy.test.js @@ -0,0 +1,28 @@ +const ProxyWrapper = require('../proxy-wrapper'); + +describe('ProxyWrapper', () => { + const proxy = new ProxyWrapper(); + + test('call returns echo', async () => { + const res = await proxy.call({ tool: 'echo', args: 'x' }); + expect(res).toHaveProperty('echoed', 'x'); + }); + + test('call with unknown tool returns called', async () => { + const res = await proxy.call({ tool: 'foo' }); + expect(res).toHaveProperty('called', 'foo'); + }); + + test('call throws on invalid payload', async () => { + await expect(proxy.call(null)).rejects.toThrow('invalid payload'); + }); + + test('callStream yields chunks', async () => { + const chunks = []; + for await (const c of proxy.callStream({ tool: 'echo', args: 's' })) { + chunks.push(c); + } + expect(chunks.length).toBe(3); + expect(chunks[0]).toHaveProperty('seq', 0); + }); +}); diff --git a/mcp/__tests__/unit.storage.test.js b/mcp/__tests__/unit.storage.test.js new file mode 100644 index 000000000..fec873301 --- /dev/null +++ b/mcp/__tests__/unit.storage.test.js @@ -0,0 +1,18 @@ +const Storage = require('../storage'); +const path = require('path'); + +describe('Storage', () => { + const fixtures = path.join(__dirname, '..', '..', 'test', 'fixtures'); + const storage = new Storage({ storagePath: fixtures }); + + test('listEnvironments returns array', async () => { + const list = await storage.listEnvironments(); + expect(Array.isArray(list)).toBe(true); + expect(list).toContain('template-values'); + }); + + test('getEnvironment returns parsed JSON', async () => { + const env = await storage.getEnvironment('template-values'); + expect(env).toHaveProperty('aKey', 'aStringValue'); + }); +}); diff --git a/mcp/jest.setup.js b/mcp/jest.setup.js new file mode 100644 index 000000000..da5269225 --- /dev/null +++ b/mcp/jest.setup.js @@ -0,0 +1,16 @@ +// setup for ESM/experimental-vm-modules environment compatibility +// Provide global require shim used by some older tests +if (typeof global.require === 'undefined') { + try { + const { createRequire } = require('module'); + 
global.require = createRequire(typeof __filename !== 'undefined' ? __filename : process.cwd()); + } catch (e) { + // fallback to normal require + global.require = require; + } +} + +// Provide default envs to stabilize legacy tests that expect MPKIT_* to be defined +process.env.MPKIT_URL = process.env.MPKIT_URL || 'https://example.com'; +process.env.MPKIT_EMAIL = process.env.MPKIT_EMAIL || 'pos-cli@example.com'; +process.env.MPKIT_TOKEN = process.env.MPKIT_TOKEN || 'test-token'; diff --git a/package-lock.json b/package-lock.json index b2815c892..961c23f7b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@platformos/pos-cli", - "version": "6.0.0-beta.0", + "version": "6.0.0-beta.7", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@platformos/pos-cli", - "version": "6.0.0-beta.0", + "version": "6.0.0-beta.7", "bundleDependencies": [ "commander", "degit", @@ -65,6 +65,7 @@ "pos-cli-env": "bin/pos-cli-env.js", "pos-cli-env-add": "bin/pos-cli-env-add.js", "pos-cli-env-list": "bin/pos-cli-env-list.js", + "pos-cli-fetch-logs": "bin/pos-cli-fetch-logs.js", "pos-cli-gui": "bin/pos-cli-gui.js", "pos-cli-gui-serve": "bin/pos-cli-gui-serve.js", "pos-cli-init": "bin/pos-cli-init.js", @@ -72,6 +73,8 @@ "pos-cli-logsv2": "bin/pos-cli-logsv2.js", "pos-cli-logsv2-search": "bin/pos-cli-logsv2-search.js", "pos-cli-lsp": "bin/pos-cli-lsp.js", + "pos-cli-mcp": "bin/pos-cli-mcp.js", + "pos-cli-mcp-config": "bin/pos-cli-mcp-config.js", "pos-cli-migrations": "bin/pos-cli-migrations.js", "pos-cli-modules": "bin/pos-cli-modules.js", "pos-cli-sync": "bin/pos-cli-sync.js", @@ -79,6 +82,7 @@ "pos-cli-test-run": "bin/pos-cli-test-run.js" }, "devDependencies": { + "@vitest/coverage-v8": "^4.0.17", "dotenv": "^17.3.1", "globals": "^17.3.0", "nock": "^14.0.11", @@ -106,6 +110,16 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, "node_modules/@babel/helper-validator-identifier": { "version": "7.28.5", "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", @@ -115,6 +129,46 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/parser": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.0.tgz", + "integrity": "sha512-IyDgFV5GeDUVX4YdF/3CPULtVGSXXMLh1xVIgdCgxApktqnQV0r7/8Nqthg+8YLGaAtdyIlo2qIdZrbCv4+7ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-1.0.2.tgz", + "integrity": 
"sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@esbuild/aix-ppc64": { "version": "0.27.3", "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", @@ -903,6 +957,16 @@ "integrity": "sha512-SQ7Kzhh9+D+ZW9MA0zkYv3VXhIDNx+LzM6EJ+/65I3QY+enU6Itte7E5XX7EWrqLW2FN4n06GWzBnPoC3th2aQ==", "license": "ISC" }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, "node_modules/@jridgewell/sourcemap-codec": { "version": "1.5.5", "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", @@ -910,6 +974,17 @@ "dev": true, "license": "MIT" }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, "node_modules/@kwsites/file-exists": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/@kwsites/file-exists/-/file-exists-1.1.1.tgz", @@ -2224,6 +2299,37 @@ "@types/node": "*" } }, + "node_modules/@vitest/coverage-v8": { + "version": "4.0.18", + "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-4.0.18.tgz", + "integrity": "sha512-7i+N2i0+ME+2JFZhfuz7Tg/FqKtilHjGyGvoHYQ6iLV0zahbsJ9sljC9OcFcPDbhYKCet+sG8SsVqlyGvPflZg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@bcoe/v8-coverage": "^1.0.2", + "@vitest/utils": "4.0.18", + "ast-v8-to-istanbul": "^0.3.10", + "istanbul-lib-coverage": "^3.2.2", + "istanbul-lib-report": "^3.0.1", + "istanbul-reports": "^3.2.0", + "magicast": "^0.5.1", + "obug": "^2.1.1", + "std-env": "^3.10.0", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@vitest/browser": "4.0.18", + "vitest": "4.0.18" + }, + "peerDependenciesMeta": { + "@vitest/browser": { + "optional": true + } + } + }, "node_modules/@vitest/expect": { "version": "4.0.18", "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.18.tgz", @@ -3070,6 +3176,25 @@ "node": ">=12" } }, + "node_modules/ast-v8-to-istanbul": { + "version": "0.3.11", + "resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.11.tgz", + "integrity": "sha512-Qya9fkoofMjCBNVdWINMjB5KZvkYfaO9/anwkWnjxibpWUxo5iHl2sOdP7/uAqaRuUYuoo8rDwnbaaKVFxoUvw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.31", + "estree-walker": "^3.0.3", + "js-tokens": "^10.0.0" + } + }, + "node_modules/ast-v8-to-istanbul/node_modules/js-tokens": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-10.0.0.tgz", + "integrity": "sha512-lM/UBzQmfJRo9ABXbPWemivdCW8V2G8FHaHdypQaIy523snUjog0W71ayWXTjiR+ixeMyVHN2XcpnTd/liPg/Q==", + "dev": true, + "license": "MIT" + }, "node_modules/async": { "version": "3.2.6", "resolved": "https://registry.npmjs.org/async/-/async-3.2.6.tgz", @@ -4895,6 
+5020,16 @@ "integrity": "sha512-+xGQY0YyAWCnqy7Cd++hc2JqMYzlm0dG30Jd0beaA64sROr8C4nt8Yc9V5Ro3avlSUDTN0ulqP/VBKi1/lLygw==", "license": "MIT" }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/has-symbols": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", @@ -4956,6 +5091,13 @@ "node": "^20.17.0 || >=22.9.0" } }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, "node_modules/http-cache-semantics": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", @@ -5408,6 +5550,45 @@ "node": ">=0.10.0" } }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/jake": { "version": "10.9.4", "resolved": "https://registry.npmjs.org/jake/-/jake-10.9.4.tgz", @@ -5709,6 +5890,34 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/magicast": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.5.2.tgz", + "integrity": "sha512-E3ZJh4J3S9KfwdjZhe2afj6R9lGIN5Pher1pF39UGrXRqq/VDaGVIGN13BjHd2u8B61hArAGOnso7nBOouW3TQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "source-map-js": "^1.2.1" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/make-fetch-happen": { "version": "15.0.3", "resolved": "https://registry.npmjs.org/make-fetch-happen/-/make-fetch-happen-15.0.3.tgz", 
@@ -8156,6 +8365,19 @@ "node": ">=14.18.0" } }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/tar": { "version": "7.5.9", "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.9.tgz", @@ -8694,6 +8916,7 @@ "integrity": "sha512-hOQuK7h0FGKgBAas7v0mSAsnvrIgAvWmRFjmzpJ7SwFHH3g1k2u37JtYwOwmEKhK6ZO3v9ggDBBm0La1LCK4uQ==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@vitest/expect": "4.0.18", "@vitest/mocker": "4.0.18",
diff --git a/package.json b/package.json
index 2d7c65513..7b6422cf1 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@platformos/pos-cli",
-  "version": "6.0.0-beta.0",
+  "version": "6.0.0-beta.7",
   "description": "Manage your platformOS application",
   "type": "module",
   "imports": {
@@ -16,6 +16,7 @@
     "pretest:unit": "npm install --prefix test/fixtures/yeoman --silent",
     "test:unit": "vitest run test/unit",
     "test:integration": "vitest run test/integration",
+    "test:mcp-min": "vitest run mcp-min/__tests__",
     "postinstall": "node ./scripts/check-node-version.js"
   },
   "files": [
@@ -25,7 +26,8 @@
     "gui/graphql/public",
     "gui/admin/dist",
     "gui/next/build",
-    "scripts"
+    "scripts",
+    "mcp-min"
   ],
   "main": "./bin/pos-cli.js",
   "engines": {
@@ -88,6 +90,7 @@
     "pos-cli-env": "bin/pos-cli-env.js",
     "pos-cli-env-add": "bin/pos-cli-env-add.js",
     "pos-cli-env-list": "bin/pos-cli-env-list.js",
+    "pos-cli-fetch-logs": "bin/pos-cli-fetch-logs.js",
     "pos-cli-gui": "bin/pos-cli-gui.js",
     "pos-cli-gui-serve": "bin/pos-cli-gui-serve.js",
     "pos-cli-init": "bin/pos-cli-init.js",
@@ -95,6 +98,8 @@
     "pos-cli-logsv2": "bin/pos-cli-logsv2.js",
     "pos-cli-logsv2-search": "bin/pos-cli-logsv2-search.js",
     "pos-cli-lsp": "bin/pos-cli-lsp.js",
+    "pos-cli-mcp": "bin/pos-cli-mcp.js",
+    "pos-cli-mcp-config": "bin/pos-cli-mcp-config.js",
     "pos-cli-migrations": "bin/pos-cli-migrations.js",
     "pos-cli-modules": "bin/pos-cli-modules.js",
     "pos-cli-sync": "bin/pos-cli-sync.js",
@@ -111,6 +116,7 @@
   },
   "homepage": "https://github.com/Platform-OS/pos-cli/issues#readme",
   "devDependencies": {
+    "@vitest/coverage-v8": "^4.0.17",
     "dotenv": "^17.3.1",
     "globals": "^17.3.0",
     "nock": "^14.0.11",
diff --git a/test/fixtures/deploy/correct_with_assets/app/assets/bar.js b/test/fixtures/deploy/correct_with_assets/app/assets/bar.js
index 13b610307..2976ddbb6 100644
--- a/test/fixtures/deploy/correct_with_assets/app/assets/bar.js
+++ b/test/fixtures/deploy/correct_with_assets/app/assets/bar.js
@@ -1,107 +1,285 @@
-// Test asset file
[filler hunk condensed, per the header above: 103 deleted `x;` lines and 282 added `x` lines, one of the added lines being a single long unbroken run of x's; the file's last three `x` lines are unchanged context]
diff --git a/test/fixtures/modules/good/tmp___/release.zip b/test/fixtures/modules/good/tmp___/release.zip
new file mode 100644
index 000000000..678b976a9
Binary files /dev/null and b/test/fixtures/modules/good/tmp___/release.zip differ
diff --git a/test/unit/data-utils.test.js b/test/unit/data-utils.test.js
index 910db6deb..5cb8458f4 100644
--- a/test/unit/data-utils.test.js
+++ b/test/unit/data-utils.test.js
@@ -212,7 +212,7 @@ describe('waitForStatus', () => {
     await expect(promise).rejects.toEqual(error);
 
     expect(logger.Debug).toHaveBeenCalledWith(
-      '[ERR] waitForStatus did not receive `status` in response object',
+      '[waitForStatus] Poll error',
       error
     );
   });
diff --git a/test/unit/data.test.js b/test/unit/data.test.js
index d04a5034a..eccf52979 100644
--- a/test/unit/data.test.js
+++ b/test/unit/data.test.js
@@ -3,9 +3,9 @@ import cliPath from '#test/utils/cliPath';
 
 const env = Object.assign(process.env, {
   CI: true,
-  MPKIT_URL: 'http://example.com',
+  MPKIT_URL: 'http://google.com',
   MPKIT_TOKEN: '1234',
-  MPKIT_EMAIL: 'foo@example.com'
+  MPKIT_EMAIL: 'foo@google.com'
 });
 
 describe('Data clean', () => {
@@ -16,6 +16,16 @@ describe('Data clean', () => {
   });
 });
 
+describe('Data clean real', () => {
+  test('shows message when wrong confirmation passed inline', async () => {
+    const {code, stderr, stdout} = await exec(`echo "CLEAN DATA" | ${cliPath} data clean`, { env });
+    expect(stderr).toMatch('WARNING!!! You are going to REMOVE your data from instance: http://google.com');
+    expect(stderr).toMatch('There is no coming back.');
+    expect(stderr).toMatch('data_clean');
+    expect(stderr).toMatch(env.MPKIT_URL);
+  });
+});
+
 describe('Data import', () => {
   test('should show message when wrong file for data import', async () => {
     const {code, stderr} = await exec(`echo "wrong confirm" | ${cliPath} data import foo -p ./test/fixtures/wrong_json.json`, { env });
diff --git a/test/unit/s3UploadFile.test.js b/test/unit/s3UploadFile.test.js
index c69a0ec9a..e2fa4823e 100644
--- a/test/unit/s3UploadFile.test.js
+++ b/test/unit/s3UploadFile.test.js
@@ -12,6 +12,9 @@ vi.mock('fs');
 
 // Mock mime module
 vi.mock('mime');
 
+// Mock logger module
+vi.mock('../../lib/logger.js');
+
 // Mock global fetch
 global.fetch = vi.fn();
@@ -24,6 +27,10 @@ describe('s3UploadFile', () => {
     // Reset fetch mock
     global.fetch.mockReset();
 
+    // Setup logger mock before importing the module
+    const loggerModule = await import('../../lib/logger.js');
+    loggerModule.Debug = vi.fn();
+
     // Import module fresh for each test
     const module = await import('#lib/s3UploadFile.js');
     uploadFile = module.uploadFile;
@@ -43,6 +50,7 @@
     fs.statSync.mockReturnValue({ size: fileSize });
     fs.readFileSync.mockReturnValue(fileBuffer);
+    mime.getType.mockReturnValue('image/jpeg');
 
     global.fetch.mockResolvedValue({
       ok: true,
       status: 200
@@ -52,10 +60,12 @@
     expect(fs.statSync).toHaveBeenCalledWith(fileName);
     expect(fs.readFileSync).toHaveBeenCalledWith(fileName);
+    expect(mime.getType).toHaveBeenCalledWith(fileName);
 
     expect(global.fetch).toHaveBeenCalledWith(s3Url, {
       method: 'PUT',
       headers: {
-        'Content-Length': fileSize.toString()
+        'Content-Length': fileSize.toString(),
+        'Content-Type': 'image/jpeg'
       },
       body: fileBuffer
     });
@@ -71,6 +81,7 @@
     fs.statSync.mockReturnValue({ size: fileSize });
     fs.readFileSync.mockReturnValue(largeBuffer);
+    mime.getType.mockReturnValue('video/mp4');
 
     global.fetch.mockResolvedValue({
       ok: true,
       status: 200
@@ -79,10 +90,12 @@
     const result = await uploadFile(fileName, s3Url);
 
     expect(fs.readFileSync).toHaveBeenCalledWith(fileName);
+    expect(mime.getType).toHaveBeenCalledWith(fileName);
     expect(global.fetch).toHaveBeenCalledWith(s3Url, expect.objectContaining({
       method: 'PUT',
       headers: {
-        'Content-Length': fileSize.toString()
+        'Content-Length': fileSize.toString(),
+        'Content-Type': 'video/mp4'
       },
       body: largeBuffer
     }));
@@ -95,6 +108,7 @@
     fs.statSync.mockReturnValue({ size: 1000 });
     fs.readFileSync.mockReturnValue(Buffer.from('content'));
+    mime.getType.mockReturnValue('image/jpeg');
 
     global.fetch.mockResolvedValue({
       ok: false,
       status: 403
@@ -109,6 +123,7 @@
     fs.statSync.mockReturnValue({ size: 1000 });
     fs.readFileSync.mockReturnValue(Buffer.from('content'));
+    mime.getType.mockReturnValue('image/jpeg');
 
     global.fetch.mockResolvedValue({
       ok: false,
       status: 500
@@ -123,6 +138,7 @@
     fs.statSync.mockReturnValue({ size: 1000 });
     fs.readFileSync.mockReturnValue(Buffer.from('content'));
+    mime.getType.mockReturnValue('image/jpeg');
 
     global.fetch.mockRejectedValue(new Error('Network error'));
 
     await expect(uploadFile(fileName, s3Url)).rejects.toThrow('Network error');
@@ -135,6 +151,7 @@
     fs.statSync.mockReturnValue({ size: 0 });
     fs.readFileSync.mockReturnValue(emptyBuffer);
+    mime.getType.mockReturnValue('text/plain');
 
     global.fetch.mockResolvedValue({
       ok: true,
       status: 200
@@ -145,7 +162,8 @@
     expect(global.fetch).toHaveBeenCalledWith(s3Url, {
       method: 'PUT',
       headers: {
-        'Content-Length': '0'
+        'Content-Length': '0',
+        'Content-Type': 'text/plain'
       },
       body: emptyBuffer
     });
@@ -154,10 +172,10 @@
 
   test('handles various image formats', async () => {
     const testCases = [
-      { fileName: '/path/to/image.png', size: 500000 },
-      { fileName: '/path/to/image.jpg', size: 300000 },
-      { fileName: '/path/to/image.gif', size: 100000 },
-      { fileName: '/path/to/image.svg', size: 50000 }
+      { fileName: '/path/to/image.png', size: 500000, mimeType: 'image/png' },
+      { fileName: '/path/to/image.jpg', size: 300000, mimeType: 'image/jpeg' },
+      { fileName: '/path/to/image.gif', size: 100000, mimeType: 'image/gif' },
+      { fileName: '/path/to/image.svg', size: 50000, mimeType: 'image/svg+xml' }
     ];
 
     for (const testCase of testCases) {
@@ -166,13 +184,48 @@
       fs.statSync.mockReturnValue({ size: testCase.size });
       fs.readFileSync.mockReturnValue(buffer);
+      mime.getType.mockReturnValue(testCase.mimeType);
       global.fetch.mockResolvedValue({
         ok: true,
         status: 200
       });
       await uploadFile(testCase.fileName, s3Url);
       expect(fs.readFileSync).toHaveBeenCalledWith(testCase.fileName);
+      expect(mime.getType).toHaveBeenCalledWith(testCase.fileName);
+      expect(global.fetch).toHaveBeenCalledWith(s3Url, expect.objectContaining({
+        headers: expect.objectContaining({
+          'Content-Type': testCase.mimeType
+        })
+      }));
     }
   });
+
+  test('correctly sets JavaScript MIME type for .js files', async () => {
+    const fileName = '/path/to/contact.js';
+    const s3Url = 'https://s3.amazonaws.com/bucket/contact.js';
+    const fileBuffer = Buffer.from('console.log("test");');
+    const fileSize = fileBuffer.length;
+
+    fs.statSync.mockReturnValue({ size: fileSize });
+    fs.readFileSync.mockReturnValue(fileBuffer);
+    mime.getType.mockReturnValue('application/javascript');
+    global.fetch.mockResolvedValue({
+      ok: true,
+      status: 200
+    });
+
+    const result = await uploadFile(fileName, s3Url);
+
+    expect(mime.getType).toHaveBeenCalledWith(fileName);
+    expect(global.fetch).toHaveBeenCalledWith(s3Url, {
+      method: 'PUT',
+      headers: {
+        'Content-Length': fileSize.toString(),
+        'Content-Type': 'application/javascript'
+      },
+      body: fileBuffer
+    });
+
+    expect(result).toBe(s3Url);
+  });
 });
 
 describe('uploadFileFormData', () => {
@@ -378,6 +431,80 @@
 
     expect(result).toBe(true);
   });
+
+  test('sets MIME type on Blob constructor, not as unsigned FormData field', async () => {
+    const filePath = '/path/to/image.png';
+    const data = {
+      url: 'https://s3.amazonaws.com/bucket',
+      fields: {
+        key: 'assets/${filename}',
+        'Content-Type': 'application/octet-stream', // Signed value from presigned URL
+        policy: 'base64-encoded-policy',
+        signature: 'signature-value'
+      }
+    };
+    const fileBuffer = Buffer.from('PNG image content');
+
+    fs.readFileSync.mockReturnValue(fileBuffer);
+    mime.getType.mockReturnValue('image/png');
+    const mockResponse = {
+      ok: true,
+      status: 200
+    };
+    global.fetch.mockResolvedValue(mockResponse);
+
+    await uploadFileFormData(filePath, data);
+
+    // Verify fetch was called
+    expect(global.fetch).toHaveBeenCalledWith(data.url, expect.objectContaining({
+      method: 'POST',
+      body: expect.any(FormData)
+    }));
+
+    // Get the FormData that was passed to fetch
+    const formDataArg = global.fetch.mock.calls[0][1].body;
+
+    // The Blob should have the correct MIME type set via its options object
+    const blobEntry = formDataArg.get('file');
+    expect(blobEntry.type).toBe('image/png');
+
+    // There should be only ONE Content-Type field (the signed one from the presigned URL),
+    // NOT a second unsigned Content-Type field (which was the bug)
+    const contentTypeFields = formDataArg.getAll('Content-Type');
+    expect(contentTypeFields).toHaveLength(1); // Only the signed Content-Type from the presigned URL
+    expect(contentTypeFields[0]).toBe('application/octet-stream'); // The signed value should remain
+  });
+
+  test('adds Content-Type form field when presigned fields do not include it', async () => {
+    const filePath = '/path/to/image.png';
+    const data = {
+      url: 'https://s3.amazonaws.com/bucket',
+      fields: {
+        key: 'assets/${filename}',
+        policy: 'base64-encoded-policy',
+        signature: 'signature-value'
+        // Note: No Content-Type in presigned fields
+      }
+    };
+    const fileBuffer = Buffer.from('PNG image content');
+
+    fs.readFileSync.mockReturnValue(fileBuffer);
+    mime.getType.mockReturnValue('image/png');
+    global.fetch.mockResolvedValue({ ok: true, status: 200 });
+
+    await uploadFileFormData(filePath, data);
+
+    const formDataArg = global.fetch.mock.calls[0][1].body;
+
+    // Content-Type should be added as a form field since the presigned URL didn't include it
+    const contentTypeFields = formDataArg.getAll('Content-Type');
+    expect(contentTypeFields).toHaveLength(1);
+    expect(contentTypeFields[0]).toBe('image/png');
+
+    // The Blob should also have the correct type
+    const blobEntry = formDataArg.get('file');
+    expect(blobEntry.type).toBe('image/png');
+  });
 });
 
 describe('Memory and Buffer handling', () => {
@@ -451,6 +578,7 @@
     fs.statSync.mockReturnValue({ size: 1000 });
     fs.readFileSync.mockReturnValue(Buffer.from('content'));
+    mime.getType.mockReturnValue('image/jpeg');
 
     global.fetch.mockResolvedValue({
       ok: false,
       status: 403
diff --git a/test/utils/fixtures.js b/test/utils/fixtures.js
new file mode 100644
index 000000000..2cf9ac3bd
--- /dev/null
+++ b/test/utils/fixtures.js
@@ -0,0 +1,24 @@
+import fs from 'fs';
+import path from 'path';
+
+const dotPosPath = path.resolve('.pos');
+let originalDotPos = null;
+
+function writeDotPos(content) {
+  // Save the original file first, if one exists, so it can be restored later
+  if (fs.existsSync(dotPosPath)) {
+    originalDotPos = fs.readFileSync(dotPosPath, 'utf8');
+  }
+  fs.writeFileSync(dotPosPath, JSON.stringify(content, null, 2));
+}
+
+function removeDotPos() {
+  if (originalDotPos != null) {
+    fs.writeFileSync(dotPosPath, originalDotPos);
+  } else if (fs.existsSync(dotPosPath)) {
+    fs.unlinkSync(dotPosPath);
+  }
+  originalDotPos = null;
+}
+
+export { writeDotPos, removeDotPos };
diff --git a/vitest.config.js b/vitest.config.js
index 5a5f44012..3caecf576 100644
--- a/vitest.config.js
+++ b/vitest.config.js
@@ -4,7 +4,7 @@ export default defineConfig({
   test: {
     environment: 'node',
     globals: true,
-    include: ['test/**/*.{test,spec}.js'],
+    include: ['test/**/*.{test,spec}.js', 'mcp-min/__tests__/**/*.{test,spec}.js', 'mcp-min/__tests__/*.test.cjs.js'],
     fileParallelism: true,
     globalSetup: ['./test/global-setup.js'],
     setupFiles: ['./test/vitest-setup.js'],
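The two new uploadFileFormData tests above pin down the shape of the fix: the file's MIME type belongs on the Blob itself, and an unsigned Content-Type form field may only be appended when the presigned policy has not already signed one, since S3 validates every POST field against the signed policy. As a reading aid, here is a minimal sketch of upload logic that would satisfy those assertions — it assumes Node 18+ globals (fetch, FormData, Blob) and the mime package, and it is not the actual lib/s3UploadFile.js implementation:

```js
// Sketch only — NOT the real lib/s3UploadFile.js; shape inferred from the tests above.
import fs from 'fs';
import mime from 'mime';

async function uploadFileFormData(filePath, data) {
  const contentType = mime.getType(filePath) || 'application/octet-stream';
  const form = new FormData();

  // Copy the presigned fields verbatim; their values are covered by the policy signature.
  for (const [name, value] of Object.entries(data.fields)) {
    form.append(name, value);
  }

  // Append Content-Type only when the policy did not sign one already;
  // a second, unsigned Content-Type field would invalidate the policy.
  if (!('Content-Type' in data.fields)) {
    form.append('Content-Type', contentType);
  }

  // Set the MIME type on the Blob itself instead of adding another form field.
  // The file entry goes last, as S3 ignores POST fields that follow it.
  form.append('file', new Blob([fs.readFileSync(filePath)], { type: contentType }));

  const response = await fetch(data.url, { method: 'POST', body: form });
  if (!response.ok) throw new Error(`S3 upload failed with status ${response.status}`);
  return true;
}
```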