diff --git a/.github/workflows/marketplace-publish.yml b/.github/workflows/marketplace-publish.yml index aef91b2d323..113d4758617 100644 --- a/.github/workflows/marketplace-publish.yml +++ b/.github/workflows/marketplace-publish.yml @@ -57,14 +57,6 @@ jobs: git tag -a "v${current_package_version}" -m "Release v${current_package_version}" git push origin "v${current_package_version}" --no-verify echo "Successfully created and pushed git tag v${current_package_version}" - - name: Publish Extension - env: - VSCE_PAT: ${{ secrets.VSCE_PAT }} - OVSX_PAT: ${{ secrets.OVSX_PAT }} - run: | - current_package_version=$(node -p "require('./src/package.json').version") - pnpm --filter roo-cline publish:marketplace - echo "Successfully published version $current_package_version to VS Code Marketplace" - name: Create GitHub Release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/nightly-publish.yml b/.github/workflows/nightly-publish.yml index e25bdba990a..cebe01a0b5c 100644 --- a/.github/workflows/nightly-publish.yml +++ b/.github/workflows/nightly-publish.yml @@ -42,11 +42,3 @@ jobs: EOF - name: Build VSIX run: pnpm vsix:nightly # Produces bin/roo-code-nightly-0.0.[count].vsix - - name: Publish to VS Code Marketplace - env: - VSCE_PAT: ${{ secrets.VSCE_PAT }} - run: npx vsce publish --packagePath "bin/$(/bin/ls bin | head -n1)" - - name: Publish to Open VSX Registry - env: - OVSX_PAT: ${{ secrets.OVSX_PAT }} - run: npx ovsx publish "bin/$(ls bin | head -n1)" diff --git a/.husky/pre-commit b/.husky/pre-commit deleted file mode 100644 index a0e3a53df53..00000000000 --- a/.husky/pre-commit +++ /dev/null @@ -1,27 +0,0 @@ -branch="$(git rev-parse --abbrev-ref HEAD)" - -if [ "$branch" = "main" ]; then - echo "You can't commit directly to main - please check out a branch." - exit 1 -fi - -# Detect if running on Windows and use pnpm.cmd, otherwise use pnpm. -if [ "$OS" = "Windows_NT" ]; then - pnpm_cmd="pnpm.cmd" -else - if command -v pnpm >/dev/null 2>&1; then - pnpm_cmd="pnpm" - else - pnpm_cmd="npx pnpm" - fi -fi - -# Detect if running on Windows and use npx.cmd, otherwise use npx. -if [ "$OS" = "Windows_NT" ]; then - npx_cmd="npx.cmd" -else - npx_cmd="npx" -fi - -$npx_cmd lint-staged -$pnpm_cmd lint diff --git a/.husky/pre-push b/.husky/pre-push deleted file mode 100644 index 4cf91d95800..00000000000 --- a/.husky/pre-push +++ /dev/null @@ -1,42 +0,0 @@ -branch="$(git rev-parse --abbrev-ref HEAD)" - -if [ "$branch" = "main" ]; then - echo "You can't push directly to main - please check out a branch." - exit 1 -fi - -# Detect if running on Windows and use pnpm.cmd, otherwise use pnpm. -if [ "$OS" = "Windows_NT" ]; then - pnpm_cmd="pnpm.cmd" -else - if command -v pnpm >/dev/null 2>&1; then - pnpm_cmd="pnpm" - else - pnpm_cmd="npx pnpm" - fi -fi - -$pnpm_cmd run check-types - -# Use dotenvx to securely load .env.local and run commands that depend on it -if [ -f ".env.local" ]; then - # Check if RUN_TESTS_ON_PUSH is set to true and run tests with dotenvx - if npx dotenvx get RUN_TESTS_ON_PUSH -f .env.local 2>/dev/null | grep -q "^true$"; then - npx dotenvx run -f .env.local -- $pnpm_cmd run test - fi -else - # Fallback: run tests if RUN_TESTS_ON_PUSH is set in regular environment - if [ "$RUN_TESTS_ON_PUSH" = "true" ]; then - $pnpm_cmd run test - fi -fi - -# Check for new changesets. -NEW_CHANGESETS=$(find .changeset -name "*.md" ! 
-name "README.md" | wc -l | tr -d ' ') -echo "Changeset files: $NEW_CHANGESETS" - -if [ "$NEW_CHANGESETS" = "0" ]; then - echo "-------------------------------------------------------------------------------------" - echo "Changes detected. Please run 'pnpm changeset' to create a changeset if applicable." - echo "-------------------------------------------------------------------------------------" -fi diff --git a/build.sh b/build.sh new file mode 100755 index 00000000000..ea533d32676 --- /dev/null +++ b/build.sh @@ -0,0 +1,4 @@ +corepack prepare pnpm@10.8.1 --activate +pnpm -v +cd src +pnpm vsix diff --git a/docs/superpowers/specs/2026-05-09-openai-compatible-reasoning-design.md b/docs/superpowers/specs/2026-05-09-openai-compatible-reasoning-design.md new file mode 100644 index 00000000000..c5f949dcb71 --- /dev/null +++ b/docs/superpowers/specs/2026-05-09-openai-compatible-reasoning-design.md @@ -0,0 +1,129 @@ +# OpenAI-compatible reasoning payload shim + +Date: 2026-05-09 + +## Problem + +OpenAI-compatible request path currently sends `reasoningEffort` into AI SDK, but it does not add Responses-style `reasoning` payload. + +User expectation for OpenAI-compatible requests with reasoning enabled: + +- keep `reasoning_effort` +- also send `reasoning: { effort, summary: "auto" }` + +This should match OpenAI Codex / OpenAI Native behavior shape, while staying inside OpenAI-compatible chat completions path. + +## Goals + +- When resolved model params include `reasoningEffort`, send both: + - `reasoning_effort` + - `reasoning: { effort, summary: "auto" }` +- Apply same behavior for: + - `createMessage` + - `completePrompt` +- Keep `reasoning_effort` value unchanged, including extended values like `xhigh` or `none` when current model/settings resolve them. +- Leave request unchanged when reasoning is off. + +## Non-goals + +- No change to `openai.ts`. +- No change to `openai-native.ts` or `openai-codex.ts`. +- No UI/settings changes. +- No new provider capability detection. + +## Design + +### 1) Pass reasoning to AI SDK provider + +`OpenAICompatibleHandler` will build `providerOptions` for `streamText` and `generateText`. + +Use generic AI SDK key: + +```ts +providerOptions: { + openaiCompatible: { + reasoningEffort: model.reasoningEffort, + }, +} +``` + +Only include this block when `model.reasoningEffort` is defined. + +### 2) Add request-body transform hook + +Extend `OpenAICompatibleConfig` with AI SDK `transformRequestBody` hook and pass it into `createOpenAICompatible(...)`. + +Hook behavior: + +- if body has no `reasoning_effort`, return body unchanged +- if body has `reasoning_effort`, add/update: + ```ts + reasoning: { + effort: body.reasoning_effort, + summary: "auto", + } + ``` +- keep `reasoning_effort` in body +- preserve other fields + +This hook must apply to both streaming and non-streaming calls, because AI SDK uses same provider transform for both. + +### 3) Keep logic localized + +Implement inside `src/api/providers/openai-compatible.ts` only. + +Reason: + +- current `OpenAICompatibleHandler` is only consumer path +- current OpenAI-compatible provider family is Moonshot +- local change keeps scope tight and avoids touching shared reasoning transforms for unrelated providers + +## Data flow + +1. `MoonshotHandler.getModel()` resolves final `reasoningEffort` from settings/model defaults. +2. `OpenAICompatibleHandler.createMessage()` / `completePrompt()` pass `providerOptions.openaiCompatible.reasoningEffort` into AI SDK. +3. AI SDK serializes top-level `reasoning_effort`. +4. 
`transformRequestBody` adds `reasoning: { effort, summary: "auto" }` when `reasoning_effort` exists. +5. Provider receives both fields. + +## Fallback behavior + +- No reasoning selected: no `providerOptions`, no `reasoning`, no `reasoning_effort`. +- Disabled reasoning: same as above. +- Extended values: pass through unchanged. +- If body already contains `reasoning`, overwrite `effort` and `summary` only; do not drop unrelated keys. + +## Tests + +Add tests around current Moonshot/OpenAI-compatible path: + +1. **Constructor wiring** + + - `createOpenAICompatible` receives `transformRequestBody`. + +2. **Streaming request** + + - `createMessage()` passes `providerOptions.openaiCompatible.reasoningEffort` when enabled. + - transform adds both `reasoning_effort` and `reasoning`. + +3. **Non-streaming request** + + - `completePrompt()` uses same providerOptions path. + - transform adds both fields. + +4. **Disabled path** + + - when reasoning is disabled, request has neither `reasoning_effort` nor `reasoning`. + +5. **Extended effort path** + - `xhigh` stays `xhigh` in both fields. + +Prefer a small pure helper or captured transform callback in test so request-body mapping is asserted directly, not through SDK internals. + +## Acceptance criteria + +- OpenAI-compatible requests with reasoning enabled send both fields. +- `reasoning_effort` still exists. +- `reasoning.summary` is always `auto`. +- Behavior matches in stream and completion paths. +- No changes to unrelated provider paths. diff --git a/docs/superpowers/specs/2026-05-10-disable-commit-push-checks-design.md b/docs/superpowers/specs/2026-05-10-disable-commit-push-checks-design.md new file mode 100644 index 00000000000..5acabafaf4d --- /dev/null +++ b/docs/superpowers/specs/2026-05-10-disable-commit-push-checks-design.md @@ -0,0 +1,89 @@ +# Disable local commit/push checks design + +Date: 2026-05-10 + +## Problem +Repo currently enforces local Git checks through Husky hooks: +- `.husky/pre-commit` blocks commits to `main` +- `.husky/pre-commit` runs `lint-staged` +- `.husky/pre-commit` runs `pnpm lint` +- `.husky/pre-push` blocks pushes to `main` +- `.husky/pre-push` runs `check-types` +- `.husky/pre-push` may run tests +- `.husky/pre-push` prints changeset reminder + +User wants all local commit/push enforcement removed. +User also wants `lint-staged` config removed from `package.json`. + +## Goals +- Remove all local Husky checks from commit and push workflow. +- Remove branch protection logic from local Husky hooks. +- Remove `lint-staged` configuration from root `package.json`. +- Keep normal repo scripts (`lint`, `check-types`, `test`, etc.) available for manual use. + +## Non-goals +- No CI workflow changes. +- No package script removals for `lint`, `check-types`, `test`, or `build`. +- No Husky uninstall. +- No change to hooks other than commit/push hooks. 
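+
+As a reference only (not part of this change), the checks the hooks used to run remain available through the existing package scripts and can be invoked by hand; the script names below are the ones the deleted hooks called:
+
+```sh
+# Manual equivalents of the removed pre-commit / pre-push checks.
+pnpm lint              # formerly run by .husky/pre-commit
+pnpm run check-types   # formerly run by .husky/pre-push
+pnpm run test          # formerly run by .husky/pre-push when RUN_TESTS_ON_PUSH=true
+```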
+ +## Design +### 1) Delete commit/push hook entrypoints +Delete: +- `.husky/pre-commit` +- `.husky/pre-push` + +Result: +- no local commit blocking on `main` +- no local push blocking on `main` +- no automatic `lint-staged`, `lint`, `check-types`, test, or changeset reminder during commit/push + +### 2) Keep Husky base wiring intact +Keep: +- `.husky/_/**` +- root `package.json` script: `"prepare": "husky"` + +Reason: +- smallest change for requested scope +- avoids changing global Git hook bootstrap behavior +- commit/push checks disappear because entrypoint hook files are gone + +### 3) Remove lint-staged config from package.json +Delete root-level `lint-staged` block from `package.json`. + +Reason: +- user explicitly asked to remove it +- after deleting `.husky/pre-commit`, this config is unused +- removes stale config from repo root + +## Data flow after change +Before: +1. `git commit` -> Husky runs `.husky/pre-commit` +2. hook blocks `main`, runs `lint-staged`, runs `pnpm lint` +3. `git push` -> Husky runs `.husky/pre-push` +4. hook blocks `main`, runs `check-types`, optional tests, changeset reminder + +After: +1. `git commit` -> no repo-defined pre-commit entrypoint +2. `git push` -> no repo-defined pre-push entrypoint +3. manual checks still possible via package scripts + +## Risks +- Developers can commit/push broken code locally without warning. +- Developers can commit/push directly to `main` locally unless remote branch protections exist. +- Formatting on staged files will no longer run automatically. + +These risks are accepted by request. + +## Testing +- Verify `.husky/pre-commit` no longer exists. +- Verify `.husky/pre-push` no longer exists. +- Verify `package.json` no longer contains `lint-staged` block. +- Optional smoke check: `git status` and `git commit` path should no longer invoke repo checks. + +## Acceptance criteria +- `.husky/pre-commit` deleted. +- `.husky/pre-push` deleted. +- root `package.json` no longer contains `lint-staged` config. +- `prepare: husky` remains unchanged. +- local commit/push no longer run repo-defined checks. \ No newline at end of file diff --git a/docs/superpowers/specs/2026-05-10-remove-vsix-publish-steps-design.md b/docs/superpowers/specs/2026-05-10-remove-vsix-publish-steps-design.md new file mode 100644 index 00000000000..6a7f83d3686 --- /dev/null +++ b/docs/superpowers/specs/2026-05-10-remove-vsix-publish-steps-design.md @@ -0,0 +1,94 @@ +# Remove VSIX publish steps design + +Date: 2026-05-10 + +## Problem +Two GitHub workflows still do local packaging *and* publish to registries: +- `.github/workflows/marketplace-publish.yml` +- `.github/workflows/nightly-publish.yml` + +User wants to keep: +- `.vsix` build +- git tag creation +- GitHub Release creation + +User wants to remove only registry publish steps: +- VS Code Marketplace publish +- Open VSX publish + +## Goals +- Keep VSIX packaging in both workflows. +- Keep git tag creation in `marketplace-publish.yml`. +- Keep GitHub Release creation in `marketplace-publish.yml`. +- Remove all registry publish steps from both workflows. + +## Non-goals +- No changes to `.vsix` packaging commands. +- No changes to git tag logic. +- No changes to GitHub Release logic. +- No changes to workflow triggers or job permissions unless needed by removed steps. +- No changes to app code or package scripts. 
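+
+If a build ever needs to be pushed to the registries by hand, the commands removed from the workflows can still be run locally against the packaged `.vsix` (a sketch reusing the exact commands from the deleted nightly steps; both need the same `VSCE_PAT` / `OVSX_PAT` tokens the workflows used):
+
+```sh
+# Manual fallback for the removed registry publish steps.
+export VSCE_PAT=...   # VS Code Marketplace token
+export OVSX_PAT=...   # Open VSX token
+npx vsce publish --packagePath "bin/$(ls bin | head -n1)"
+npx ovsx publish "bin/$(ls bin | head -n1)"
+```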
+ +## Design +### 1) Remove Marketplace publish from release workflow +In `.github/workflows/marketplace-publish.yml`, delete the `Publish Extension` step that runs: +- `pnpm --filter roo-cline publish:marketplace` + +Keep: +- `Package Extension` +- `Create and Push Git Tag` +- `Create GitHub Release` + +### 2) Remove registry publish from nightly workflow +In `.github/workflows/nightly-publish.yml`, delete both publishing steps: +- `Publish to VS Code Marketplace` +- `Publish to Open VSX Registry` + +Keep: +- checkout +- node/pnpm setup +- nightly version patching +- `Build VSIX` + +### 3) Preserve build artifacts +Both workflows should still leave the packaged `.vsix` in `bin/` so it can be used by: +- GitHub Release assets in the release workflow +- local inspection or later upload steps if added later + +## Data flow after change +### marketplace-publish.yml +1. Trigger on release-related PR close or manual dispatch. +2. Checkout code. +3. Setup Node/pnpm. +4. Package extension into `.vsix`. +5. Create/push git tag. +6. Create GitHub Release with `.vsix` attached. +7. No registry publish happens. + +### nightly-publish.yml +1. Trigger on `main` push or manual dispatch. +2. Checkout code. +3. Setup Node/pnpm. +4. Patch nightly version. +5. Build `.vsix`. +6. No registry publish happens. + +## Risks +- Nightly workflow no longer publishes to registries automatically. +- Release workflow no longer publishes to Marketplace automatically. +- Release distribution now depends on GitHub Release only. + +These risks are accepted by request. + +## Testing +- Verify `marketplace-publish.yml` no longer contains `publish:marketplace`. +- Verify `nightly-publish.yml` no longer contains Marketplace/Open VSX publish commands. +- Verify both workflows still contain packaging/build steps. +- Verify `marketplace-publish.yml` still contains git tag and GitHub Release steps. + +## Acceptance criteria +- `marketplace-publish.yml` no longer publishes to VS Code Marketplace. +- `nightly-publish.yml` no longer publishes to VS Code Marketplace or Open VSX. +- Both workflows still build `.vsix`. +- `marketplace-publish.yml` still tags and creates GitHub Release. +- No app code or package script changes required. 
\ No newline at end of file diff --git a/package.json b/package.json index de8dff751cb..4e7be84c66c 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "roo-code", "packageManager": "pnpm@10.8.1", "engines": { - "node": "20.19.2" + "node": ">=20.19.2" }, "scripts": { "preinstall": "node scripts/bootstrap.mjs", @@ -49,11 +49,6 @@ "turbo": "^2.5.6", "typescript": "5.8.3" }, - "lint-staged": { - "*.{js,jsx,ts,tsx,json,css,md}": [ - "prettier --write" - ] - }, "pnpm": { "onlyBuiltDependencies": [ "@vscode/ripgrep" diff --git a/scripts/capture_roocode_requests.py b/scripts/capture_roocode_requests.py new file mode 100644 index 00000000000..f82330dd29c --- /dev/null +++ b/scripts/capture_roocode_requests.py @@ -0,0 +1,335 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import base64 +import http.client +import json +import threading +from datetime import datetime, timezone +from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer +from pathlib import Path +from typing import Optional +from urllib.parse import urlsplit + +HOP_BY_HOP_HEADERS = { + "connection", + "proxy-connection", + "keep-alive", + "transfer-encoding", + "te", + "trailer", + "upgrade", +} + + +def utc_now() -> str: + return datetime.now(timezone.utc).isoformat(timespec="seconds") + + +def normalize_path(path: str) -> str: + if not path.startswith("/"): + return "/" + path + return path + + +def join_paths(base_path: str, suffix_path: str) -> str: + base_path = normalize_path(base_path or "/") + suffix_path = normalize_path(suffix_path or "/") + + if base_path != "/" and base_path.endswith("/"): + base_path = base_path[:-1] + if suffix_path == "/": + return base_path + if base_path == "/": + return suffix_path + return base_path + suffix_path + + +def is_textual_content(content_type: Optional[str]) -> bool: + if not content_type: + return False + ct = content_type.lower() + return any( + marker in ct + for marker in ( + "application/json", + "text/", + "application/xml", + "application/x-www-form-urlencoded", + "application/javascript", + "application/graphql", + "text/event-stream", + ) + ) + + +def format_body(body: bytes, content_type: Optional[str]) -> str: + if not body: + return "" + + if is_textual_content(content_type): + try: + text = body.decode("utf-8") + except UnicodeDecodeError: + text = None + + if text is not None: + if content_type and "json" in content_type.lower(): + try: + return json.dumps(json.loads(text), indent=2, ensure_ascii=False) + except json.JSONDecodeError: + return text + return text + + return ( + f"\n" + f"base64:\n{base64.b64encode(body).decode('ascii')}" + ) + + +class RequestLogger: + def __init__(self, log_file: Path): + self.log_file = log_file + self.lock = threading.Lock() + self.counter = 0 + self.log_file.parent.mkdir(parents=True, exist_ok=True) + + def write(self, entry: str) -> None: + with self.lock: + with self.log_file.open("a", encoding="utf-8") as fp: + fp.write(entry) + if not entry.endswith("\n"): + fp.write("\n") + fp.flush() + + def next_id(self) -> int: + with self.lock: + self.counter += 1 + return self.counter + + +class ProxyHandler(BaseHTTPRequestHandler): + protocol_version = "HTTP/1.1" + + upstream_scheme = "https" + upstream_host = "api.openai.com" + upstream_port = 443 + upstream_base_path = "/v1" + local_prefix = "/v1" + logger: RequestLogger + + def log_message(self, format: str, *args) -> None: # noqa: A003 + return + + def do_GET(self) -> None: # noqa: N802 + self._handle() + + def do_POST(self) -> 
None: # noqa: N802 + self._handle() + + def do_PUT(self) -> None: # noqa: N802 + self._handle() + + def do_PATCH(self) -> None: # noqa: N802 + self._handle() + + def do_DELETE(self) -> None: # noqa: N802 + self._handle() + + def do_OPTIONS(self) -> None: # noqa: N802 + self._handle() + + def _read_body(self) -> bytes: + transfer_encoding = (self.headers.get("Transfer-Encoding") or "").lower() + if "chunked" in transfer_encoding: + chunks: list[bytes] = [] + while True: + line = self.rfile.readline().strip() + if not line: + continue + size = int(line.split(b";", 1)[0], 16) + if size == 0: + while True: + trailer = self.rfile.readline() + if trailer in (b"\r\n", b"\n", b""): + break + break + chunks.append(self.rfile.read(size)) + self.rfile.read(2) # trailing CRLF + return b"".join(chunks) + + content_length = self.headers.get("Content-Length") + if not content_length: + return b"" + + try: + length = int(content_length) + except ValueError: + return b"" + + return self.rfile.read(length) + + def _target_path(self) -> str: + parsed = urlsplit(self.path) + incoming_path = normalize_path(parsed.path or "/") + + prefix = self.local_prefix or "" + if prefix and incoming_path.startswith(prefix): + remaining = incoming_path[len(prefix) :] + if not remaining: + remaining = "/" + else: + remaining = incoming_path + + forward_path = join_paths(self.upstream_base_path, remaining) + if parsed.query: + forward_path += f"?{parsed.query}" + return forward_path + + def _forward_headers(self) -> dict[str, str]: + headers: dict[str, str] = {} + for key, value in self.headers.items(): + if key.lower() in HOP_BY_HOP_HEADERS: + continue + headers[key] = value + + headers["Host"] = ( + self.upstream_host + if self.upstream_port in (80, 443) + else f"{self.upstream_host}:{self.upstream_port}" + ) + headers["Accept-Encoding"] = "identity" + headers.pop("Content-Length", None) + return headers + + def _send_chunk(self, data: bytes) -> None: + if not data: + return + self.wfile.write(f"{len(data):X}\r\n".encode("ascii")) + self.wfile.write(data) + self.wfile.write(b"\r\n") + + def _handle(self) -> None: + request_id = self.logger.next_id() + body = self._read_body() + parsed = urlsplit(self.path) + forward_path = self._target_path() + forward_headers = self._forward_headers() + + request_header_lines = "\n".join(f" {k}: {v}" for k, v in self.headers.items()) or " " + body_text = format_body(body, self.headers.get("Content-Type")) + + log_entry = ( + "=" * 80 + + f"\nREQUEST #{request_id}\n" + + f"Time: {utc_now()}\n" + + f"Client: {self.client_address[0]}:{self.client_address[1]}\n" + + f"Method: {self.command}\n" + + f"Incoming path: {self.path}\n" + + f"Forward to: {self.upstream_scheme}://{self.upstream_host}:{self.upstream_port}{forward_path}\n" + + "\nHeaders:\n" + + f"{request_header_lines}\n" + + "\nBody:\n" + + f"{body_text}\n" + + "\n" + ) + self.logger.write(log_entry) + + conn_cls = http.client.HTTPSConnection if self.upstream_scheme == "https" else http.client.HTTPConnection + conn = conn_cls(self.upstream_host, self.upstream_port, timeout=120) + + try: + conn.request(self.command, forward_path, body=body if body else None, headers=forward_headers) + resp = conn.getresponse() + + self.send_response(resp.status, resp.reason) + for name, value in resp.getheaders(): + lower = name.lower() + if lower in HOP_BY_HOP_HEADERS or lower == "content-length": + continue + self.send_header(name, value) + + # Use chunked transfer so SSE / streaming responses stay live. 
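+            # Content-Length from upstream is skipped above, so each chunk read below is re-framed by _send_chunk as it arrives.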
+ self.send_header("Transfer-Encoding", "chunked") + self.end_headers() + + while True: + chunk = resp.read(8192) + if not chunk: + break + self._send_chunk(chunk) + self.wfile.flush() + + self.wfile.write(b"0\r\n\r\n") + self.wfile.flush() + resp.close() + finally: + conn.close() + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Capture RooCode requests and write request headers/body to log.txt", + ) + parser.add_argument( + "--listen-host", + default="127.0.0.1", + help="Proxy listen host (default: 127.0.0.1)", + ) + parser.add_argument( + "--listen-port", + type=int, + default=8000, + help="Proxy listen port (default: 8000)", + ) + parser.add_argument( + "--upstream", + default="https://api.openai.com/v1", + help="Upstream base URL (default: https://api.openai.com/v1)", + ) + parser.add_argument( + "--local-prefix", + default="/v1", + help="Path prefix RooCode will send to this proxy (default: /v1)", + ) + parser.add_argument( + "--log-file", + default="log.txt", + help="Log file path (default: log.txt)", + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + parsed = urlsplit(args.upstream) + if parsed.scheme not in {"http", "https"}: + raise SystemExit("--upstream must start with http:// or https://") + if not parsed.hostname: + raise SystemExit("--upstream must include a host") + + ProxyHandler.upstream_scheme = parsed.scheme + ProxyHandler.upstream_host = parsed.hostname + ProxyHandler.upstream_port = parsed.port or (443 if parsed.scheme == "https" else 80) + ProxyHandler.upstream_base_path = parsed.path or "/" + ProxyHandler.local_prefix = normalize_path(args.local_prefix) + ProxyHandler.logger = RequestLogger(Path(args.log_file).resolve()) + + server = ThreadingHTTPServer((args.listen_host, args.listen_port), ProxyHandler) + print(f"Proxy listening on http://{args.listen_host}:{args.listen_port}{ProxyHandler.local_prefix}") + print(f"Forwarding to {args.upstream}") + print(f"Logging to {Path(args.log_file).resolve()}") + print("Set RooCode base URL to http://127.0.0.1:/v1 and keep your real API key in RooCode.") + try: + server.serve_forever() + except KeyboardInterrupt: + print("\nStopped.") + return 0 + finally: + server.server_close() + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/src/api/providers/__tests__/openai-compatible.spec.ts b/src/api/providers/__tests__/openai-compatible.spec.ts new file mode 100644 index 00000000000..93bc5c94786 --- /dev/null +++ b/src/api/providers/__tests__/openai-compatible.spec.ts @@ -0,0 +1,216 @@ +// npx vitest run src/api/providers/__tests__/openai-compatible.spec.ts + +const { mockStreamText, mockGenerateText, mockCreateOpenAICompatible } = vi.hoisted(() => ({ + mockStreamText: vi.fn(), + mockGenerateText: vi.fn(), + mockCreateOpenAICompatible: vi.fn(), +})) + +let capturedProviderConfig: any + +vi.mock("ai", () => ({ + streamText: mockStreamText, + generateText: mockGenerateText, +})) + +vi.mock("@ai-sdk/openai-compatible", () => ({ + createOpenAICompatible: mockCreateOpenAICompatible, +})) + +import type { Anthropic } from "@anthropic-ai/sdk" + +import type { ModelInfo, ReasoningEffortExtended } from "@roo-code/types" + +import type { ApiHandlerOptions } from "../../../shared/api" + +import type { OpenAICompatibleConfig } from "../openai-compatible" +import { OpenAICompatibleHandler } from "../openai-compatible" + +const testModelInfo: ModelInfo = { + maxTokens: 4096, + contextWindow: 128000, + supportsImages: false, + 
supportsPromptCache: false, + inputPrice: 0.5, + outputPrice: 1.5, + supportsReasoningEffort: ["low", "medium", "high", "xhigh"], +} + +class TestOpenAICompatibleHandler extends OpenAICompatibleHandler { + private resolvedReasoningEffort: ReasoningEffortExtended | undefined + + constructor(options: ApiHandlerOptions, reasoningEffort?: ReasoningEffortExtended) { + const config: OpenAICompatibleConfig = { + providerName: "test-provider", + baseURL: "https://test.example.com/v1", + apiKey: "test-api-key", + modelId: "test-model", + modelInfo: testModelInfo, + temperature: 0, + } + + super(options, config) + this.resolvedReasoningEffort = reasoningEffort + } + + override getModel() { + return { + id: "test-model", + info: testModelInfo, + maxTokens: 2048, + temperature: 0, + reasoningEffort: this.resolvedReasoningEffort, + } + } +} + +const systemPrompt = "You are helpful." +const messages: Anthropic.Messages.MessageParam[] = [ + { + role: "user", + content: [ + { + type: "text", + text: "Hello", + }, + ], + }, +] + +function buildEmptyStreamResult() { + return { + fullStream: (async function* () { + // drain + })(), + usage: Promise.resolve(undefined), + } +} + +describe("OpenAICompatibleHandler reasoning payload", () => { + beforeEach(() => { + vi.clearAllMocks() + capturedProviderConfig = undefined + mockCreateOpenAICompatible.mockImplementation((config: any) => { + capturedProviderConfig = config + return vi.fn((modelId: string) => ({ + modelId, + provider: "test-provider", + })) + }) + mockStreamText.mockReturnValue(buildEmptyStreamResult()) + mockGenerateText.mockResolvedValue({ text: "done" }) + }) + + it("reasoning payload createMessage passes providerOptions and transform adds both fields", async () => { + const handler = new TestOpenAICompatibleHandler({ apiModelId: "test-model" } as ApiHandlerOptions, "high") + + for await (const _chunk of handler.createMessage(systemPrompt, messages)) { + // drain + } + + expect(mockStreamText).toHaveBeenCalledWith( + expect.objectContaining({ + providerOptions: { + openaiCompatible: { + reasoningEffort: "high", + }, + }, + }), + ) + + const transformed = capturedProviderConfig.transformRequestBody({ + model: "test-model", + reasoning_effort: "high", + messages: [], + }) + + expect(transformed).toEqual( + expect.objectContaining({ + reasoning_effort: "high", + reasoning: { + effort: "high", + summary: "auto", + }, + }), + ) + }) + + it("reasoning payload completePrompt preserves xhigh in providerOptions and transform", async () => { + const handler = new TestOpenAICompatibleHandler({ apiModelId: "test-model" } as ApiHandlerOptions, "xhigh") + + await handler.completePrompt("Hello") + + expect(mockGenerateText).toHaveBeenCalledWith( + expect.objectContaining({ + providerOptions: { + openaiCompatible: { + reasoningEffort: "xhigh", + }, + }, + }), + ) + + const transformed = capturedProviderConfig.transformRequestBody({ + model: "test-model", + reasoning_effort: "xhigh", + prompt: "Hello", + }) + + expect(transformed).toEqual( + expect.objectContaining({ + reasoning_effort: "xhigh", + reasoning: { + effort: "xhigh", + summary: "auto", + }, + }), + ) + }) + + it("reasoning payload omits providerOptions and leaves body unchanged when reasoning disabled", async () => { + const handler = new TestOpenAICompatibleHandler({ apiModelId: "test-model" } as ApiHandlerOptions) + + for await (const _chunk of handler.createMessage(systemPrompt, messages)) { + // drain + } + + const callArgs = mockStreamText.mock.calls[0][0] + 
expect(callArgs.providerOptions).toBeUndefined()
+
+		const inputBody = {
+			model: "test-model",
+			messages: [],
+		}
+		const transformed = capturedProviderConfig.transformRequestBody(inputBody)
+		expect(transformed).toEqual(inputBody)
+		expect(transformed.reasoning_effort).toBeUndefined()
+		expect(transformed.reasoning).toBeUndefined()
+	})
+
+	it("reasoning payload overwrites effort and summary but keeps existing reasoning keys", () => {
+		new TestOpenAICompatibleHandler({ apiModelId: "test-model" } as ApiHandlerOptions, "high")
+
+		const transformed = capturedProviderConfig.transformRequestBody({
+			model: "test-model",
+			reasoning_effort: "high",
+			reasoning: {
+				foo: "bar",
+				effort: "low",
+				summary: "manual",
+			},
+			messages: [],
+		})
+
+		expect(transformed).toEqual(
+			expect.objectContaining({
+				reasoning_effort: "high",
+				reasoning: {
+					foo: "bar",
+					effort: "high",
+					summary: "auto",
+				},
+				messages: [],
+			}),
+		)
+	})
+})
diff --git a/src/api/providers/openai-compatible.ts b/src/api/providers/openai-compatible.ts
index d129e72452f..b0f287b072a 100644
--- a/src/api/providers/openai-compatible.ts
+++ b/src/api/providers/openai-compatible.ts
@@ -8,7 +8,7 @@ import OpenAI from "openai"
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible"
 import { streamText, generateText, LanguageModel, ToolSet } from "ai"
 
-import type { ModelInfo } from "@roo-code/types"
+import type { ModelInfo, ReasoningEffortExtended } from "@roo-code/types"
 
 import type { ApiHandlerOptions } from "../../shared/api"
 
@@ -19,6 +19,35 @@ import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 
+type OpenAICompatibleResolvedModel = {
+	id: string
+	info: ModelInfo
+	maxTokens?: number
+	temperature?: number
+	reasoningEffort?: ReasoningEffortExtended
+}
+
+type OpenAICompatibleProviderOptions = NonNullable<Parameters<typeof streamText>[0]["providerOptions"]> & {
+	openaiCompatible?: {
+		reasoningEffort?: ReasoningEffortExtended
+	}
+}
+
+function transformOpenAICompatibleReasoningBody(body: Record<string, unknown>): Record<string, unknown> {
+	if (body.reasoning_effort === undefined) {
+		return body
+	}
+
+	return {
+		...body,
+		reasoning: {
+			...(typeof body.reasoning === "object" && body.reasoning !== null ? body.reasoning : {}),
+			effort: body.reasoning_effort,
+			summary: "auto" as const,
+		},
+	}
+}
+
 /**
  * Configuration options for creating an OpenAI-compatible provider.
  */
@@ -41,6 +70,8 @@ export interface OpenAICompatibleConfig {
 	modelMaxTokens?: number
 	/** Temperature setting */
 	temperature?: number
+	/** Optional request transformer applied by AI SDK before sending */
+	transformRequestBody?: (args: Record<string, unknown>) => Record<string, unknown>
 }
 
 /**
@@ -66,6 +97,7 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 				...DEFAULT_HEADERS,
 				...(config.headers || {}),
 			},
+			transformRequestBody: config.transformRequestBody ?? transformOpenAICompatibleReasoningBody,
 		})
 	}
 
@@ -76,10 +108,22 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 		return this.provider(this.config.modelId)
 	}
 
+	protected getProviderOptions(model: OpenAICompatibleResolvedModel): OpenAICompatibleProviderOptions | undefined {
+		if (!model.reasoningEffort) {
+			return undefined
+		}
+
+		return {
+			openaiCompatible: {
+				reasoningEffort: model.reasoningEffort,
+			},
+		}
+	}
+
 	/**
 	 * Get the model information. Must be implemented by subclasses.
 	 */
-	abstract override getModel(): { id: string; info: ModelInfo; maxTokens?: number; temperature?: number }
+	abstract override getModel(): OpenAICompatibleResolvedModel
 
 	/**
 	 * Process usage metrics from the AI SDK response.
@@ -165,6 +209,8 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 		const openAiTools = this.convertToolsForOpenAI(metadata?.tools)
 		const aiSdkTools = convertToolsForAiSdk(openAiTools) as ToolSet | undefined
 
+		const providerOptions = this.getProviderOptions(model)
+
 		// Build the request options
 		const requestOptions: Parameters<typeof streamText>[0] = {
 			model: languageModel,
@@ -174,6 +220,7 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 			maxOutputTokens: this.getMaxOutputTokens(),
 			tools: aiSdkTools,
 			toolChoice: this.mapToolChoice(metadata?.tool_choice),
+			...(providerOptions ? { providerOptions } : {}),
 		}
 
 		// Use streamText for streaming responses
@@ -198,13 +245,16 @@ export abstract class OpenAICompatibleHandler extends BaseProvider implements Si
 	 * Complete a prompt using the AI SDK generateText.
 	 */
 	async completePrompt(prompt: string): Promise<string> {
+		const model = this.getModel()
 		const languageModel = this.getLanguageModel()
+		const providerOptions = this.getProviderOptions(model)
 
 		const { text } = await generateText({
 			model: languageModel,
 			prompt,
 			maxOutputTokens: this.getMaxOutputTokens(),
-			temperature: this.config.temperature ?? 0,
+			temperature: model.temperature ?? this.config.temperature ?? 0,
+			...(providerOptions ? { providerOptions } : {}),
 		})
 
 		return text
diff --git a/src/package.json b/src/package.json
index cb3b93d1602..d994edaae54 100644
--- a/src/package.json
+++ b/src/package.json
@@ -3,7 +3,7 @@
 	"displayName": "%extension.displayName%",
 	"description": "%extension.description%",
 	"publisher": "RooVeterinaryInc",
-	"version": "3.53.0",
+	"version": "3.53.1",
 	"icon": "assets/icons/icon.png",
 	"galleryBanner": {
 		"color": "#617A91",
diff --git a/webview-ui/src/components/chat/__tests__/Announcement.spec.tsx b/webview-ui/src/components/chat/__tests__/Announcement.spec.tsx
index 84254ae9e9c..112bbede979 100644
--- a/webview-ui/src/components/chat/__tests__/Announcement.spec.tsx
+++ b/webview-ui/src/components/chat/__tests__/Announcement.spec.tsx
@@ -12,7 +12,7 @@ vi.mock("@src/utils/vscode", () => ({
 
 vi.mock("@roo/package", () => ({
 	Package: {
-		version: "3.53.0",
+		version: "3.53.1",
 	},
 }))
 
@@ -55,7 +55,7 @@ describe("Announcement", () => {
 	it("renders the v3.53.0 announcement title and highlights", () => {
 		render()
 
-		expect(screen.getByText("Roo Code 3.53.0 Released")).toBeInTheDocument()
+		expect(screen.getByText("Roo Code 3.53.1 Released")).toBeInTheDocument()
 		expect(
 			screen.getByText(
 				"GPT-5.5 via OpenAI Codex: Added GPT-5.5 support in the OpenAI Codex provider so you can use the latest model straight from Roo Code.",