diff --git a/README.md b/README.md
index 81e7406..e4beed6 100644
--- a/README.md
+++ b/README.md
@@ -51,9 +51,9 @@ No port forwarding. No VPN. Just message and code.
 
 Connect via **6 platforms** : WhatsApp, Telegram, Discord, Slack, Microsoft Teams, and Signal.
 
-### 9 AI Providers
+### 10 AI Providers
 
-Anthropic, OpenAI, Google Gemini, Mistral, Moonshot, MiniMax, xAI Grok, HuggingFace, and OpenRouter. Instantly switch providers with `/switch` directly from your messaging app.
+Anthropic, OpenAI, Google Gemini, Mistral, Moonshot, MiniMax, xAI Grok, Z.ai (Zhipu), HuggingFace, and OpenRouter. Instantly switch providers with `/switch` directly from your messaging app.
 
 ### Hot-Switching
 
@@ -141,7 +141,7 @@ docker run -it \
 
 ## 🤖 AI Providers
 
-txtcode supports **9 LLM providers** for chat mode. Configure one or more during setup and hot-switch with `/switch`.
+txtcode supports **10 LLM providers** for chat mode. Configure one or more during setup and hot-switch with `/switch`.
 
 | Provider            | Example Models                              | Notes                       |
 | :------------------ | :----------------------------------------- | :-------------------------- |
@@ -152,6 +152,7 @@ txtcode supports **9 LLM providers** for chat mode. Configure one or more during
 | **Moonshot (Kimi)** | `kimi-k2.5`, `moonshot-v1-128k`             | Long-context models         |
 | **MiniMax**         | `MiniMax-M2.5`, `MiniMax-M2.1`              | MiniMax family              |
 | **xAI (Grok)**      | `grok-4`, `grok-3-fast`                     | Grok family                 |
+| **Z.ai (Zhipu)**    | `glm-5`, `glm-4.7`, `glm-4.6`               | GLM family                  |
 | **HuggingFace**     | _Discovered at runtime_                     | Inference Providers API     |
 | **OpenRouter**      | _Discovered at runtime_                     | Unified API for 100+ models |
 
diff --git a/src/core/router.ts b/src/core/router.ts
index 8f2ac99..b49869d 100644
--- a/src/core/router.ts
+++ b/src/core/router.ts
@@ -14,6 +14,7 @@ import { processWithMoonshot } from "../providers/moonshot";
 import { processWithOpenAI } from "../providers/openai";
 import { processWithOpenRouter } from "../providers/openrouter";
 import { processWithXAI } from "../providers/xai";
+import { processWithZAI } from "../providers/zai";
 import { logger } from "../shared/logger";
 import { IDEAdapter, ModelInfo } from "../shared/types";
 import { ContextManager } from "./context-manager";
@@ -146,6 +147,8 @@ export class Router {
         return await processWithMistral(instruction, this.apiKey, this.model);
       case "xai":
         return await processWithXAI(instruction, this.apiKey, this.model);
+      case "zai":
+        return await processWithZAI(instruction, this.apiKey, this.model);
       default:
         return `[ERROR] Unsupported AI provider: ${this.provider}. Run: txtcode config`;
     }
diff --git a/src/data/zai_models.json b/src/data/zai_models.json
new file mode 100644
index 0000000..93298b2
--- /dev/null
+++ b/src/data/zai_models.json
@@ -0,0 +1,21 @@
+{
+  "name": "Z.ai",
+  "models": [
+    {
+      "id": "glm-5",
+      "name": "GLM-5",
+      "description": "Flagship foundation model for agentic engineering",
+      "recommended": true
+    },
+    {
+      "id": "glm-4.7",
+      "name": "GLM-4.7",
+      "description": "High-performance general-purpose model"
+    },
+    {
+      "id": "glm-4.6",
+      "name": "GLM-4.6",
+      "description": "Balanced model for chat and reasoning tasks"
+    }
+  ]
+}
diff --git a/src/providers/zai.ts b/src/providers/zai.ts
new file mode 100644
index 0000000..03ba9f7
--- /dev/null
+++ b/src/providers/zai.ts
@@ -0,0 +1,44 @@
+import OpenAI from "openai";
+import { logger } from "../shared/logger";
+
+const SYSTEM_PROMPT =
+  "You are TxtCode AI — a helpful, knowledgeable coding assistant accessible via messaging. Be concise, use markdown for clarity, and suggest /code mode for deep coding work.";
+
+export async function processWithZAI(
+  instruction: string,
+  apiKey: string,
+  model: string,
+): Promise<string> {
+  const startTime = Date.now();
+  logger.debug(`[ZAI] Request → model=${model}, prompt=${instruction.length} chars`);
+
+  try {
+    const client = new OpenAI({
+      apiKey,
+      baseURL: "https://api.z.ai/api/paas/v4",
+    });
+
+    const completion = await client.chat.completions.create({
+      model,
+      max_tokens: 4096,
+      messages: [
+        { role: "system", content: SYSTEM_PROMPT },
+        { role: "user", content: instruction },
+      ],
+    });
+
+    const choice = completion.choices[0];
+
+    logger.debug(
+      `[ZAI] Done in ${Date.now() - startTime}ms, ` +
+        `tokens=${completion.usage?.prompt_tokens ?? "?"}in/${completion.usage?.completion_tokens ?? "?"}out`,
+    );
+
+    return choice.message.content || "No response from Z.ai";
+  } catch (error: unknown) {
+    logger.error(`[ZAI] API error after ${Date.now() - startTime}ms`, error);
+    throw new Error(`Z.ai API error: ${error instanceof Error ? error.message : "Unknown error"}`, {
+      cause: error,
+    });
+  }
+}
diff --git a/test/unit/router.test.ts b/test/unit/router.test.ts
index 923fd4f..410e299 100644
--- a/test/unit/router.test.ts
+++ b/test/unit/router.test.ts
@@ -181,6 +181,12 @@ describe("Router", () => {
       expect(result).toContain("[ERROR]");
       expect(result).toContain("Unsupported");
     });
+
+    it("routes to zai provider without throwing", async () => {
+      router.updateProvider("zai", "test-key", "glm-5");
+      expect(router.getProviderName()).toBe("zai");
+      expect(router.getCurrentModel()).toBe("glm-5");
+    });
   });
 
   describe("abortCurrentCommand", () => {
diff --git a/test/unit/zai-provider.test.ts b/test/unit/zai-provider.test.ts
new file mode 100644
index 0000000..85e2973
--- /dev/null
+++ b/test/unit/zai-provider.test.ts
@@ -0,0 +1,43 @@
+import { describe, it, expect, beforeEach } from "vitest";
+import { loadModelsCatalog, clearCatalogCache } from "../../src/utils/models-catalog-loader";
+
+describe("Z.ai provider", () => {
+  beforeEach(() => {
+    clearCatalogCache();
+  });
+
+  describe("model catalog", () => {
+    it("is discovered by the catalog loader", () => {
+      const catalog = loadModelsCatalog();
+      expect(catalog.providers.zai).toBeDefined();
+    });
+
+    it("has correct provider name", () => {
+      const catalog = loadModelsCatalog();
+      expect(catalog.providers.zai.name).toBe("Z.ai");
+    });
+
+    it("includes all expected models", () => {
+      const catalog = loadModelsCatalog();
+      const ids = catalog.providers.zai.models.map((m) => m.id);
+      expect(ids).toContain("glm-5");
+      expect(ids).toContain("glm-4.7");
+      expect(ids).toContain("glm-4.6");
+    });
+
+    it("has glm-5 marked as recommended", () => {
+      const catalog = loadModelsCatalog();
+      const glm5 = catalog.providers.zai.models.find((m) => m.id === "glm-5");
+      expect(glm5).toBeDefined();
+      expect(glm5!.recommended).toBe(true);
+    });
+
+    it("every model has a description", () => {
+      const catalog = loadModelsCatalog();
+      for (const model of catalog.providers.zai.models) {
+        expect(model.description).toBeDefined();
+        expect(model.description.length).toBeGreaterThan(0);
+      }
+    });
+  });
+});