Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,9 +51,9 @@ No port forwarding. No VPN. Just message and code.

Connect via **6 platforms**: WhatsApp, Telegram, Discord, Slack, Microsoft Teams, and Signal.

### 9 AI Providers
### 10 AI Providers

Anthropic, OpenAI, Google Gemini, Mistral, Moonshot, MiniMax, xAI Grok, HuggingFace, and OpenRouter. Instantly switch providers with `/switch` directly from your messaging app.
Anthropic, OpenAI, Google Gemini, Mistral, Moonshot, MiniMax, xAI Grok, Z.ai (Zhipu), HuggingFace, and OpenRouter. Instantly switch providers with `/switch` directly from your messaging app.

### Hot-Switching

Expand Down Expand Up @@ -141,7 +141,7 @@ docker run -it \

## 🤖 AI Providers

txtcode supports **9 LLM providers** for chat mode. Configure one or more during setup and hot-switch with `/switch`.
txtcode supports **10 LLM providers** for chat mode. Configure one or more during setup and hot-switch with `/switch`.

| Provider | Example Models | Notes |
| :------------------ | :----------------------------------------- | :-------------------------- |
Expand All @@ -152,6 +152,7 @@ txtcode supports **9 LLM providers** for chat mode. Configure one or more during
| **Moonshot (Kimi)** | `kimi-k2.5`, `moonshot-v1-128k` | Long-context models |
| **MiniMax** | `MiniMax-M2.5`, `MiniMax-M2.1` | MiniMax family |
| **xAI (Grok)** | `grok-4`, `grok-3-fast` | Grok family |
| **Z.ai (Zhipu)** | `glm-5`, `glm-4.7`, `glm-4.6` | GLM family |
| **HuggingFace** | _Discovered at runtime_ | Inference Providers API |
| **OpenRouter** | _Discovered at runtime_ | Unified API for 100+ models |

Expand Down
3 changes: 3 additions & 0 deletions src/core/router.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import { processWithMoonshot } from "../providers/moonshot";
import { processWithOpenAI } from "../providers/openai";
import { processWithOpenRouter } from "../providers/openrouter";
import { processWithXAI } from "../providers/xai";
import { processWithZAI } from "../providers/zai";
import { logger } from "../shared/logger";
import { IDEAdapter, ModelInfo } from "../shared/types";
import { ContextManager } from "./context-manager";
Expand Down Expand Up @@ -146,6 +147,8 @@ export class Router {
return await processWithMistral(instruction, this.apiKey, this.model);
case "xai":
return await processWithXAI(instruction, this.apiKey, this.model);
case "zai":
return await processWithZAI(instruction, this.apiKey, this.model);
default:
return `[ERROR] Unsupported AI provider: ${this.provider}. Run: txtcode config`;
}
Expand Down
21 changes: 21 additions & 0 deletions src/data/zai_models.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
{
"name": "Z.ai",
"models": [
{
"id": "glm-5",
"name": "GLM-5",
"description": "Flagship foundation model for agentic engineering",
"recommended": true
},
{
"id": "glm-4.7",
"name": "GLM-4.7",
"description": "High-performance general-purpose model"
},
{
"id": "glm-4.6",
"name": "GLM-4.6",
"description": "Balanced model for chat and reasoning tasks"
}
]
}
44 changes: 44 additions & 0 deletions src/providers/zai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import OpenAI from "openai";
import { logger } from "../shared/logger";

const SYSTEM_PROMPT =
"You are TxtCode AI — a helpful, knowledgeable coding assistant accessible via messaging. Be concise, use markdown for clarity, and suggest /code mode for deep coding work.";

export async function processWithZAI(
instruction: string,
apiKey: string,
model: string,
): Promise<string> {
const startTime = Date.now();
logger.debug(`[ZAI] Request → model=${model}, prompt=${instruction.length} chars`);

try {
const client = new OpenAI({
apiKey,
baseURL: "https://api.z.ai/api/paas/v4",
});

const completion = await client.chat.completions.create({
model,
max_tokens: 4096,
messages: [
{ role: "system", content: SYSTEM_PROMPT },
{ role: "user", content: instruction },
],
});

const choice = completion.choices[0];

logger.debug(
`[ZAI] Done in ${Date.now() - startTime}ms, ` +
`tokens=${completion.usage?.prompt_tokens ?? "?"}in/${completion.usage?.completion_tokens ?? "?"}out`,
);

return choice.message.content || "No response from Z.ai";
} catch (error: unknown) {
logger.error(`[ZAI] API error after ${Date.now() - startTime}ms`, error);
throw new Error(`Z.ai API error: ${error instanceof Error ? error.message : "Unknown error"}`, {
cause: error,
});
}
}
6 changes: 6 additions & 0 deletions test/unit/router.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,12 @@ describe("Router", () => {
expect(result).toContain("[ERROR]");
expect(result).toContain("Unsupported");
});

it("routes to zai provider without throwing", async () => {
router.updateProvider("zai", "test-key", "glm-5");
expect(router.getProviderName()).toBe("zai");
expect(router.getCurrentModel()).toBe("glm-5");
});
});

describe("abortCurrentCommand", () => {
Expand Down
43 changes: 43 additions & 0 deletions test/unit/zai-provider.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
import { describe, it, expect, beforeEach } from "vitest";
import { loadModelsCatalog, clearCatalogCache } from "../../src/utils/models-catalog-loader";

describe("Z.ai provider", () => {
beforeEach(() => {
clearCatalogCache();
});

describe("model catalog", () => {
it("is discovered by the catalog loader", () => {
const catalog = loadModelsCatalog();
expect(catalog.providers.zai).toBeDefined();
});

it("has correct provider name", () => {
const catalog = loadModelsCatalog();
expect(catalog.providers.zai.name).toBe("Z.ai");
});

it("includes all expected models", () => {
const catalog = loadModelsCatalog();
const ids = catalog.providers.zai.models.map((m) => m.id);
expect(ids).toContain("glm-5");
expect(ids).toContain("glm-4.7");
expect(ids).toContain("glm-4.6");
});

it("has glm-5 marked as recommended", () => {
const catalog = loadModelsCatalog();
const glm5 = catalog.providers.zai.models.find((m) => m.id === "glm-5");
expect(glm5).toBeDefined();
expect(glm5!.recommended).toBe(true);
});

it("every model has a description", () => {
const catalog = loadModelsCatalog();
for (const model of catalog.providers.zai.models) {
expect(model.description).toBeDefined();
expect(model.description.length).toBeGreaterThan(0);
}
});
});
});