Skip to content

Commit afa062d

Browse files
mjdouglasclaude
and committed
feat(opencode): support custom system prompt per model in config
Allow custom models to define their own system prompt via the `prompt` field in provider model config, overriding the default model-family matching logic in SystemPrompt.provider(). Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent f2d3a4c commit afa062d

5 files changed

Lines changed: 206 additions & 0 deletions

File tree

packages/opencode/src/config/config.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -983,6 +983,7 @@ export namespace Config {
983983
.record(
984984
z.string(),
985985
ModelsDev.Model.partial().extend({
986+
prompt: z.string().optional().describe("Custom system prompt for this model"),
986987
variants: z
987988
.record(
988989
z.string(),

packages/opencode/src/provider/provider.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -734,6 +734,7 @@ export namespace Provider {
734734
options: z.record(z.string(), z.any()),
735735
headers: z.record(z.string(), z.string()),
736736
release_date: z.string(),
737+
prompt: z.string().optional(),
737738
variants: z.record(z.string(), z.record(z.string(), z.any())).optional(),
738739
})
739740
.meta({
@@ -961,6 +962,7 @@ export namespace Provider {
961962
headers: mergeDeep(existingModel?.headers ?? {}, model.headers ?? {}),
962963
family: model.family ?? existingModel?.family ?? "",
963964
release_date: model.release_date ?? existingModel?.release_date ?? "",
965+
prompt: model.prompt ?? existingModel?.prompt,
964966
variants: {},
965967
}
966968
const merged = mergeDeep(ProviderTransform.variants(parsedModel), model.variants ?? {})

packages/opencode/src/session/system.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ export namespace SystemPrompt {
2020
}
2121

2222
export function provider(model: Provider.Model) {
23+
if (model.prompt) return [model.prompt]
2324
if (model.api.id.includes("gpt-5")) return [PROMPT_CODEX]
2425
if (model.api.id.includes("gpt-") || model.api.id.includes("o1") || model.api.id.includes("o3"))
2526
return [PROMPT_BEAST]

packages/opencode/test/provider/provider.test.ts

Lines changed: 106 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2247,6 +2247,112 @@ test("cloudflare-ai-gateway loads with env variables", async () => {
22472247
})
22482248
})
22492249

2250+
test("custom model prompt is stored on model", async () => {
2251+
await using tmp = await tmpdir({
2252+
init: async (dir) => {
2253+
await Bun.write(
2254+
path.join(dir, "opencode.json"),
2255+
JSON.stringify({
2256+
$schema: "https://opencode.ai/config.json",
2257+
provider: {
2258+
"custom-prompt-provider": {
2259+
name: "Custom Prompt Provider",
2260+
npm: "@ai-sdk/openai-compatible",
2261+
env: [],
2262+
models: {
2263+
"custom-model": {
2264+
name: "Custom Model",
2265+
tool_call: true,
2266+
limit: { context: 128000, output: 4096 },
2267+
prompt: "You are a custom coding assistant.",
2268+
},
2269+
},
2270+
options: { apiKey: "test-key" },
2271+
},
2272+
},
2273+
}),
2274+
)
2275+
},
2276+
})
2277+
await Instance.provide({
2278+
directory: tmp.path,
2279+
fn: async () => {
2280+
const providers = await Provider.list()
2281+
const model = providers[ProviderID.make("custom-prompt-provider")].models["custom-model"]
2282+
expect(model.prompt).toBe("You are a custom coding assistant.")
2283+
},
2284+
})
2285+
})
2286+
2287+
test("model prompt is undefined when not specified", async () => {
2288+
await using tmp = await tmpdir({
2289+
init: async (dir) => {
2290+
await Bun.write(
2291+
path.join(dir, "opencode.json"),
2292+
JSON.stringify({
2293+
$schema: "https://opencode.ai/config.json",
2294+
provider: {
2295+
"no-prompt-provider": {
2296+
name: "No Prompt Provider",
2297+
npm: "@ai-sdk/openai-compatible",
2298+
env: [],
2299+
models: {
2300+
"no-prompt-model": {
2301+
name: "No Prompt Model",
2302+
tool_call: true,
2303+
limit: { context: 128000, output: 4096 },
2304+
},
2305+
},
2306+
options: { apiKey: "test-key" },
2307+
},
2308+
},
2309+
}),
2310+
)
2311+
},
2312+
})
2313+
await Instance.provide({
2314+
directory: tmp.path,
2315+
fn: async () => {
2316+
const providers = await Provider.list()
2317+
const model = providers[ProviderID.make("no-prompt-provider")].models["no-prompt-model"]
2318+
expect(model.prompt).toBeUndefined()
2319+
},
2320+
})
2321+
})
2322+
2323+
test("model prompt overrides existing model prompt", async () => {
2324+
await using tmp = await tmpdir({
2325+
init: async (dir) => {
2326+
await Bun.write(
2327+
path.join(dir, "opencode.json"),
2328+
JSON.stringify({
2329+
$schema: "https://opencode.ai/config.json",
2330+
provider: {
2331+
anthropic: {
2332+
models: {
2333+
"claude-sonnet-4-20250514": {
2334+
prompt: "You are a specialized Anthropic assistant.",
2335+
},
2336+
},
2337+
},
2338+
},
2339+
}),
2340+
)
2341+
},
2342+
})
2343+
await Instance.provide({
2344+
directory: tmp.path,
2345+
init: async () => {
2346+
Env.set("ANTHROPIC_API_KEY", "test-api-key")
2347+
},
2348+
fn: async () => {
2349+
const providers = await Provider.list()
2350+
const model = providers[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
2351+
expect(model.prompt).toBe("You are a specialized Anthropic assistant.")
2352+
},
2353+
})
2354+
})
2355+
22502356
test("cloudflare-ai-gateway forwards config metadata options", async () => {
22512357
await using tmp = await tmpdir({
22522358
init: async (dir) => {
Lines changed: 96 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,96 @@
1+
import { describe, expect, test } from "bun:test"
2+
import { SystemPrompt } from "../../src/session/system"
3+
import type { Provider } from "../../src/provider/provider"
4+
5+
function makeModel(overrides: Partial<Provider.Model> & { api: Provider.Model["api"] }): Provider.Model {
6+
const { api, ...rest } = overrides
7+
return {
8+
id: "test-model" as any,
9+
providerID: "test-provider" as any,
10+
name: "Test Model",
11+
api,
12+
capabilities: {
13+
temperature: false,
14+
reasoning: false,
15+
attachment: false,
16+
toolcall: true,
17+
input: { text: true, audio: false, image: false, video: false, pdf: false },
18+
output: { text: true, audio: false, image: false, video: false, pdf: false },
19+
interleaved: false,
20+
},
21+
cost: { input: 0, output: 0, cache: { read: 0, write: 0 } },
22+
limit: { context: 128000, output: 4096 },
23+
status: "active",
24+
options: {},
25+
headers: {},
26+
release_date: "2025-01-01",
27+
...rest,
28+
}
29+
}
30+
31+
describe("SystemPrompt.provider", () => {
32+
test("returns custom prompt when model has prompt field", () => {
33+
const model = makeModel({
34+
api: { id: "some-unknown-model", url: "", npm: "" },
35+
prompt: "You are a custom coding assistant.",
36+
})
37+
const result = SystemPrompt.provider(model)
38+
expect(result).toEqual(["You are a custom coding assistant."])
39+
})
40+
41+
test("custom prompt takes priority over model ID matching", () => {
42+
const model = makeModel({
43+
api: { id: "claude-sonnet-4", url: "", npm: "" },
44+
prompt: "Custom prompt overrides claude matching.",
45+
})
46+
const result = SystemPrompt.provider(model)
47+
expect(result).toEqual(["Custom prompt overrides claude matching."])
48+
})
49+
50+
test("falls back to claude prompt when no custom prompt and model ID contains claude", () => {
51+
const model = makeModel({
52+
api: { id: "claude-sonnet-4", url: "", npm: "" },
53+
})
54+
const result = SystemPrompt.provider(model)
55+
expect(result.length).toBe(1)
56+
expect(result[0]).not.toBe("")
57+
// Should not be the fallback prompt (qwen)
58+
const fallbackModel = makeModel({
59+
api: { id: "some-unknown-model", url: "", npm: "" },
60+
})
61+
const fallback = SystemPrompt.provider(fallbackModel)
62+
expect(result[0]).not.toBe(fallback[0])
63+
})
64+
65+
test("falls back to gemini prompt for gemini models", () => {
66+
const model = makeModel({
67+
api: { id: "gemini-pro", url: "", npm: "" },
68+
})
69+
const result = SystemPrompt.provider(model)
70+
expect(result.length).toBe(1)
71+
expect(result[0]).not.toBe("")
72+
})
73+
74+
test("falls back to default prompt for unknown models without custom prompt", () => {
75+
const model = makeModel({
76+
api: { id: "totally-unknown-model", url: "", npm: "" },
77+
})
78+
const result = SystemPrompt.provider(model)
79+
expect(result.length).toBe(1)
80+
expect(result[0]).not.toBe("")
81+
})
82+
83+
test("model without prompt field uses default matching", () => {
84+
const model = makeModel({
85+
api: { id: "gpt-5-turbo", url: "", npm: "" },
86+
})
87+
const result = SystemPrompt.provider(model)
88+
expect(result.length).toBe(1)
89+
// gpt-5 should match PROMPT_CODEX, not the fallback
90+
const fallbackModel = makeModel({
91+
api: { id: "unknown", url: "", npm: "" },
92+
})
93+
const fallback = SystemPrompt.provider(fallbackModel)
94+
expect(result[0]).not.toBe(fallback[0])
95+
})
96+
})

0 commit comments

Comments
 (0)