Skip to content

Commit 5a6e9f5

Browse files
Add OpenAI GPT-5.3 chat latest model support
Reference: https://developers.openai.com/api/docs/models/gpt-5.3-chat-latest
1 parent 9a58f76 commit 5a6e9f5

4 files changed

Lines changed: 84 additions & 0 deletions

File tree

.changeset/gpt-5-3-chat-latest.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
"roo-cline": patch
3+
---
4+
5+
Add OpenAI's GPT-5.3-Chat-Latest model support

packages/types/src/providers/openai.ts

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,18 @@ export const openAiNativeModels = {
9393
cacheReadsPrice: 0.175,
9494
description: "GPT-5.2 Chat: Optimized for conversational AI and chat use cases",
9595
},
96+
"gpt-5.3-chat-latest": {
97+
maxTokens: 16_384,
98+
contextWindow: 128_000,
99+
includedTools: ["apply_patch"],
100+
excludedTools: ["apply_diff", "write_to_file"],
101+
supportsImages: true,
102+
supportsPromptCache: true,
103+
inputPrice: 1.75,
104+
outputPrice: 14.0,
105+
cacheReadsPrice: 0.175,
106+
description: "GPT-5.3 Chat: Optimized for conversational AI and chat use cases",
107+
},
96108
"gpt-5.1": {
97109
maxTokens: 128000,
98110
contextWindow: 400000,

src/api/providers/__tests__/openai-native-usage.spec.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -378,6 +378,9 @@ describe("OpenAiNativeHandler - normalizeUsage", () => {
378378

379379
const fourOBody = buildRequestBodyForModel("gpt-4o")
380380
expect(fourOBody.prompt_cache_retention).toBeUndefined()
381+
382+
const chatModelBody = buildRequestBodyForModel("gpt-5.3-chat-latest")
383+
expect(chatModelBody.prompt_cache_retention).toBeUndefined()
381384
})
382385

383386
it("should not set prompt_cache_retention when the model does not support prompt caching", () => {

src/api/providers/__tests__/openai-native.spec.ts

Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -249,6 +249,19 @@ describe("OpenAiNativeHandler", () => {
249249
expect(modelInfo.info.supportsReasoningEffort).toEqual(["low", "medium", "high", "xhigh"])
250250
})
251251

252+
it("should return GPT-5.3 Chat model info when selected", () => {
253+
const chatHandler = new OpenAiNativeHandler({
254+
...mockOptions,
255+
apiModelId: "gpt-5.3-chat-latest",
256+
})
257+
258+
const modelInfo = chatHandler.getModel()
259+
expect(modelInfo.id).toBe("gpt-5.3-chat-latest")
260+
expect(modelInfo.info.maxTokens).toBe(16_384)
261+
expect(modelInfo.info.contextWindow).toBe(128000)
262+
expect(modelInfo.info.supportsImages).toBe(true)
263+
})
264+
252265
it("should handle undefined model ID", () => {
253266
const handlerWithoutModel = new OpenAiNativeHandler({
254267
openAiNativeApiKey: "test-api-key",
@@ -345,6 +358,57 @@ describe("OpenAiNativeHandler", () => {
345358
expect(textChunks[1].text).toBe(" world")
346359
})
347360

361+
it("should handle GPT-5.3 Chat model with Responses API", async () => {
362+
// Mock fetch for Responses API
363+
const mockFetch = vitest.fn().mockResolvedValue({
364+
ok: true,
365+
body: new ReadableStream({
366+
start(controller) {
367+
controller.enqueue(
368+
new TextEncoder().encode(
369+
'data: {"type":"response.output_item.added","item":{"type":"text","text":"Chat reply"}}\n\n',
370+
),
371+
)
372+
controller.enqueue(new TextEncoder().encode("data: [DONE]\n\n"))
373+
controller.close()
374+
},
375+
}),
376+
})
377+
global.fetch = mockFetch as any
378+
379+
// Mock SDK to fail so it uses fetch
380+
mockResponsesCreate.mockRejectedValue(new Error("SDK not available"))
381+
382+
handler = new OpenAiNativeHandler({
383+
...mockOptions,
384+
apiModelId: "gpt-5.3-chat-latest",
385+
})
386+
387+
const stream = handler.createMessage(systemPrompt, messages)
388+
const chunks: any[] = []
389+
for await (const chunk of stream) {
390+
chunks.push(chunk)
391+
}
392+
393+
expect(mockFetch).toHaveBeenCalledWith(
394+
"https://api.openai.com/v1/responses",
395+
expect.objectContaining({
396+
body: expect.any(String),
397+
}),
398+
)
399+
const body = (mockFetch.mock.calls[0][1] as any).body as string
400+
const parsedBody = JSON.parse(body)
401+
expect(parsedBody.model).toBe("gpt-5.3-chat-latest")
402+
expect(parsedBody.max_output_tokens).toBe(16_384)
403+
expect(parsedBody.temperature).toBe(0)
404+
expect(parsedBody.reasoning?.effort).toBeUndefined()
405+
expect(parsedBody.text?.verbosity).toBeUndefined()
406+
407+
const textChunks = chunks.filter((chunk) => chunk.type === "text")
408+
expect(textChunks).toHaveLength(1)
409+
expect(textChunks[0].text).toBe("Chat reply")
410+
})
411+
348412
it("should handle GPT-5-mini model with Responses API", async () => {
349413
// Mock fetch for Responses API
350414
const mockFetch = vitest.fn().mockResolvedValue({

0 commit comments

Comments (0)