Commit c7c9817

feat(ai): add testMode option and update default model
Parent: 288024e

3 files changed: 71 additions, 30 deletions


bin/index.js

Lines changed: 1 addition & 0 deletions
@@ -63,6 +63,7 @@ async function main() {
     .option('--temperature <number>', 'Temperature')
     .option('--host <host>', 'Host', '127.0.0.1')
     .option('--port <number>', 'Port', '8080')
+    .option('--testMode <boolean>, --test-mode <boolean>', 'Test mode', 'false')
     .action(async (options) => {
       await startAIProxyServer(options);
     });
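With the flag registered, test mode can be switched on at launch. A hypothetical invocation, assuming the CLI is exposed as `puter` with an `ai` subcommand (neither name is shown in this diff):

    # --test-mode takes a string; the new normalizeBoolean helper in
    # src/commands/ai.js accepts true/1/yes/y/on and false/0/no/n/off
    puter ai --test-mode true --host 127.0.0.1 --port 8080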

src/commands/ai.js

Lines changed: 35 additions & 20 deletions
@@ -10,6 +10,36 @@ const normalizeNumber = (value, fallback) => {
   return Number.isFinite(parsed) ? parsed : fallback;
 };
 
+const normalizeString = (value) => {
+  if (value === undefined || value === null) return '';
+  return String(value).trim();
+};
+
+const normalizeBoolean = (value, fallback = false) => {
+  if (value === undefined || value === null || value === '') return fallback;
+  if (typeof value === 'boolean') return value;
+  const normalized = String(value).trim().toLowerCase();
+  if (['true', '1', 'yes', 'y', 'on'].includes(normalized)) return true;
+  if (['false', '0', 'no', 'n', 'off'].includes(normalized)) return false;
+  return fallback;
+};
+
+const buildDefaults = (options = {}) => {
+  const requestedModel = normalizeString(options.model);
+  return {
+    defaults: {
+      host: options.host || '127.0.0.1',
+      port: normalizeNumber(options.port, 8080),
+      model: requestedModel || process.env.PUTER_AI_MODEL || 'gpt-4.1-nano',
+      system: options.system ?? process.env.PUTER_AI_SYSTEM ?? '',
+      maxTokens: normalizeNumber(options.maxTokens, 2048),
+      temperature: normalizeNumber(options.temperature, 1),
+      testMode: normalizeBoolean(options.testMode, false)
+    },
+    requestedModel
+  };
+};
+
 const estimateTokens = (text) => {
   if (!text) return 0;
   const trimmed = String(text).trim();
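For reference, how the new normalizeBoolean helper resolves common CLI-style inputs, as a standalone sketch mirroring the logic above:

    normalizeBoolean(true)      // true  (booleans pass through untouched)
    normalizeBoolean(' YES ')   // true  (trimmed, case-insensitive match)
    normalizeBoolean('0')       // false
    normalizeBoolean('maybe')   // false (unrecognized strings return the fallback)
    normalizeBoolean('', true)  // true  (empty string also returns the fallback)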
@@ -139,14 +169,7 @@ const resolveAvailableModelsRaw = async (puter) => {
 };
 
 export const createAIProxyServer = (options = {}) => {
-  const defaults = {
-    host: options.host || '127.0.0.1',
-    port: normalizeNumber(options.port, 8080),
-    model: options.model || process.env.PUTER_AI_MODEL || 'gpt-5-nano',
-    system: options.system ?? process.env.PUTER_AI_SYSTEM ?? '',
-    maxTokens: normalizeNumber(options.maxTokens, 1024),
-    temperature: normalizeNumber(options.temperature, 1)
-  };
+  const { defaults } = buildDefaults(options);
   const availableModelsRaw = options.availableModelsRaw;
   const availableModelsNormalized = Array.isArray(availableModelsRaw)
     ? normalizeModelIds(availableModelsRaw)
@@ -201,6 +224,7 @@ export const createAIProxyServer = (options = {}) => {
     const temperature = normalizeNumber(body.temperature, defaults.temperature);
     const maxTokens = normalizeNumber(body.max_tokens, defaults.maxTokens);
     const stream = !!body.stream;
+    const testMode = typeof body.testMode === 'boolean' ? body.testMode : defaults.testMode;
 
     try {
       const profileModule = getProfileModule();
@@ -225,7 +249,7 @@ export const createAIProxyServer = (options = {}) => {
         }
       }
 
-      const result = await puter.ai.chat(prompt, {
+      const result = await puter.ai.chat(prompt, testMode, {
         model,
         temperature,
         maxTokens,
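Note that body.testMode is honored only when it is a strict JSON boolean; a string such as "true" silently falls back to the server default. A per-request override against the endpoint logged below might look like this (default host and port assumed):

    curl -X POST http://127.0.0.1:8080/v1/chat/completions \
      -H 'content-type: application/json' \
      -d '{"model": "gpt-4.1-nano", "testMode": true, "messages": [{"role": "user", "content": "Hi"}]}'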
@@ -257,17 +281,7 @@ export const createAIProxyServer = (options = {}) => {
 };
 
 export const startAIProxyServer = async (options = {}) => {
-  const requestedModel = typeof options.model === 'string'
-    ? options.model.trim()
-    : (options.model ? String(options.model).trim() : '');
-  const defaults = {
-    host: options.host || '127.0.0.1',
-    port: normalizeNumber(options.port, 8080),
-    model: requestedModel || process.env.PUTER_AI_MODEL || 'gpt-5-nano',
-    system: options.system ?? process.env.PUTER_AI_SYSTEM ?? '',
-    maxTokens: normalizeNumber(options.maxTokens, 1024),
-    temperature: normalizeNumber(options.temperature, 1)
-  };
+  const { defaults, requestedModel } = buildDefaults(options);
   const profileModule = getProfileModule();
   const authToken = profileModule.getAuthToken();
   if (!authToken) {
@@ -311,6 +325,7 @@ export const startAIProxyServer = async (options = {}) => {
   console.log(chalk.dim(`  system: ${systemPreview}`));
   console.log(chalk.dim(`  max_tokens: ${defaults.maxTokens}`));
   console.log(chalk.dim(`  temperature: ${defaults.temperature}`));
+  console.log(chalk.dim(`  testMode: ${defaults.testMode}`));
   console.log(chalk.cyan('Usage'));
   console.log(chalk.dim(`  GET  http://${host}:${port}/v1/models`));
   console.log(chalk.dim(`  POST http://${host}:${port}/v1/chat/completions`));
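The Usage banner above also advertises the models listing, which can presumably be checked with a plain GET (same assumed host and port):

    curl http://127.0.0.1:8080/v1/models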

tests/ai.test.js

Lines changed: 35 additions & 10 deletions
@@ -38,15 +38,15 @@ describe('AI proxy server', () => {
   it('serves models list', async () => {
     const puterMock = {
       ai: {
-        listModels: vi.fn().mockResolvedValue(['gpt-5-nano'])
+        listModels: vi.fn().mockResolvedValue(['gpt-4.1-nano'])
       }
     };
     const { port } = await startServer(puterMock);
     const response = await fetch(`http://127.0.0.1:${port}/v1/models`);
     const data = await response.json();
     expect(response.status).toBe(200);
     expect(data.object).toBe('list');
-    expect(data.data[0].id).toBe('gpt-5-nano');
+    expect(data.data[0].id).toBe('gpt-4.1-nano');
   });
 
   it('serves root heartbeat', async () => {
@@ -66,15 +66,15 @@ describe('AI proxy server', () => {
     const puterMock = {
       ai: {
         chat: vi.fn().mockResolvedValue('Hello there'),
-        listModels: vi.fn().mockResolvedValue(['gpt-5-nano'])
+        listModels: vi.fn().mockResolvedValue(['gpt-4.1-nano'])
       }
     };
     const { port } = await startServer(puterMock);
     const response = await fetch(`http://127.0.0.1:${port}/v1/chat/completions`, {
       method: 'POST',
       headers: { 'content-type': 'application/json' },
       body: JSON.stringify({
-        model: 'gpt-5-nano',
+        model: 'gpt-4.1-nano',
         messages: [{ role: 'user', content: 'Hi' }]
       })
     });
@@ -84,19 +84,44 @@ describe('AI proxy server', () => {
     expect(data.choices[0].message.content).toBe('Hello there');
   });
 
+  it('passes testMode when provided', async () => {
+    const chat = vi.fn().mockResolvedValue('Hello there');
+    const puterMock = {
+      ai: {
+        chat,
+        listModels: vi.fn().mockResolvedValue(['gpt-4.1-nano'])
+      }
+    };
+    const { port } = await startServer(puterMock);
+    const response = await fetch(`http://127.0.0.1:${port}/v1/chat/completions`, {
+      method: 'POST',
+      headers: { 'content-type': 'application/json' },
+      body: JSON.stringify({
+        testMode: true,
+        messages: [{ role: 'user', content: 'Hi' }]
+      })
+    });
+    const data = await response.json();
+    expect(response.status).toBe(200);
+    expect(data.choices[0].message.content).toBe('Hello there');
+    expect(chat).toHaveBeenCalledWith('USER: Hi', true, expect.objectContaining({
+      model: 'gpt-4.1-nano'
+    }));
+  });
+
   it('serves streaming chat completion', async () => {
     const puterMock = {
       ai: {
         chat: vi.fn().mockResolvedValue('Hello world'),
-        listModels: vi.fn().mockResolvedValue(['gpt-5-nano'])
+        listModels: vi.fn().mockResolvedValue(['gpt-4.1-nano'])
       }
     };
     const { port } = await startServer(puterMock);
     const response = await fetch(`http://127.0.0.1:${port}/v1/chat/completions`, {
       method: 'POST',
       headers: { 'content-type': 'application/json' },
       body: JSON.stringify({
-        model: 'gpt-5-nano',
+        model: 'gpt-4.1-nano',
         stream: true,
         messages: [{ role: 'user', content: 'Hi' }]
       })
@@ -111,7 +136,7 @@ describe('AI proxy server', () => {
     const puterMock = {
       ai: {
         chat: vi.fn().mockResolvedValue('Hello world'),
-        listModels: vi.fn().mockResolvedValue(['gpt-5-nano'])
+        listModels: vi.fn().mockResolvedValue(['gpt-4.1-nano'])
       }
     };
     const { port } = await startServer(puterMock);
@@ -132,7 +157,7 @@ describe('AI proxy server', () => {
     vi.mocked(getProfileModule).mockReturnValue({
       getAuthToken: vi.fn(() => 'test-token')
     });
-    const listModels = vi.fn().mockResolvedValue(['gpt-5-nano']);
+    const listModels = vi.fn().mockResolvedValue(['gpt-4.1-nano']);
     vi.mocked(getPuter).mockReturnValue({
       ai: {
         listModels
@@ -149,10 +174,10 @@ describe('AI proxy server', () => {
     });
     vi.mocked(getPuter).mockReturnValue({
       ai: {
-        listModels: vi.fn().mockResolvedValue(['gpt-5-nano'])
+        listModels: vi.fn().mockResolvedValue(['gpt-4.1-nano'])
       }
     });
-    const server = await startAIProxyServer({ model: 'gpt-5-nano', port: 0 });
+    const server = await startAIProxyServer({ model: 'gpt-4.1-nano', port: 0 });
     await server.stop();
   });
 });
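The suite relies on vitest mocks (vi.fn, vi.mocked), so the new testMode test runs alongside the existing ones; assuming a standard vitest setup, something like:

    npx vitest run tests/ai.test.js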
