@@ -10,6 +10,36 @@ const normalizeNumber = (value, fallback) => {
1010 return Number . isFinite ( parsed ) ? parsed : fallback ;
1111} ;
1212
// Coerce any value to a trimmed string; null and undefined collapse to ''.
const normalizeString = (value) =>
  value === undefined || value === null ? '' : String(value).trim();
17+
// Parse a loose boolean flag (e.g. from env vars or CLI input).
// Real booleans pass through; recognized string/number tokens map to
// true/false; null, undefined, '' and unrecognized tokens yield `fallback`.
const normalizeBoolean = (value, fallback = false) => {
  if (typeof value === 'boolean') return value;
  if (value === undefined || value === null || value === '') return fallback;
  const token = String(value).trim().toLowerCase();
  const table = new Map([
    ['true', true], ['1', true], ['yes', true], ['y', true], ['on', true],
    ['false', false], ['0', false], ['no', false], ['n', false], ['off', false],
  ]);
  // `??` is safe here: stored values are booleans, never nullish.
  return table.get(token) ?? fallback;
};
26+
// Build the shared proxy-server defaults from caller options, falling back
// to environment variables and hard-coded values. Returns both the resolved
// `defaults` object and the caller's trimmed `requestedModel` (which may be
// '' when no model was explicitly requested).
const buildDefaults = (options = {}) => {
  const requestedModel = normalizeString(options.model);
  // NOTE: host/model use `||` on purpose (treat '' as unset); `system`
  // uses `??` so an explicit empty-string system prompt is honored.
  const host = options.host || '127.0.0.1';
  const model = requestedModel || process.env.PUTER_AI_MODEL || 'gpt-4.1-nano';
  const system = options.system ?? process.env.PUTER_AI_SYSTEM ?? '';
  const defaults = {
    host,
    port: normalizeNumber(options.port, 8080),
    model,
    system,
    maxTokens: normalizeNumber(options.maxTokens, 2048),
    temperature: normalizeNumber(options.temperature, 1),
    testMode: normalizeBoolean(options.testMode, false),
  };
  return { defaults, requestedModel };
};
42+
1343const estimateTokens = ( text ) => {
1444 if ( ! text ) return 0 ;
1545 const trimmed = String ( text ) . trim ( ) ;
@@ -139,14 +169,7 @@ const resolveAvailableModelsRaw = async (puter) => {
139169} ;
140170
141171export const createAIProxyServer = ( options = { } ) => {
142- const defaults = {
143- host : options . host || '127.0.0.1' ,
144- port : normalizeNumber ( options . port , 8080 ) ,
145- model : options . model || process . env . PUTER_AI_MODEL || 'gpt-5-nano' ,
146- system : options . system ?? process . env . PUTER_AI_SYSTEM ?? '' ,
147- maxTokens : normalizeNumber ( options . maxTokens , 1024 ) ,
148- temperature : normalizeNumber ( options . temperature , 1 )
149- } ;
172+ const { defaults } = buildDefaults ( options ) ;
150173 const availableModelsRaw = options . availableModelsRaw ;
151174 const availableModelsNormalized = Array . isArray ( availableModelsRaw )
152175 ? normalizeModelIds ( availableModelsRaw )
@@ -201,6 +224,7 @@ export const createAIProxyServer = (options = {}) => {
201224 const temperature = normalizeNumber ( body . temperature , defaults . temperature ) ;
202225 const maxTokens = normalizeNumber ( body . max_tokens , defaults . maxTokens ) ;
203226 const stream = ! ! body . stream ;
227+ const testMode = typeof body . testMode === 'boolean' ? body . testMode : defaults . testMode ;
204228
205229 try {
206230 const profileModule = getProfileModule ( ) ;
@@ -225,7 +249,7 @@ export const createAIProxyServer = (options = {}) => {
225249 }
226250 }
227251
228- const result = await puter . ai . chat ( prompt , {
252+ const result = await puter . ai . chat ( prompt , testMode , {
229253 model,
230254 temperature,
231255 maxTokens,
@@ -257,17 +281,7 @@ export const createAIProxyServer = (options = {}) => {
257281} ;
258282
259283export const startAIProxyServer = async ( options = { } ) => {
260- const requestedModel = typeof options . model === 'string'
261- ? options . model . trim ( )
262- : ( options . model ? String ( options . model ) . trim ( ) : '' ) ;
263- const defaults = {
264- host : options . host || '127.0.0.1' ,
265- port : normalizeNumber ( options . port , 8080 ) ,
266- model : requestedModel || process . env . PUTER_AI_MODEL || 'gpt-5-nano' ,
267- system : options . system ?? process . env . PUTER_AI_SYSTEM ?? '' ,
268- maxTokens : normalizeNumber ( options . maxTokens , 1024 ) ,
269- temperature : normalizeNumber ( options . temperature , 1 )
270- } ;
284+ const { defaults, requestedModel } = buildDefaults ( options ) ;
271285 const profileModule = getProfileModule ( ) ;
272286 const authToken = profileModule . getAuthToken ( ) ;
273287 if ( ! authToken ) {
@@ -311,6 +325,7 @@ export const startAIProxyServer = async (options = {}) => {
311325 console . log ( chalk . dim ( ` system: ${ systemPreview } ` ) ) ;
312326 console . log ( chalk . dim ( ` max_tokens: ${ defaults . maxTokens } ` ) ) ;
313327 console . log ( chalk . dim ( ` temperature: ${ defaults . temperature } ` ) ) ;
328+ console . log ( chalk . dim ( ` testMode: ${ defaults . testMode } ` ) ) ;
314329 console . log ( chalk . cyan ( 'Usage' ) ) ;
315330 console . log ( chalk . dim ( ` GET http://${ host } :${ port } /v1/models` ) ) ;
316331 console . log ( chalk . dim ( ` POST http://${ host } :${ port } /v1/chat/completions` ) ) ;
0 commit comments