diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts index 4658bc1af2..4f8f4a8473 100644 --- a/frontend/types/gotypes.d.ts +++ b/frontend/types/gotypes.d.ts @@ -23,6 +23,7 @@ declare global { "ai:apitype"?: string; "ai:model"?: string; "ai:thinkinglevel"?: string; + "ai:verbosity"?: string; "ai:endpoint"?: string; "ai:azureapiversion"?: string; "ai:apitoken"?: string; diff --git a/pkg/aiusechat/openai/openai-convertmessage.go b/pkg/aiusechat/openai/openai-convertmessage.go index fd0df85486..3accb354fa 100644 --- a/pkg/aiusechat/openai/openai-convertmessage.go +++ b/pkg/aiusechat/openai/openai-convertmessage.go @@ -22,6 +22,8 @@ import ( const ( OpenAIDefaultAPIVersion = "2024-12-31" OpenAIDefaultMaxTokens = 4096 + // "medium" verbosity is more widely supported across models than "low" + OpenAIDefaultVerbosity = "medium" ) // convertContentBlockToParts converts a single content block to UIMessageParts @@ -190,10 +192,11 @@ func debugPrintReq(req *OpenAIRequest, endpoint string) { func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes.WaveChatOpts, cont *uctypes.WaveContinueResponse) (*http.Request, error) { opts := chatOpts.Config - // If continuing from premium rate limit, downgrade to default model and low thinking + // If continuing from premium rate limit, downgrade to default model and medium thinking + // (medium is more widely supported than low across different models) if cont != nil && cont.ContinueFromKind == uctypes.StopKindPremiumRateLimit { opts.Model = uctypes.DefaultOpenAIModel - opts.ThinkingLevel = uctypes.ThinkingLevelLow + opts.ThinkingLevel = uctypes.ThinkingLevelMedium } if opts.Model == "" { @@ -229,13 +232,18 @@ func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes. 
} // Build request body + // Use configured verbosity, or fall back to default constant + verbosity := opts.Verbosity + if verbosity == "" { + verbosity = OpenAIDefaultVerbosity + } reqBody := &OpenAIRequest{ Model: opts.Model, Input: inputs, Stream: true, StreamOptions: &StreamOptionsType{IncludeObfuscation: false}, MaxOutputTokens: maxTokens, - Text: &TextType{Verbosity: "low"}, + Text: &TextType{Verbosity: verbosity}, } // Add system prompt as instructions if provided @@ -264,10 +272,10 @@ func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes. reqBody.Tools = append(reqBody.Tools, webSearchTool) } - // Set reasoning based on thinking level + // Set reasoning based on thinking level from config if opts.ThinkingLevel != "" { reqBody.Reasoning = &ReasoningType{ - Effort: opts.ThinkingLevel, // low, medium, high map directly + Effort: opts.ThinkingLevel, } if opts.Model == "gpt-5" || opts.Model == "gpt-5.1" { reqBody.Reasoning.Summary = "auto" diff --git a/pkg/aiusechat/uctypes/uctypes.go b/pkg/aiusechat/uctypes/uctypes.go index b857f141bd..fac8cb7f2a 100644 --- a/pkg/aiusechat/uctypes/uctypes.go +++ b/pkg/aiusechat/uctypes/uctypes.go @@ -278,6 +278,7 @@ type AIOptsType struct { MaxTokens int `json:"maxtokens,omitempty"` TimeoutMs int `json:"timeoutms,omitempty"` ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh + Verbosity string `json:"verbosity,omitempty"` // Text verbosity level (OpenAI Responses API only, ignored by other backends) AIMode string `json:"aimode,omitempty"` Capabilities []string `json:"capabilities,omitempty"` WaveAIPremium bool `json:"waveaipremium,omitempty"` diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index 8d8fcf6446..46ec030634 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -110,12 +110,17 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo waveobj.ObjRTInfo, if thinkingLevel == "" { 
thinkingLevel = uctypes.ThinkingLevelMedium } + verbosity := config.Verbosity + if verbosity == "" { + verbosity = "medium" // default verbosity; deliberately NOT a ThinkingLevel constant (separate enum, values only coincide) + } opts := &uctypes.AIOptsType{ Provider: config.Provider, APIType: config.APIType, Model: config.Model, MaxTokens: maxTokens, ThinkingLevel: thinkingLevel, + Verbosity: verbosity, AIMode: aiMode, Endpoint: baseUrl, Capabilities: config.Capabilities, diff --git a/pkg/wconfig/settingsconfig.go b/pkg/wconfig/settingsconfig.go index 50a1da2474..efeedfc6db 100644 --- a/pkg/wconfig/settingsconfig.go +++ b/pkg/wconfig/settingsconfig.go @@ -273,6 +273,7 @@ type AIModeConfigType struct { APIType string `json:"ai:apitype,omitempty" jsonschema:"enum=google-gemini,enum=openai-responses,enum=openai-chat"` Model string `json:"ai:model,omitempty"` ThinkingLevel string `json:"ai:thinkinglevel,omitempty" jsonschema:"enum=low,enum=medium,enum=high"` + Verbosity string `json:"ai:verbosity,omitempty" jsonschema:"enum=low,enum=medium,enum=high,description=Text verbosity level (OpenAI Responses API only)"` Endpoint string `json:"ai:endpoint,omitempty"` AzureAPIVersion string `json:"ai:azureapiversion,omitempty"` APIToken string `json:"ai:apitoken,omitempty"` diff --git a/schema/waveai.json b/schema/waveai.json index 1612cac88d..1c3a186beb 100644 --- a/schema/waveai.json +++ b/schema/waveai.json @@ -46,6 +46,15 @@ "high" ] }, + "ai:verbosity": { + "type": "string", + "enum": [ + "low", + "medium", + "high" + ], + "description": "Text verbosity level (OpenAI Responses API only)" + }, "ai:endpoint": { "type": "string" },