Skip to content

Commit 951cc9e

Browse files
📝 Add docstrings to nanogpt-provider
Docstring generation was requested by @0xGingi. * #2746 (comment) The following files were modified: * `cmd/testai/main-testai.go` * `pkg/aiusechat/usechat-mode.go`
1 parent a07e078 commit 951cc9e

2 files changed

Lines changed: 101 additions & 5 deletions

File tree

cmd/testai/main-testai.go

Lines changed: 83 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ const (
2727
DefaultAnthropicModel = "claude-sonnet-4-5"
2828
DefaultOpenAIModel = "gpt-5.1"
2929
DefaultOpenRouterModel = "mistralai/mistral-small-3.2-24b-instruct"
30+
DefaultNanoGPTModel = "zai-org/glm-4.7"
3031
DefaultGeminiModel = "gemini-3-pro-preview"
3132
)
3233

@@ -207,6 +208,12 @@ func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes.
207208
}
208209
}
209210

211+
// testOpenRouter sends the provided message to the OpenRouter chat completions endpoint using WaveAIPostMessageWrap
212+
// and streams the model's output to a test SSE writer.
213+
//
214+
// It requires the OPENROUTER_APIKEY environment variable and will exit the process with a message if the key is missing.
215+
// The given model, message, and tool definitions are included in the chat request. The function prints test metadata
216+
// (model, message, chat ID) and any streaming error to stdout.
210217
func testOpenRouter(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) {
211218
apiKey := os.Getenv("OPENROUTER_APIKEY")
212219
if apiKey == "" {
@@ -257,6 +264,60 @@ func testOpenRouter(ctx context.Context, model, message string, tools []uctypes.
257264
}
258265
}
259266

267+
// testNanoGPT sends a single test message to a NanoGPT endpoint using WaveAIPostMessageWrap and streams the AI response to a TestResponseWriter.
268+
// It prints the chosen model, message, and generated chat ID to stdout. If the NANOGPT_KEY environment variable is not set, the function prints an error and exits the process.
269+
func testNanoGPT(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) {
270+
apiKey := os.Getenv("NANOGPT_KEY")
271+
if apiKey == "" {
272+
fmt.Println("Error: NANOGPT_KEY environment variable not set")
273+
os.Exit(1)
274+
}
275+
276+
opts := &uctypes.AIOptsType{
277+
APIType: uctypes.APIType_OpenAIChat,
278+
APIToken: apiKey,
279+
Endpoint: "https://nano-gpt.com/api/v1/chat/completions",
280+
Model: model,
281+
MaxTokens: 4096,
282+
}
283+
284+
chatID := uuid.New().String()
285+
286+
aiMessage := &uctypes.AIMessage{
287+
MessageId: uuid.New().String(),
288+
Parts: []uctypes.AIMessagePart{
289+
{
290+
Type: uctypes.AIMessagePartTypeText,
291+
Text: message,
292+
},
293+
},
294+
}
295+
296+
fmt.Printf("Testing NanoGPT with WaveAIPostMessageWrap, model: %s\n", model)
297+
fmt.Printf("Message: %s\n", message)
298+
fmt.Printf("Chat ID: %s\n", chatID)
299+
fmt.Println("---")
300+
301+
testWriter := &TestResponseWriter{}
302+
sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx)
303+
defer sseHandler.Close()
304+
305+
chatOpts := uctypes.WaveChatOpts{
306+
ChatId: chatID,
307+
ClientId: uuid.New().String(),
308+
Config: *opts,
309+
Tools: tools,
310+
SystemPrompt: []string{"You are a helpful assistant. Be concise and clear in your responses."},
311+
}
312+
err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts)
313+
if err != nil {
314+
fmt.Printf("NanoGPT streaming error: %v\n", err)
315+
}
316+
}
317+
318+
// testAnthropic runs a streaming test against Anthropic using the given model, message, and tools.
319+
// It requires the ANTHROPIC_APIKEY environment variable and will print an error and exit if it is unset.
320+
// The function prints test info (model, message, chat ID) and streams the model's responses to stdout via an SSE test writer using WaveAIPostMessageWrap.
260321
func testAnthropic(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) {
261322
apiKey := os.Getenv("ANTHROPIC_APIKEY")
262323
if apiKey == "" {
@@ -374,14 +435,16 @@ func testT3(ctx context.Context) {
374435
testOpenAIComp(ctx, "gpt-4o", "what is 2+2? please be brief", nil)
375436
}
376437

438+
// testT4 runs a preset Gemini integration test that asks the model to compute 2+2+8 using the provider adder tool.
377439
func testT4(ctx context.Context) {
378440
tool := aiusechat.GetAdderToolDefinition()
379441
tools := []uctypes.ToolDefinition{tool}
380442
testGemini(ctx, DefaultGeminiModel, "what is 2+2+8, use the provider adder tool", tools)
381443
}
382444

445+
// printUsage prints the program usage help including example invocations, the default model names for each supported provider, and the environment variables required for API keys.
383446
func printUsage() {
384-
fmt.Println("Usage: go run main-testai.go [--anthropic|--openaicomp|--openrouter|--gemini] [--tools] [--model <model>] [message]")
447+
fmt.Println("Usage: go run main-testai.go [--anthropic|--openaicomp|--openrouter|--nanogpt|--gemini] [--tools] [--model <model>] [message]")
385448
fmt.Println("Examples:")
386449
fmt.Println(" go run main-testai.go 'What is 2+2?'")
387450
fmt.Println(" go run main-testai.go --model o4-mini 'What is 2+2?'")
@@ -390,6 +453,8 @@ func printUsage() {
390453
fmt.Println(" go run main-testai.go --openaicomp --model gpt-4o 'What is 2+2?'")
391454
fmt.Println(" go run main-testai.go --openrouter 'What is 2+2?'")
392455
fmt.Println(" go run main-testai.go --openrouter --model anthropic/claude-3.5-sonnet 'What is 2+2?'")
456+
fmt.Println(" go run main-testai.go --nanogpt 'What is 2+2?'")
457+
fmt.Println(" go run main-testai.go --nanogpt --model gpt-4o 'What is 2+2?'")
393458
fmt.Println(" go run main-testai.go --gemini 'What is 2+2?'")
394459
fmt.Println(" go run main-testai.go --gemini --model gemini-1.5-pro 'What is 2+2?'")
395460
fmt.Println(" go run main-testai.go --tools 'Help me configure GitHub Actions monitoring'")
@@ -399,24 +464,34 @@ func printUsage() {
399464
fmt.Printf(" Anthropic: %s\n", DefaultAnthropicModel)
400465
fmt.Printf(" OpenAI Completions: gpt-4o\n")
401466
fmt.Printf(" OpenRouter: %s\n", DefaultOpenRouterModel)
467+
fmt.Printf(" NanoGPT: %s\n", DefaultNanoGPTModel)
402468
fmt.Printf(" Google Gemini: %s\n", DefaultGeminiModel)
403469
fmt.Println("")
404470
fmt.Println("Environment variables:")
405471
fmt.Println(" OPENAI_APIKEY (for OpenAI models)")
406472
fmt.Println(" ANTHROPIC_APIKEY (for Anthropic models)")
407473
fmt.Println(" OPENROUTER_APIKEY (for OpenRouter models)")
474+
fmt.Println(" NANOGPT_KEY (for NanoGPT models)")
408475
fmt.Println(" GOOGLE_APIKEY (for Google Gemini models)")
409476
}
410477

478+
// main parses command-line flags, selects an AI provider and model, and runs the corresponding test flow.
479+
//
480+
// Recognized flags allow choosing among Anthropic, OpenAI (chat or completions), OpenRouter, NanoGPT, and Google Gemini,
481+
// enable optional test tools, request usage, or run one of the preset tests (t1–t4). If a preset test is requested the
482+
// program runs it and exits. When no model is provided, a sensible default is chosen based on the selected provider.
483+
// The first non-flag argument is used as the prompt (defaults to "What is 2+2?"). If tools are enabled, tool definitions
484+
// are loaded and passed to the selected provider-specific test function, which performs the streaming test run.
411485
func main() {
412-
var anthropic, openaicomp, openrouter, gemini, tools, help, t1, t2, t3, t4 bool
486+
var anthropic, openaicomp, openrouter, nanogpt, gemini, tools, help, t1, t2, t3, t4 bool
413487
var model string
414488
flag.BoolVar(&anthropic, "anthropic", false, "Use Anthropic API instead of OpenAI")
415489
flag.BoolVar(&openaicomp, "openaicomp", false, "Use OpenAI Completions API")
416490
flag.BoolVar(&openrouter, "openrouter", false, "Use OpenRouter API")
491+
flag.BoolVar(&nanogpt, "nanogpt", false, "Use NanoGPT API")
417492
flag.BoolVar(&gemini, "gemini", false, "Use Google Gemini API")
418493
flag.BoolVar(&tools, "tools", false, "Enable GitHub Actions Monitor tools for testing")
419-
flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic, %s for OpenRouter, %s for Gemini)", DefaultOpenAIModel, DefaultAnthropicModel, DefaultOpenRouterModel, DefaultGeminiModel))
494+
flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic, %s for OpenRouter, %s for NanoGPT, %s for Gemini)", DefaultOpenAIModel, DefaultAnthropicModel, DefaultOpenRouterModel, DefaultNanoGPTModel, DefaultGeminiModel))
420495
flag.BoolVar(&help, "help", false, "Show usage information")
421496
flag.BoolVar(&t1, "t1", false, fmt.Sprintf("Run preset T1 test (%s with 'what is 2+2')", DefaultAnthropicModel))
422497
flag.BoolVar(&t2, "t2", false, fmt.Sprintf("Run preset T2 test (%s with 'what is 2+2')", DefaultOpenAIModel))
@@ -457,6 +532,8 @@ func main() {
457532
model = "gpt-4o"
458533
} else if openrouter {
459534
model = DefaultOpenRouterModel
535+
} else if nanogpt {
536+
model = DefaultNanoGPTModel
460537
} else if gemini {
461538
model = DefaultGeminiModel
462539
} else {
@@ -481,9 +558,11 @@ func main() {
481558
testOpenAIComp(ctx, model, message, toolDefs)
482559
} else if openrouter {
483560
testOpenRouter(ctx, model, message, toolDefs)
561+
} else if nanogpt {
562+
testNanoGPT(ctx, model, message, toolDefs)
484563
} else if gemini {
485564
testGemini(ctx, model, message, toolDefs)
486565
} else {
487566
testOpenAI(ctx, model, message, toolDefs)
488567
}
489-
}
568+
}

pkg/aiusechat/usechat-mode.go

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ const (
2121
OpenAIResponsesEndpoint = "https://api.openai.com/v1/responses"
2222
OpenAIChatEndpoint = "https://api.openai.com/v1/chat/completions"
2323
OpenRouterChatEndpoint = "https://openrouter.ai/api/v1/chat/completions"
24+
NanoGPTChatEndpoint = "https://nano-gpt.com/api/v1/chat/completions"
2425
AzureLegacyEndpointTemplate = "https://%s.openai.azure.com/openai/deployments/%s/chat/completions?api-version=%s"
2526
AzureResponsesEndpointTemplate = "https://%s.openai.azure.com/openai/v1/responses"
2627
AzureChatEndpointTemplate = "https://%s.openai.azure.com/openai/v1/chat/completions"
@@ -30,6 +31,7 @@ const (
3031

3132
OpenAIAPITokenSecretName = "OPENAI_KEY"
3233
OpenRouterAPITokenSecretName = "OPENROUTER_KEY"
34+
NanoGPTAPITokenSecretName = "NANOGPT_KEY"
3335
AzureOpenAIAPITokenSecretName = "AZURE_OPENAI_KEY"
3436
GoogleAIAPITokenSecretName = "GOOGLE_AI_KEY"
3537
)
@@ -53,6 +55,10 @@ func resolveAIMode(requestedMode string, premium bool) (string, *wconfig.AIModeC
5355
return mode, config, nil
5456
}
5557

58+
// applyProviderDefaults fills missing fields in an AIModeConfigType with sensible provider-specific defaults.
59+
// It mutates the provided config in-place.
60+
// Defaults set include APIType, Endpoint, APITokenSecretName, Capabilities, Azure API version and endpoints,
61+
// Wave cloud enablement, and provider-specific endpoint templates where applicable.
5662
func applyProviderDefaults(config *wconfig.AIModeConfigType) {
5763
if config.Provider == uctypes.AIProvider_Wave {
5864
config.WaveAICloud = true
@@ -99,6 +105,17 @@ func applyProviderDefaults(config *wconfig.AIModeConfigType) {
99105
config.APITokenSecretName = OpenRouterAPITokenSecretName
100106
}
101107
}
108+
if config.Provider == uctypes.AIProvider_NanoGPT {
109+
if config.APIType == "" {
110+
config.APIType = uctypes.APIType_OpenAIChat
111+
}
112+
if config.Endpoint == "" {
113+
config.Endpoint = NanoGPTChatEndpoint
114+
}
115+
if config.APITokenSecretName == "" {
116+
config.APITokenSecretName = NanoGPTAPITokenSecretName
117+
}
118+
}
102119
if config.Provider == uctypes.AIProvider_AzureLegacy {
103120
if config.AzureAPIVersion == "" {
104121
config.AzureAPIVersion = AzureLegacyDefaultAPIVersion
@@ -264,4 +281,4 @@ func broadcastAIModeConfigs(configs map[string]wconfig.AIModeConfigType) {
264281
Event: wps.Event_AIModeConfig,
265282
Data: update,
266283
})
267-
}
284+
}

0 commit comments

Comments (0)