-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathllm-config.ts
More file actions
133 lines (116 loc) · 3.79 KB
/
llm-config.ts
File metadata and controls
133 lines (116 loc) · 3.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
/**
* LLM Configuration utilities.
*
* Supports two ways to specify an LLM:
* 1. Provider prefix: "openai:gpt-4.1-mini", "groq:llama-3.3-70b-versatile"
* 2. JSON config: '{"apiFormat":"openai-responses","model":"...","baseUrl":"...","apiKeyEnvVar":"..."}'
*/
import { Config, Context, Effect, Layer, Redacted } from "effect"
import { type ApiFormat, LlmConfig } from "./domain.ts"
// Re-export for convenience
export { LlmConfig }
/** Provider presets define common provider configurations */
interface ProviderPreset {
/** Wire protocol spoken by the provider (see ApiFormat in domain.ts). */
readonly apiFormat: ApiFormat
/** Base URL of the provider's HTTP API, without a trailing slash. */
readonly baseUrl: string
/** Name of the environment variable that holds this provider's API key. */
readonly apiKeyEnvVar: string
}
/**
 * Built-in provider presets, keyed by the prefix accepted in
 * "prefix:model" LLM identifiers (e.g. "groq:llama-3.3-70b-versatile").
 * Annotated as Record<string, ProviderPreset> so lookups with an
 * arbitrary user-supplied prefix type-check and yield `undefined` misses.
 */
const PROVIDER_PREFIXES: Record<string, ProviderPreset> = {
  openai: {
    apiFormat: "openai-responses",
    baseUrl: "https://api.openai.com/v1",
    apiKeyEnvVar: "OPENAI_API_KEY"
  },
  anthropic: {
    apiFormat: "anthropic",
    baseUrl: "https://api.anthropic.com",
    apiKeyEnvVar: "ANTHROPIC_API_KEY"
  },
  gemini: {
    apiFormat: "gemini",
    baseUrl: "https://generativelanguage.googleapis.com",
    apiKeyEnvVar: "GEMINI_API_KEY"
  },
  openrouter: {
    apiFormat: "openai-chat-completions",
    baseUrl: "https://openrouter.ai/api/v1",
    apiKeyEnvVar: "OPENROUTER_API_KEY"
  },
  cerebras: {
    apiFormat: "openai-chat-completions",
    baseUrl: "https://api.cerebras.ai/v1",
    apiKeyEnvVar: "CEREBRAS_API_KEY"
  },
  groq: {
    apiFormat: "openai-chat-completions",
    baseUrl: "https://api.groq.com/openai/v1",
    apiKeyEnvVar: "GROQ_API_KEY"
  }
}
/** LLM identifier used when no LLM config value is provided (see resolveLlmConfig). */
export const DEFAULT_LLM = "openai:gpt-4.1-mini"
/**
 * Get LlmConfig by prefix or JSON.
 *
 * Supports:
 * - Provider prefix: "openai:gpt-4.1-mini", "openrouter:anthropic/claude-3.5-sonnet"
 * - JSON config: '{"apiFormat":"openai-responses",...}'
 *
 * @throws Error when the name is malformed JSON, a known provider prefix
 *   with a missing model name, or neither a known prefix nor JSON.
 */
export const getLlmConfig = (name: string): LlmConfig => {
  // JSON config form. Checked first so colons inside the JSON body are
  // never mistaken for a provider prefix.
  if (name.startsWith("{")) {
    let parsed
    try {
      parsed = JSON.parse(name)
    } catch {
      throw new Error(`Invalid JSON LLM config: ${name}`)
    }
    // Constructed outside the try/catch so LlmConfig validation errors
    // surface with their own message instead of being masked as a JSON
    // parse failure.
    return new LlmConfig(parsed)
  }
  // Provider prefix form: "<provider>:<model-name>". colonIndex > 0
  // already guarantees a non-empty prefix, so no separate check is needed.
  const colonIndex = name.indexOf(":")
  if (colonIndex > 0) {
    const prefix = name.slice(0, colonIndex)
    const modelName = name.slice(colonIndex + 1)
    // A "{"-leading model name is treated as an invalid LLM below,
    // preserving the original guard against JSON-looking payloads.
    if (!modelName.startsWith("{")) {
      const provider = PROVIDER_PREFIXES[prefix]
      if (provider) {
        if (!modelName) {
          throw new Error(`Missing model name for provider '${prefix}'. Use: ${prefix}:<model-name>`)
        }
        return new LlmConfig({ ...provider, model: modelName })
      }
    }
  }
  const validPrefixes = Object.keys(PROVIDER_PREFIXES).join(", ")
  throw new Error(
    `Invalid LLM: ${name}\n` +
      `Use prefix syntax: ${validPrefixes}:<model-name>\n` +
      `Example: openai:gpt-4.1-mini, anthropic:claude-sonnet-4-20250514`
  )
}
/**
 * Look up the API key for an LlmConfig in the process environment.
 * Returns undefined when the configured variable is unset or empty.
 */
export const getApiKey = (config: LlmConfig): Redacted.Redacted | undefined => {
  const raw = process.env[config.apiKeyEnvVar]
  if (!raw) {
    // Missing or empty value: no key to redact.
    return undefined
  }
  return Redacted.make(raw)
}
/**
 * Resolve the LlmConfig from the "LLM" config value, falling back to
 * DEFAULT_LLM when it is unset.
 */
export const resolveLlmConfig = Config.string("LLM").pipe(
  Config.withDefault(DEFAULT_LLM),
  Effect.map(getLlmConfig)
)
/**
 * Service to access the resolved LlmConfig.
 * Effect Context tag; the tag id "@app/CurrentLlmConfig" identifies the
 * service in the environment.
 */
export class CurrentLlmConfig extends Context.Tag("@app/CurrentLlmConfig")<
CurrentLlmConfig,
LlmConfig
>() {
/** Build a Layer that provides the given config as this service. */
static fromConfig(config: LlmConfig): Layer.Layer<CurrentLlmConfig> {
return Layer.succeed(CurrentLlmConfig, config)
}
}