diff --git a/.gitignore b/.gitignore index 19198a7a5918..08cdba700ea8 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,6 @@ UPCOMING_CHANGELOG.md logs/ *.bun-build tsconfig.tsbuildinfo + +# Sisyphus orchestrator state (local only) +.sisyphus/ diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index b13d3a8c8131..3b3c6f6d30cd 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -513,7 +513,8 @@ export const layer = Layer.effect( for (const [key, value] of Object.entries(auth)) { if (value.type === "wellknown") { const url = key.replace(/\/+$/, "") - process.env[value.key] = value.token + // TODO(multi-instance): see env/index.ts docstring. + yield* env.set(value.key, value.token) log.debug("fetching remote config", { url: `${url}/.well-known/opencode` }) const response = yield* Effect.promise(() => fetch(`${url}/.well-known/opencode`)) if (!response.ok) { @@ -647,7 +648,7 @@ export const layer = Layer.effect( { concurrency: 2 }, ) if (Option.isSome(tokenOpt)) { - process.env["OPENCODE_CONSOLE_TOKEN"] = tokenOpt.value + // TODO(multi-instance): see env/index.ts docstring. yield* env.set("OPENCODE_CONSOLE_TOKEN", tokenOpt.value) } diff --git a/packages/opencode/src/env/index.ts b/packages/opencode/src/env/index.ts index e7af61422b43..35fcec39c4e5 100644 --- a/packages/opencode/src/env/index.ts +++ b/packages/opencode/src/env/index.ts @@ -1,6 +1,10 @@ import { Context, Effect, Layer } from "effect" -import { InstanceState } from "@/effect/instance-state" +/** + * Effect-aware wrapper around `process.env`. Reads are live (no snapshot); + * `set`/`remove` mutate `process.env` directly. Writes are process-wide — + * call sites that persist auth-derived values are marked `TODO(multi-instance)`. 
+ */ type State = Record export interface Interface { @@ -12,23 +16,21 @@ export interface Interface { export class Service extends Context.Service()("@opencode/Env") {} -export const layer = Layer.effect( +export const layer = Layer.succeed( Service, - Effect.gen(function* () { - const state = yield* InstanceState.make(Effect.fn("Env.state")(() => Effect.succeed({ ...process.env }))) - - const get = Effect.fn("Env.get")((key: string) => InstanceState.use(state, (env) => env[key])) - const all = Effect.fn("Env.all")(() => InstanceState.get(state)) - const set = Effect.fn("Env.set")(function* (key: string, value: string) { - const env = yield* InstanceState.get(state) - env[key] = value - }) - const remove = Effect.fn("Env.remove")(function* (key: string) { - const env = yield* InstanceState.get(state) - delete env[key] - }) - - return Service.of({ get, all, set, remove }) + Service.of({ + get: (key: string) => Effect.sync(() => process.env[key]), + all: () => Effect.sync(() => ({ ...process.env })), + set: Effect.fn("Env.set")((key: string, value: string) => + Effect.sync(() => { + process.env[key] = value + }), + ), + remove: Effect.fn("Env.remove")((key: string) => + Effect.sync(() => { + delete process.env[key] + }), + ), }), ) diff --git a/packages/opencode/src/provider/overlay.ts b/packages/opencode/src/provider/overlay.ts new file mode 100644 index 000000000000..2b238f7bb0a2 --- /dev/null +++ b/packages/opencode/src/provider/overlay.ts @@ -0,0 +1,93 @@ +import { Hash } from "@opencode-ai/core/util/hash" + +import type { ProviderID } from "@/provider/schema" +import type { Provider } from "@/provider/provider" + +interface OverlayState { + cachedProviders: Record + cleanedDatabase: Readonly> +} + +/** + * Pure single-provider env-overlay step. See `overlay.test.ts` for the + * exhaustive precedence table. 
+ */ +export function resolveEnvOverlay( + cached: Provider.Info | undefined, + candidate: Provider.Info, + apiKey: string | undefined, +): Provider.Info | undefined { + if (!apiKey) { + if (cached?.source === "env") return undefined + return cached + } + if (cached && cached.source !== "env") { + if (!cached.key && candidate.env.length === 1) return { ...cached, key: apiKey } + return cached + } + // Multi-env candidate: cached.key has no single source of truth, preserve it. + const nextKey = candidate.env.length === 1 ? apiKey : cached?.key + if (cached && cached.key === nextKey) return cached + if (cached) return { ...cached, key: nextKey } + return { ...candidate, source: "env", key: nextKey } +} + +export function currentProviders( + s: OverlayState, + envs: Record, +): Record { + const result: Record = { ...s.cachedProviders } + for (const [id, info] of Object.entries(s.cleanedDatabase)) { + const providerID = id as ProviderID + // Empty/whitespace env values count as absent. Non-blank values are + // passed through verbatim — trimming a real key would be silently wrong. + const apiKey = info.env.map((k) => envs[k]).find(isNonBlank) + const next = resolveEnvOverlay(result[providerID], info, apiKey) + if (next) result[providerID] = next + else delete result[providerID] + } + return result +} + +export function isNonBlank(v: string | undefined): v is string { + return typeof v === "string" && v.trim() !== "" +} + +// JSON.stringify drops functions silently and throws on BigInt. Tag both so +// distinct closures (e.g. AWS `coalesceProvider`) and BigInt values produce +// stable, distinct hashes. Anonymous arrows collide on `__fn:anon` — that is +// intentional: the per-call `fetch` wrapper built in `resolveSDK` would +// otherwise bust the SDK cache on every invocation. +// +// CAVEAT: same-named closures from unrelated callers (e.g. a third-party +// plugin storing a `coalesceProvider` in `provider.options`) collide and may +// silently serve a stale SDK. 
Plugin authors must keep stateful closures out +// of `provider.options` outside the per-call `fetch` convention. +// +// UNSUPPORTED INPUT TYPES (silent collisions or throws — do not place these +// in `provider.options`): +// - `Map`, `Set`, `WeakMap`, `WeakSet` — `JSON.stringify` returns `"{}"`, +// so two distinct instances collide on the same hash. +// - `RegExp` — also serializes to `"{}"`, same collision. +// - `Symbol` — silently dropped by `JSON.stringify` (becomes `undefined`). +// - Circular references — `JSON.stringify` throws `TypeError`, propagated +// as a defect through `getLanguage`/`resolveSDK`. +// - `Buffer` / `Uint8Array` — serialized as `{0:n,1:n,...}`; large but +// distinct, so correct but inefficient. +// - `Date`, `URL` — handled correctly via their `toJSON()` (ISO string, +// `href`). +// +// TODO(hash): swap for `effect/Hash` + `Equal.equals` with WeakMap-tracked +// function identity to fix the named-collision risk and the unsupported +// types above. +export function hashIdentity(parts: Record): string { + return Hash.fast( + JSON.stringify(parts, (_key, value) => { + if (typeof value === "function") return `__fn:${value.name || "anon"}` + if (typeof value === "bigint") return `${value.toString()}n` + return value + }), + ) +} + +export * as ProviderOverlay from "./overlay" diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 063e2800d167..a524ca297ebb 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -6,6 +6,7 @@ import { NoSuchModelError, type Provider as SDK } from "ai" import * as Log from "@opencode-ai/core/util/log" import { Npm } from "@opencode-ai/core/npm" import { Hash } from "@opencode-ai/core/util/hash" +import { currentProviders, hashIdentity, isNonBlank } from "@/provider/overlay" import { Plugin } from "../plugin" import { type LanguageModelV3 } from "@ai-sdk/provider" import * as ModelsDev from 
"@opencode-ai/core/models" @@ -117,7 +118,10 @@ const BUNDLED_PROVIDERS: Record Promise<(opts: any) => BundledSDK> } type CustomModelLoader = (sdk: any, modelID: string, options?: Record) => Promise -type CustomVarsLoader = (options: Record) => Record +type CustomVarsLoader = ( + options: Record, + envs: Record, +) => Record type CustomDiscoverModels = () => Promise> type CustomLoader = (provider: Info) => Effect.Effect<{ autoload: boolean @@ -125,6 +129,13 @@ type CustomLoader = (provider: Info) => Effect.Effect<{ vars?: CustomVarsLoader options?: Record discoverModels?: CustomDiscoverModels + // Set on a non-autoload return when the loader cannot reconstruct its + // options from env alone (auth metadata, dynamic SDK imports, derived URLs). + // `requiresRestart: true` excludes the provider from `cleanedDatabase` so a late + // env never surfaces it with stale options; a one-time warn is emitted + // instead. Single-env-key providers (anthropic, openai, ...) do not need it. + // TODO(rerunOn): replace with declarative `rerunOn: string[]` env-dep list. + requiresRestart?: boolean }> type CustomDep = { @@ -132,6 +143,7 @@ type CustomDep = { config: () => Effect.Effect env: () => Effect.Effect> get: (key: string) => Effect.Effect + set: (key: string, value: string) => Effect.Effect } function useLanguageModel(sdk: any) { @@ -219,6 +231,7 @@ function custom(dep: CustomDep): Record { if (!resource && !provider.options?.baseURL) { return { autoload: false, + requiresRestart: true, async getModel() { throw new Error( "AZURE_RESOURCE_NAME is missing, set it using env var or reconnecting the azure provider and setting it", @@ -235,26 +248,46 @@ function custom(dep: CustomDep): Record { options: { resourceName: resource, }, - vars(_options): Record { - if (resource) { + vars(_options, envs): Record { + // Re-read live env at call time so late `AZURE_RESOURCE_NAME` env + // rotation propagates through `${AZURE_RESOURCE_NAME}` baseURL + // templating in `resolveSDK`. 
Precedence preserved: + // provider.options.resourceName > auth.metadata.resourceName > env. + const liveResource = [ + provider.options?.resourceName, + auth?.type === "api" ? auth.metadata?.resourceName : undefined, + envs["AZURE_RESOURCE_NAME"], + ].find((name) => typeof name === "string" && name.trim() !== "") + if (liveResource) { return { - AZURE_RESOURCE_NAME: resource, + AZURE_RESOURCE_NAME: liveResource, } } return {} }, } }), - "azure-cognitive-services": Effect.fnUntraced(function* () { + "azure-cognitive-services": Effect.fnUntraced(function* (provider: Info) { const resourceName = yield* dep.get("AZURE_COGNITIVE_SERVICES_RESOURCE_NAME") + if (!resourceName && !provider.options?.baseURL) { + return { + autoload: false, + requiresRestart: true, + async getModel() { + throw new Error( + "AZURE_COGNITIVE_SERVICES_RESOURCE_NAME is missing, set it via env var or configure provider.options.baseURL", + ) + }, + } + } return { autoload: false, async getModel(sdk: any, modelID: string, options?: Record) { return selectAzureLanguageModel(sdk, modelID, Boolean(options?.["useCompletionUrls"])) }, - options: { - baseURL: resourceName ? `https://${resourceName}.cognitiveservices.azure.com/openai` : undefined, - }, + options: resourceName + ? { baseURL: `https://${resourceName}.cognitiveservices.azure.com/openai` } + : {}, } }), "amazon-bedrock": Effect.fnUntraced(function* () { @@ -274,26 +307,21 @@ function custom(dep: CustomDep): Record { const awsAccessKeyId = env["AWS_ACCESS_KEY_ID"] - // TODO: Using process.env directly because Env.set only updates a process.env shallow copy, - // until the scope of the Env API is clarified (test only or runtime?) 
- const awsBearerToken = iife(() => { - const envToken = process.env.AWS_BEARER_TOKEN_BEDROCK - if (envToken) return envToken - if (auth?.type === "api") { - process.env.AWS_BEARER_TOKEN_BEDROCK = auth.key - return auth.key - } - return undefined - }) + const envBearerToken = env["AWS_BEARER_TOKEN_BEDROCK"] + const awsBearerToken = envBearerToken ?? (auth?.type === "api" ? auth.key : undefined) + if (!envBearerToken && awsBearerToken) { + // TODO(multi-instance): see env/index.ts docstring. + yield* dep.set("AWS_BEARER_TOKEN_BEDROCK", awsBearerToken) + } const awsWebIdentityTokenFile = env["AWS_WEB_IDENTITY_TOKEN_FILE"] const containerCreds = Boolean( - process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI || process.env.AWS_CONTAINER_CREDENTIALS_FULL_URI, + env["AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"] || env["AWS_CONTAINER_CREDENTIALS_FULL_URI"], ) if (!profile && !awsAccessKeyId && !awsBearerToken && !awsWebIdentityTokenFile && !containerCreds) - return { autoload: false } + return { autoload: false, requiresRestart: true } const { fromNodeProviderChain } = yield* Effect.promise(() => import("@aws-sdk/credential-providers")) @@ -468,14 +496,27 @@ function custom(dep: CustomDep): Record { ) const autoload = Boolean(project) - if (!autoload) return { autoload: false } + if (!autoload) return { autoload: false, requiresRestart: true } return { autoload: true, - vars(_options: Record) { - const endpoint = location === "global" ? "aiplatform.googleapis.com" : `${location}-aiplatform.googleapis.com` + vars(_options: Record, envs: Record) { + // Re-read live env at call time so post-init rotation of any of the + // GOOGLE_VERTEX_* / GOOGLE_CLOUD_* / GCP_PROJECT envs propagates + // through baseURL templating. Falls back to init-time capture for + // parity with the non-rotated case. + const liveProject = + envs["GOOGLE_VERTEX_PROJECT"] ?? + envs["GOOGLE_CLOUD_PROJECT"] ?? + envs["GCP_PROJECT"] ?? + envs["GCLOUD_PROJECT"] ?? 
+ project + const liveLocation = + envs["GOOGLE_VERTEX_LOCATION"] ?? envs["GOOGLE_CLOUD_LOCATION"] ?? envs["VERTEX_LOCATION"] ?? location + const endpoint = + liveLocation === "global" ? "aiplatform.googleapis.com" : `${liveLocation}-aiplatform.googleapis.com` return { - ...(project && { GOOGLE_VERTEX_PROJECT: project }), - GOOGLE_VERTEX_LOCATION: location, + ...(liveProject && { GOOGLE_VERTEX_PROJECT: liveProject }), + GOOGLE_VERTEX_LOCATION: liveLocation, GOOGLE_VERTEX_ENDPOINT: endpoint, } }, @@ -505,7 +546,7 @@ function custom(dep: CustomDep): Record { const project = env["GOOGLE_CLOUD_PROJECT"] ?? env["GCP_PROJECT"] ?? env["GCLOUD_PROJECT"] const location = env["GOOGLE_CLOUD_LOCATION"] ?? env["VERTEX_LOCATION"] ?? "global" const autoload = Boolean(project) - if (!autoload) return { autoload: false } + if (!autoload) return { autoload: false, requiresRestart: true } return { autoload: true, options: { @@ -520,23 +561,22 @@ function custom(dep: CustomDep): Record { }), "sap-ai-core": Effect.fnUntraced(function* () { const auth = yield* dep.auth("sap-ai-core") - // TODO: Using process.env directly because Env.set only updates a shallow copy (not process.env), - // until the scope of the Env API is clarified (test only or runtime?) - const envServiceKey = iife(() => { - const envAICoreServiceKey = process.env.AICORE_SERVICE_KEY - if (envAICoreServiceKey) return envAICoreServiceKey - if (auth?.type === "api") { - process.env.AICORE_SERVICE_KEY = auth.key - return auth.key - } - return undefined - }) - const deploymentId = process.env.AICORE_DEPLOYMENT_ID - const resourceGroup = process.env.AICORE_RESOURCE_GROUP + const env = yield* dep.env() + const envAICoreServiceKey = env["AICORE_SERVICE_KEY"] + const envServiceKey = envAICoreServiceKey ?? (auth?.type === "api" ? auth.key : undefined) + if (!envServiceKey) { + return { autoload: false, requiresRestart: true } + } + if (!envAICoreServiceKey) { + // TODO(multi-instance): see env/index.ts docstring. 
+ yield* dep.set("AICORE_SERVICE_KEY", envServiceKey) + } + const deploymentId = env["AICORE_DEPLOYMENT_ID"] + const resourceGroup = env["AICORE_RESOURCE_GROUP"] return { - autoload: !!envServiceKey, - options: envServiceKey ? { deploymentId, resourceGroup } : {}, + autoload: true, + options: { deploymentId, resourceGroup }, async getModel(sdk: any, modelID: string) { return sdk(modelID) }, @@ -707,6 +747,7 @@ function custom(dep: CustomDep): Record { if (!accountId) return { autoload: false, + requiresRestart: true, async getModel() { throw new Error( "CLOUDFLARE_ACCOUNT_ID is missing. Set it with: export CLOUDFLARE_ACCOUNT_ID=", @@ -732,9 +773,12 @@ function custom(dep: CustomDep): Record { async getModel(sdk: any, modelID: string) { return sdk.languageModel(modelID) }, - vars(_options) { + vars(_options, envs) { + // Re-read live env at call time so late `CLOUDFLARE_ACCOUNT_ID` + // rotation propagates through baseURL templating. Falls back to + // init-time capture (env or auth metadata) when env is unset. return { - CLOUDFLARE_ACCOUNT_ID: accountId, + CLOUDFLARE_ACCOUNT_ID: envs["CLOUDFLARE_ACCOUNT_ID"] ?? accountId, } }, } @@ -756,6 +800,7 @@ function custom(dep: CustomDep): Record { ].filter((x): x is string => Boolean(x)) return { autoload: false, + requiresRestart: true, async getModel() { throw new Error( `${missing.join(" and ")} missing. Set with: ${missing.map((x) => `export ${x}=`).join(" && ")}`, @@ -999,17 +1044,106 @@ export interface Interface { readonly defaultModel: () => Effect.Effect<{ providerID: ProviderID; modelID: ModelID }> } +interface BoundedAsyncCache { + get(key: string): V | undefined + getOrLoad(key: string, build: () => Promise): Promise +} + +// Capacity-bounded LRU + in-flight Promise dedup. Two concurrent callers with +// the same key share one build; rejections evict the in-flight slot so a +// transient init failure never poisons the cache. 
Insertion order in the +// underlying `Map` doubles as LRU order — `get` re-inserts the key to refresh +// recency. Eviction is O(1) (drop the iterator's first key when at capacity). +// +// Bounds protect long-running daemons against rotation-driven leaks: every +// distinct credential or templated baseURL produces a new `hashIdentity`, so +// without a cap the underlying maps would grow once per rotation forever. +function makeBoundedAsyncCache(capacity: number): BoundedAsyncCache { + const entries = new Map() + const inflight = new Map>() + + const set = (key: string, value: V) => { + if (entries.has(key)) entries.delete(key) + else if (entries.size >= capacity) { + const oldest = entries.keys().next().value + if (oldest !== undefined) entries.delete(oldest) + } + entries.set(key, value) + } + + const get = (key: string) => { + const v = entries.get(key) + if (v === undefined) return undefined + entries.delete(key) + entries.set(key, v) + return v + } + + return { + get, + getOrLoad(key, build) { + const cached = get(key) + if (cached !== undefined) return Promise.resolve(cached) + const pending = inflight.get(key) + if (pending) return pending + const promise = build().then( + (v) => { + set(key, v) + inflight.delete(key) + return v + }, + (err) => { + inflight.delete(key) + throw err + }, + ) + inflight.set(key, promise) + return promise + }, + } +} + +const SDK_CACHE_CAPACITY = 256 +const LANGUAGE_MODEL_CACHE_CAPACITY = 256 + interface State { - models: Map - providers: Record + models: BoundedAsyncCache + // Init-time providers from auth/config/loader. Readers MUST go through + // `currentProviders(...)` to apply the live env overlay on top. + cachedProviders: Record catalog: Record - sdk: Map + sdk: BoundedAsyncCache modelLoaders: Record varsLoaders: Record + // Env-eligible providers with models pre-filtered. Frozen at init. + cleanedDatabase: Readonly> + // Providers excluded from late-env overlay (loader returned `requiresRestart: true`). 
+ loaderRequiresRestart: Set + // Per-instance dedupe set for `warnRestartRequired`. + warnedRestartRequired: Set } export class Service extends Context.Service()("@opencode/Provider") {} +function warnRestartRequired( + s: Pick, + envs: Record, +) { + if (s.loaderRequiresRestart.size === 0) return + for (const providerID of s.loaderRequiresRestart) { + if (s.warnedRestartRequired.has(providerID)) continue + const info = s.catalog[providerID] + if (!info) continue + const present = info.env.find((k) => isNonBlank(envs[k])) + if (!present) continue + s.warnedRestartRequired.add(providerID) + log.warn("late env detected for provider that requires restart", { + providerID, + env: present, + }) + } +} + function cost(c: ModelsDev.Model["cost"]): Model["cost"] { const result: Model["cost"] = { input: c?.input ?? 0, @@ -1160,6 +1294,11 @@ function modelSuggestions(provider: Info | undefined, modelID: ModelID, enableEx .map((item) => item.id) } +/** + * @internal Bare layer without sub-layer wiring; exposed only so tests can + * compose with stubbed sub-layers (e.g. RuntimeFlags overrides). Production + * code MUST use `defaultLayer` instead. 
+ */ export const layer = Layer.effect( Service, Effect.gen(function* () { @@ -1181,22 +1320,24 @@ export const layer = Layer.effect( const database = mapValues(catalog, toPublicInfo) const providers: Record = {} as Record - const languages = new Map() + const languages = makeBoundedAsyncCache(LANGUAGE_MODEL_CACHE_CAPACITY) const modelLoaders: { [providerID: string]: CustomModelLoader } = {} const varsLoaders: { [providerID: string]: CustomVarsLoader } = {} - const sdk = new Map() + const sdk = makeBoundedAsyncCache(SDK_CACHE_CAPACITY) const discoveryLoaders: { [providerID: string]: CustomDiscoverModels } = {} + const loaderRequiresRestart = new Set() const dep = { auth: (id: string) => auth.get(id).pipe(Effect.orDie), config: () => config.get(), env: () => env.all(), get: (key: string) => env.get(key), + set: (key: string, value: string) => env.set(key, value), } log.info("init") @@ -1349,18 +1490,8 @@ export const layer = Layer.effect( database[providerID] = parsed } - // load env + // env snapshot for source-label preservation in the custom loop below. const envs = yield* env.all() - for (const [id, provider] of Object.entries(database)) { - const providerID = ProviderID.make(id) - if (disabled.has(providerID)) continue - const apiKey = provider.env.map((item) => envs[item]).find(Boolean) - if (!apiKey) continue - mergeProvider(providerID, { - source: "env", - key: provider.env.length === 1 ? 
apiKey : undefined, - }) - } // load apikeys const auths = yield* auth.all().pipe(Effect.orDie) @@ -1405,13 +1536,48 @@ export const layer = Layer.effect( continue } const result = yield* fn(data) - if (result && (result.autoload || providers[providerID])) { - if (result.getModel) modelLoaders[providerID] = result.getModel - if (result.vars) varsLoaders[providerID] = result.vars - if (result.discoverModels) discoveryLoaders[providerID] = result.discoverModels + if (!result) continue + + if (!result.autoload && result.requiresRestart === true) { + loaderRequiresRestart.add(providerID) + } + + // Always register loaders so a late env-promoted provider can find + // its modelLoader / varsLoader / discoverModels. + if (result.getModel) modelLoaders[providerID] = result.getModel + if (result.vars) varsLoaders[providerID] = result.vars + if (result.discoverModels) discoveryLoaders[providerID] = result.discoverModels + + if (result.autoload) { const opts = result.options ?? {} - const patch: Partial = providers[providerID] ? { options: opts } : { source: "custom", options: opts } + if (providers[providerID]) { + mergeProvider(providerID, { options: opts }) + continue + } + // autoload=true with no prior auth/config entry. Most common cause: + // env-conditional autoload (gitlab, cloudflare-ai-gateway) fired + // because env was present at init. Preserve source="env" so labels + // match the pre-fix behavior of the deleted env-stamp loop. + const envKey = data.env.length > 0 ? data.env.map((k) => envs[k]).find(Boolean) : undefined + const patch: Partial = envKey + ? { source: "env", options: opts, key: data.env.length === 1 ? envKey : undefined } + : { source: "custom", options: opts } mergeProvider(providerID, patch) + continue + } + + // Non-autoload loader returned options: merge into providers if + // already stamped (auth/config), otherwise fold into database so a + // late env detection picks them up via the overlay. 
+ if (result.options) { + if (providers[providerID]) { + mergeProvider(providerID, { options: result.options }) + } else { + database[providerID] = { + ...data, + options: mergeDeep(data.options ?? {}, result.options) as Record, + } + } } } @@ -1441,6 +1607,9 @@ export const layer = Layer.effect( }) } + // toPublicInfo (line ~982) deep-clones via JSON, and mergeDeep produces + // fresh objects, so providers[id].models and database[id].models hold + // distinct Model instances. prepareModel inside both loops is safe. for (const [id, provider] of Object.entries(providers)) { const providerID = ProviderID.make(id) if (!isProviderAllowed(providerID)) { @@ -1451,36 +1620,9 @@ export const layer = Layer.effect( const configProvider = cfg.provider?.[providerID] for (const [modelID, model] of Object.entries(provider.models)) { - model.api.id = model.api.id ?? model.id ?? modelID - if ( - // These chat aliases are invalid for the special handling in the - // built-in providers below, but custom providers may support them. 
- (modelID === "gpt-5-chat-latest" && - (providerID === ProviderID.openai || - providerID === ProviderID.githubCopilot || - providerID === ProviderID.openrouter)) || - (providerID === ProviderID.openrouter && modelID === "openai/gpt-5-chat") - ) - delete provider.models[modelID] - if (model.status === "alpha" && !runtimeFlags.enableExperimentalModels) delete provider.models[modelID] - if (model.status === "deprecated") delete provider.models[modelID] - if ( - (configProvider?.blacklist && configProvider.blacklist.includes(modelID)) || - (configProvider?.whitelist && !configProvider.whitelist.includes(modelID)) - ) + prepareModel(model, modelID, configProvider?.models?.[modelID]?.variants) + if (!keepModel(model, modelID, providerID, configProvider, runtimeFlags.enableExperimentalModels)) { delete provider.models[modelID] - - if (!model.variants || Object.keys(model.variants).length === 0) { - model.variants = mapValues(ProviderTransform.variants(model), (v) => v) - } - - const configVariants = configProvider?.models?.[modelID]?.variants - if (configVariants && model.variants) { - const merged = mergeDeep(model.variants, configVariants) - model.variants = mapValues( - pickBy(merged, (v) => !v.disabled), - (v) => omit(v, ["disabled"]), - ) } } @@ -1492,25 +1634,62 @@ export const layer = Layer.effect( log.info("found", { providerID }) } + // Build cleanedDatabase. Apply the same prepareModel + keepModel pipeline + // so a late env-detected provider exposes the same models, with the same + // variants, as it would if env had been present at init. 
+ const cleanedDatabase: Record = {} + for (const [id, info] of Object.entries(database)) { + if (disabled.has(id)) continue + if (enabled && !enabled.has(id)) continue + if (loaderRequiresRestart.has(ProviderID.make(id))) continue + if (info.env.length === 0) continue + const providerID = ProviderID.make(id) + const configProvider = cfg.provider?.[id] + const filteredModels: Record = {} + for (const [modelID, model] of Object.entries(info.models)) { + prepareModel(model, modelID, configProvider?.models?.[modelID]?.variants) + if (!keepModel(model, modelID, providerID, configProvider, runtimeFlags.enableExperimentalModels)) continue + filteredModels[modelID] = model + } + if (Object.keys(filteredModels).length === 0) continue + cleanedDatabase[providerID] = { ...info, models: filteredModels } + } + // Back the comment-only `Readonly<...>` invariant at runtime so a + // future refactor that accidentally mutates `cleanedDatabase` + // (e.g. assigning a new entry post-init) fails loudly under strict + // mode instead of silently desynchronizing the env overlay. 
+ Object.freeze(cleanedDatabase) + return { models: languages, - providers, + cachedProviders: providers, catalog, sdk, modelLoaders, varsLoaders, + cleanedDatabase, + loaderRequiresRestart, + warnedRestartRequired: new Set(), } }), ) - const list = Effect.fn("Provider.list")(() => InstanceState.use(state, (s) => s.providers)) + const observeProviders = Effect.fnUntraced(function* (s: State) { + const envs = yield* env.all() + warnRestartRequired(s, envs) + return currentProviders(s, envs) + }) + + const list = Effect.fn("Provider.list")(function* () { + const s = yield* InstanceState.get(state) + return yield* observeProviders(s) + }) - async function resolveSDK(model: Model, s: State, envs: Record) { + async function resolveSDK(model: Model, s: State, provider: Info, envs: Record) { try { using _ = log.time("getSDK", { providerID: model.providerID, }) - const provider = s.providers[model.providerID] const options = { ...provider.options } if (model.providerID === "google-vertex" && !model.api.npm.includes("@ai-sdk/openai-compatible")) { @@ -1528,7 +1707,7 @@ export const layer = Layer.effect( const loader = s.varsLoaders[model.providerID] if (loader) { - const vars = loader(options) + const vars = loader(options, envs) for (const [key, value] of Object.entries(vars)) { const field = "${" + key + "}" url = url.replaceAll(field, value) @@ -1550,117 +1729,115 @@ export const layer = Layer.effect( ...model.headers, } - const key = Hash.fast( - JSON.stringify({ - providerID: model.providerID, - npm: model.api.npm, - options, - }), - ) - const existing = s.sdk.get(key) - if (existing) return existing - - const customFetch = options["fetch"] - const chunkTimeout = options["chunkTimeout"] - delete options["chunkTimeout"] - - options["fetch"] = async (input: any, init?: BunFetchRequestInit) => { - const fetchFn = customFetch ?? fetch - const opts = init ?? {} - const chunkAbortCtl = typeof chunkTimeout === "number" && chunkTimeout > 0 ? 
new AbortController() : undefined - const signals: AbortSignal[] = [] - - if (opts.signal) signals.push(opts.signal) - if (chunkAbortCtl) signals.push(chunkAbortCtl.signal) - if (options["timeout"] !== undefined && options["timeout"] !== null && options["timeout"] !== false) - signals.push(AbortSignal.timeout(options["timeout"])) - - const combined = signals.length === 0 ? null : signals.length === 1 ? signals[0] : AbortSignal.any(signals) - if (combined) opts.signal = combined - - // Strip openai itemId metadata following what codex does - if ( - (model.api.npm === "@ai-sdk/openai" || model.api.npm === "@ai-sdk/azure") && - opts.body && - opts.method === "POST" - ) { - const body = JSON.parse(opts.body as string) - const keepIds = body.store === true - if (!keepIds && Array.isArray(body.input)) { - for (const item of body.input) { - if ("id" in item) { - delete item.id + const key = hashIdentity({ + providerID: model.providerID, + npm: model.api.npm, + options, + }) + + return (await s.sdk.getOrLoad(key, async () => { + const customFetch = options["fetch"] + const chunkTimeout = options["chunkTimeout"] + delete options["chunkTimeout"] + + options["fetch"] = async (input: any, init?: BunFetchRequestInit) => { + const fetchFn = customFetch ?? fetch + const opts = init ?? {} + const chunkAbortCtl = + typeof chunkTimeout === "number" && chunkTimeout > 0 ? new AbortController() : undefined + const signals: AbortSignal[] = [] + + if (opts.signal) signals.push(opts.signal) + if (chunkAbortCtl) signals.push(chunkAbortCtl.signal) + if (options["timeout"] !== undefined && options["timeout"] !== null && options["timeout"] !== false) + signals.push(AbortSignal.timeout(options["timeout"])) + + const combined = signals.length === 0 ? null : signals.length === 1 ? 
signals[0] : AbortSignal.any(signals) + if (combined) opts.signal = combined + + // Strip openai itemId metadata following what codex does + if ( + (model.api.npm === "@ai-sdk/openai" || model.api.npm === "@ai-sdk/azure") && + opts.body && + opts.method === "POST" + ) { + const body = JSON.parse(opts.body as string) + const keepIds = body.store === true + if (!keepIds && Array.isArray(body.input)) { + for (const item of body.input) { + if ("id" in item) { + delete item.id + } } + opts.body = JSON.stringify(body) } - opts.body = JSON.stringify(body) } + + const res = await fetchFn(input, { + ...opts, + // @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682 + timeout: false, + }) + + if (!chunkAbortCtl) return res + return wrapSSE(res, chunkTimeout, chunkAbortCtl) } - const res = await fetchFn(input, { - ...opts, - // @ts-ignore see here: https://github.com/oven-sh/bun/issues/16682 - timeout: false, - }) + const bundledLoader = BUNDLED_PROVIDERS[model.api.npm] + if (bundledLoader) { + log.info("using bundled provider", { + providerID: model.providerID, + pkg: model.api.npm, + }) + const factory = await bundledLoader() + return factory({ + name: model.providerID, + ...options, + }) + } - if (!chunkAbortCtl) return res - return wrapSSE(res, chunkTimeout, chunkAbortCtl) - } + let installedPath: string + if (!model.api.npm.startsWith("file://")) { + const item = await Npm.add(model.api.npm) + if (!item.entrypoint) throw new Error(`Package ${model.api.npm} has no import entrypoint`) + installedPath = item.entrypoint + } else { + log.info("loading local provider", { pkg: model.api.npm }) + installedPath = model.api.npm + } - const bundledLoader = BUNDLED_PROVIDERS[model.api.npm] - if (bundledLoader) { - log.info("using bundled provider", { - providerID: model.providerID, - pkg: model.api.npm, - }) - const factory = await bundledLoader() - const loaded = factory({ + // `installedPath` is a local entry path or an existing `file://` URL. 
Normalize + // only path inputs so Node on Windows accepts the dynamic import. + const importSpec = installedPath.startsWith("file://") ? installedPath : pathToFileURL(installedPath).href + const mod = await import(importSpec) + + const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!] + return fn({ name: model.providerID, ...options, }) - s.sdk.set(key, loaded) - return loaded as SDK - } - - let installedPath: string - if (!model.api.npm.startsWith("file://")) { - const item = await Npm.add(model.api.npm) - if (!item.entrypoint) throw new Error(`Package ${model.api.npm} has no import entrypoint`) - installedPath = item.entrypoint - } else { - log.info("loading local provider", { pkg: model.api.npm }) - installedPath = model.api.npm - } - - // `installedPath` is a local entry path or an existing `file://` URL. Normalize - // only path inputs so Node on Windows accepts the dynamic import. - const importSpec = installedPath.startsWith("file://") ? installedPath : pathToFileURL(installedPath).href - const mod = await import(importSpec) - - const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!] 
- const loaded = fn({ - name: model.providerID, - ...options, - }) - s.sdk.set(key, loaded) - return loaded as SDK + })) as SDK } catch (e) { throw new InitError({ providerID: model.providerID, cause: e }) } } - const getProvider = Effect.fn("Provider.getProvider")((providerID: ProviderID) => - InstanceState.use(state, (s) => s.providers[providerID]), - ) + const getProvider = Effect.fn("Provider.getProvider")(function* (providerID: ProviderID) { + const s = yield* InstanceState.get(state) + const all = yield* observeProviders(s) + return all[providerID] + }) const getModel = Effect.fn("Provider.getModel")(function* (providerID: ProviderID, modelID: ModelID) { const s = yield* InstanceState.get(state) - const provider = s.providers[providerID] + const all = yield* observeProviders(s) + const provider = all[providerID] if (!provider) { const catalogProvider = s.catalog[providerID] const suggestions = catalogProvider ? modelSuggestions(catalogProvider, modelID, runtimeFlags.enableExperimentalModels) : fuzzysort - .go(providerID, Object.keys({ ...s.catalog, ...s.providers }), { limit: 3, threshold: -10000 }) + .go(providerID, Object.keys({ ...s.catalog, ...all }), { limit: 3, threshold: -10000 }) .map((m) => m.target) return yield* new ModelNotFoundError({ providerID, modelID, suggestions }) } @@ -1679,22 +1856,39 @@ export const layer = Layer.effect( const getLanguage = Effect.fn("Provider.getLanguage")(function* (model: Model) { const s = yield* InstanceState.get(state) const envs = yield* env.all() - const key = `${model.providerID}/${model.id}` - if (s.models.has(key)) return s.models.get(key)! 
+ const all = currentProviders(s, envs) + const provider = all[model.providerID] + if (!provider) { + const suggestions = fuzzysort + .go(model.providerID, Object.keys({ ...s.catalog, ...all }), { limit: 3, threshold: -10000 }) + .map((m) => m.target) + return yield* new ModelNotFoundError({ providerID: model.providerID, modelID: model.id, suggestions }) + } + // Mirror s.sdk's hashIdentity keying (see resolveSDK) so the cached + // LanguageModel invalidates whenever the underlying SDK rebuilds — + // env key rotation, multi-env credential changes, env-templated + // baseURL mutations, and option changes all flow through the same + // invariant. + const cacheKey = hashIdentity({ + providerID: model.providerID, + modelID: model.id, + key: provider.key, + options: { ...provider.options, ...model.options }, + }) + const cached = s.models.get(cacheKey) + if (cached !== undefined) return cached - const provider = s.providers[model.providerID] return yield* EffectPromise.refineRejection( - async () => { - const sdk = await resolveSDK(model, s, envs) - const language = s.modelLoaders[model.providerID] - ? await s.modelLoaders[model.providerID](sdk, model.api.id, { - ...provider.options, - ...model.options, - }) - : sdk.languageModel(model.api.id) - s.models.set(key, language) - return language - }, + () => + s.models.getOrLoad(cacheKey, async () => { + const sdk = await resolveSDK(model, s, provider, envs) + return s.modelLoaders[model.providerID] + ? await s.modelLoaders[model.providerID](sdk, model.api.id, { + ...provider.options, + ...model.options, + }) + : sdk.languageModel(model.api.id) + }), (cause) => cause instanceof NoSuchModelError ? 
new ModelNotFoundError({ modelID: model.id, providerID: model.providerID, cause }) @@ -1704,7 +1898,7 @@ export const layer = Layer.effect( const closest = Effect.fn("Provider.closest")(function* (providerID: ProviderID, query: string[]) { const s = yield* InstanceState.get(state) - const provider = s.providers[providerID] + const provider = (yield* observeProviders(s))[providerID] if (!provider) return undefined for (const item of query) { for (const modelID of Object.keys(provider.models)) { @@ -1725,7 +1919,7 @@ export const layer = Layer.effect( } const s = yield* InstanceState.get(state) - const provider = s.providers[providerID] + const provider = (yield* observeProviders(s))[providerID] if (!provider) return undefined let priority = [ @@ -1777,6 +1971,7 @@ export const layer = Layer.effect( if (cfg.model) return parseModel(cfg.model) const s = yield* InstanceState.get(state) + const all = yield* observeProviders(s) const recent = yield* fs.readJson(path.join(Global.Path.state, "model.json")).pipe( Effect.map((x): { providerID: ProviderID; modelID: ModelID }[] => { if (!isRecord(x) || !Array.isArray(x.recent)) return [] @@ -1790,13 +1985,13 @@ export const layer = Layer.effect( Effect.catch(() => Effect.succeed([] as { providerID: ProviderID; modelID: ModelID }[])), ) for (const entry of recent) { - const provider = s.providers[entry.providerID] + const provider = all[entry.providerID] if (!provider) continue if (!provider.models[entry.modelID]) continue return { providerID: entry.providerID, modelID: entry.modelID } } - const provider = Object.values(s.providers).find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id)) + const provider = Object.values(all).find((p) => !cfg.provider || Object.keys(cfg.provider).includes(p.id)) if (!provider) throw new Error("no providers found") const [model] = sort(Object.values(provider.models)) if (!model) throw new Error("no models found") @@ -1840,4 +2035,42 @@ export function parseModel(model: string) { } 
} +type ConfigProviderEntry = NonNullable[string]> + +function keepModel( + model: Model, + modelID: string, + providerID: ProviderID, + configProvider: ConfigProviderEntry | undefined, + enableExperimentalModels: boolean, +): boolean { + if ( + (modelID === "gpt-5-chat-latest" && + (providerID === ProviderID.openai || + providerID === ProviderID.githubCopilot || + providerID === ProviderID.openrouter)) || + (providerID === ProviderID.openrouter && modelID === "openai/gpt-5-chat") + ) + return false + if (model.status === "alpha" && !enableExperimentalModels) return false + if (model.status === "deprecated") return false + if (configProvider?.blacklist?.includes(modelID)) return false + if (configProvider?.whitelist && !configProvider.whitelist.includes(modelID)) return false + return true +} + +function prepareModel(model: Model, modelID: string, configVariants: Record | undefined) { + model.api.id = model.api.id ?? model.id ?? modelID + if (!model.variants || Object.keys(model.variants).length === 0) { + model.variants = mapValues(ProviderTransform.variants(model), (v) => v) + } + if (configVariants && model.variants) { + const merged = mergeDeep(model.variants, configVariants) + model.variants = mapValues( + pickBy(merged, (v) => !v.disabled), + (v) => omit(v, ["disabled"]), + ) + } +} + export * as Provider from "./provider" diff --git a/packages/opencode/test/preload.ts b/packages/opencode/test/preload.ts index 24b804819ed3..28d1236613b2 100644 --- a/packages/opencode/test/preload.ts +++ b/packages/opencode/test/preload.ts @@ -3,8 +3,9 @@ import os from "os" import path from "path" import fs from "fs/promises" +import fsSync from "fs" import { setTimeout as sleep } from "node:timers/promises" -import { afterAll } from "bun:test" +import { afterAll, afterEach } from "bun:test" // Set XDG env vars FIRST, before any src/ imports const dir = path.join(os.tmpdir(), "opencode-test-data-" + process.pid) @@ -52,29 +53,51 @@ const cacheDir = path.join(dir, "cache", 
"opencode") await fs.mkdir(cacheDir, { recursive: true }) await fs.writeFile(path.join(cacheDir, "version"), "14") -// Clear provider and server auth env vars to ensure clean test state -delete process.env["ANTHROPIC_API_KEY"] -delete process.env["OPENAI_API_KEY"] -delete process.env["GOOGLE_API_KEY"] -delete process.env["GOOGLE_GENERATIVE_AI_API_KEY"] -delete process.env["AZURE_OPENAI_API_KEY"] -delete process.env["AWS_ACCESS_KEY_ID"] -delete process.env["AWS_PROFILE"] -delete process.env["AWS_REGION"] -delete process.env["AWS_BEARER_TOKEN_BEDROCK"] -delete process.env["OPENROUTER_API_KEY"] -delete process.env["LLM_GATEWAY_API_KEY"] -delete process.env["GROQ_API_KEY"] -delete process.env["MISTRAL_API_KEY"] -delete process.env["PERPLEXITY_API_KEY"] -delete process.env["TOGETHER_API_KEY"] -delete process.env["XAI_API_KEY"] -delete process.env["DEEPSEEK_API_KEY"] -delete process.env["FIREWORKS_API_KEY"] -delete process.env["CEREBRAS_API_KEY"] -delete process.env["SAMBANOVA_API_KEY"] -delete process.env["OPENCODE_SERVER_PASSWORD"] -delete process.env["OPENCODE_SERVER_USERNAME"] +// Clear provider/server auth env vars so a contributor's shell can never +// leak a real credential into a test's `connected[]` assertion. Sourced +// programmatically from the models-api fixture so this list grows with +// models.dev without manual maintenance. Augmented with non-fixture keys +// referenced by src/ (OPENCODE_CONSOLE_TOKEN, GITLAB_INSTANCE_URL, +// AICORE_DEPLOYMENT_ID/RESOURCE_GROUP, the AWS chain helpers) and the +// synthetic test keys used by overlay/provider tests. +const fixtureEnv: string[] = (() => { + const fixturePath = process.env["OPENCODE_MODELS_PATH"] + if (!fixturePath) return [] + const data: Record = JSON.parse(fsSync.readFileSync(fixturePath, "utf8")) + const seen = new Set() + for (const provider of Object.values(data)) for (const key of provider.env ?? 
[]) seen.add(key) + return [...seen] +})() +const extraEnv = [ + "GOOGLE_API_KEY", + "AZURE_OPENAI_API_KEY", + "AWS_PROFILE", + "AWS_REGION", + "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", + "AWS_CONTAINER_CREDENTIALS_FULL_URI", + "AWS_WEB_IDENTITY_TOKEN_FILE", + "AWS_ROLE_ARN", + "AICORE_DEPLOYMENT_ID", + "AICORE_RESOURCE_GROUP", + "GOOGLE_CLOUD_PROJECT", + "GOOGLE_CLOUD_LOCATION", + "GCP_PROJECT", + "GCLOUD_PROJECT", + "VERTEX_LOCATION", + "CF_AIG_TOKEN", + "GITLAB_INSTANCE_URL", + "OPENCODE_CONSOLE_TOKEN", + "SINGLE_ENV_KEY", + "MULTI_ENV_KEY_1", + "MULTI_ENV_KEY_2", + "PRIMARY_KEY", + "FALLBACK_KEY", + "CUSTOM_API_KEY", + "OPENCODE_SERVER_PASSWORD", + "OPENCODE_SERVER_USERNAME", +] +for (const key of fixtureEnv) delete process.env[key] +for (const key of extraEnv) delete process.env[key] // Use in-memory sqlite process.env["OPENCODE_DB"] = ":memory:" @@ -90,3 +113,24 @@ void Log.init({ }) initProjectors() + +// Capture baseline AFTER all preload deletes/sets AND src/ side-effectful +// imports (Log.init, initProjectors) settle. With the env layer now writing +// through to `process.env` directly (see src/env/index.ts), tests that call +// `set()` mutate global state. Without a per-test reset, leaks would cross +// file boundaries (Bun runs all .test.ts files in one shared process per +// `bunfig.toml` defaults). This `afterEach` snapshot/restore makes test +// isolation automatic regardless of contributor discipline. 
+const ENV_BASELINE: Record = { ...process.env } +afterEach(() => { + for (const key of Object.keys(process.env)) { + if (!(key in ENV_BASELINE)) delete process.env[key] + } + for (const [key, value] of Object.entries(ENV_BASELINE) as [string, string | undefined][]) { + if (value === undefined) { + delete process.env[key] + continue + } + if (process.env[key] !== value) process.env[key] = value + } +}) diff --git a/packages/opencode/test/provider/hash-identity.test.ts b/packages/opencode/test/provider/hash-identity.test.ts new file mode 100644 index 000000000000..5c82a3630c67 --- /dev/null +++ b/packages/opencode/test/provider/hash-identity.test.ts @@ -0,0 +1,39 @@ +import { test, expect } from "bun:test" + +import { hashIdentity } from "@/provider/overlay" + +test("hashIdentity: stable for plain objects", () => { + const a = hashIdentity({ providerID: "x", npm: "@ai-sdk/x", options: { apiKey: "k" } }) + const b = hashIdentity({ providerID: "x", npm: "@ai-sdk/x", options: { apiKey: "k" } }) + expect(a).toBe(b) +}) + +test("hashIdentity: distinct for differing primitive values", () => { + const a = hashIdentity({ providerID: "x", options: { apiKey: "k1" } }) + const b = hashIdentity({ providerID: "x", options: { apiKey: "k2" } }) + expect(a).not.toBe(b) +}) + +test("hashIdentity: handles BigInt without throwing (regression for plugin options)", () => { + expect(() => hashIdentity({ providerID: "x", options: { someBig: 1n } })).not.toThrow() +}) + +test("hashIdentity: BigInt distinct from same-magnitude Number", () => { + const big = hashIdentity({ providerID: "x", options: { v: 1n } }) + const num = hashIdentity({ providerID: "x", options: { v: 1 } }) + expect(big).not.toBe(num) +}) + +test("hashIdentity: named functions disambiguate by name", () => { + function coalesceProvider() {} + function otherFactory() {} + const a = hashIdentity({ providerID: "x", options: { credentialProvider: coalesceProvider } }) + const b = hashIdentity({ providerID: "x", options: { 
credentialProvider: otherFactory } }) + expect(a).not.toBe(b) +}) + +test("hashIdentity: anonymous arrows collide intentionally (per-call fetch wrapper)", () => { + const a = hashIdentity({ providerID: "x", options: { fetch: () => undefined } }) + const b = hashIdentity({ providerID: "x", options: { fetch: () => undefined } }) + expect(a).toBe(b) +}) diff --git a/packages/opencode/test/provider/overlay.test.ts b/packages/opencode/test/provider/overlay.test.ts new file mode 100644 index 000000000000..ce352995da27 --- /dev/null +++ b/packages/opencode/test/provider/overlay.test.ts @@ -0,0 +1,170 @@ +import { test, expect } from "bun:test" + +import { resolveEnvOverlay, currentProviders } from "@/provider/overlay" +import type { Provider } from "@/provider/provider" +import { ProviderID } from "@/provider/schema" + +const baseInfo = (env: string[], source: Provider.Info["source"] = "env"): Provider.Info => ({ + id: ProviderID.make("x"), + name: "X", + source, + env, + options: {}, + models: {}, +}) + +test("resolveEnvOverlay: no cached, no apiKey -> undefined", () => { + expect(resolveEnvOverlay(undefined, baseInfo(["KEY"]), undefined)).toBeUndefined() +}) + +test("resolveEnvOverlay: no cached, apiKey present -> new env entry with key", () => { + const r = resolveEnvOverlay(undefined, baseInfo(["KEY"]), "abc") + expect(r?.source).toBe("env") + expect(r?.key).toBe("abc") +}) + +test("resolveEnvOverlay: no cached, apiKey present, multi-env candidate -> key undefined", () => { + const r = resolveEnvOverlay(undefined, baseInfo(["A", "B"]), "abc") + expect(r?.source).toBe("env") + expect(r?.key).toBeUndefined() +}) + +test("resolveEnvOverlay: cached env entry, apiKey removed -> drop", () => { + const cached = { ...baseInfo(["KEY"]), key: "old" } + expect(resolveEnvOverlay(cached, baseInfo(["KEY"]), undefined)).toBeUndefined() +}) + +test("resolveEnvOverlay: cached config entry, apiKey absent -> keep cached untouched", () => { + const cached = baseInfo(["KEY"], "config") + 
expect(resolveEnvOverlay(cached, baseInfo(["KEY"]), undefined)).toBe(cached) +}) + +test("resolveEnvOverlay: cached config entry without key, single-env apiKey -> fill key, keep source", () => { + const cached = { ...baseInfo(["KEY"], "config") } + const r = resolveEnvOverlay(cached, baseInfo(["KEY"]), "abc") + expect(r?.source).toBe("config") + expect(r?.key).toBe("abc") + expect(r).not.toBe(cached) +}) + +test("resolveEnvOverlay: cached api entry with existing key + env present -> cached wins (auth precedence)", () => { + const cached = { ...baseInfo(["KEY"], "api"), key: "auth-key" } + const r = resolveEnvOverlay(cached, baseInfo(["KEY"]), "env-key") + expect(r).toBe(cached) +}) + +test("resolveEnvOverlay: cached env entry, same apiKey -> identity (no churn)", () => { + const cached = { ...baseInfo(["KEY"]), key: "abc" } + expect(resolveEnvOverlay(cached, baseInfo(["KEY"]), "abc")).toBe(cached) +}) + +test("resolveEnvOverlay: cached env entry, rotated apiKey -> new entry with new key", () => { + const cached = { ...baseInfo(["KEY"]), key: "old" } + const r = resolveEnvOverlay(cached, baseInfo(["KEY"]), "new") + expect(r?.key).toBe("new") + expect(r).not.toBe(cached) +}) + +test("resolveEnvOverlay: cached config entry without key, multi-env apiKey -> cached untouched", () => { + const cached = baseInfo(["A", "B"], "config") + const r = resolveEnvOverlay(cached, baseInfo(["A", "B"]), "abc") + expect(r).toBe(cached) +}) + +test("resolveEnvOverlay: cached env entry, multi-env candidate -> existing key preserved", () => { + const cached = { ...baseInfo(["A", "B"]), key: "preserved" } + const r = resolveEnvOverlay(cached, baseInfo(["A", "B"]), "any-env-value") + expect(r).toBe(cached) + expect(r?.key).toBe("preserved") +}) + +test("currentProviders: late env adds provider drawn from cleanedDatabase", () => { + const candidate = baseInfo(["FOO_KEY"]) + const r = currentProviders( + { + cachedProviders: {} as Record, + cleanedDatabase: { foo: candidate } as Record, + }, 
+ { FOO_KEY: "k" }, + ) + expect(r["foo" as ProviderID]?.source).toBe("env") + expect(r["foo" as ProviderID]?.key).toBe("k") +}) + +test("currentProviders: removing env from process drops env-only entry", () => { + const cached = { ...baseInfo(["FOO_KEY"]), key: "k" } + const r = currentProviders( + { + cachedProviders: { foo: cached } as Record, + cleanedDatabase: { foo: baseInfo(["FOO_KEY"]) } as Record, + }, + {}, + ) + expect(r["foo" as ProviderID]).toBeUndefined() +}) + +test("currentProviders: cached api entry preserved across env presence/absence", () => { + const cached = { ...baseInfo(["FOO_KEY"], "api"), key: "auth" } + const env = currentProviders( + { + cachedProviders: { foo: cached } as Record, + cleanedDatabase: { foo: baseInfo(["FOO_KEY"]) } as Record, + }, + { FOO_KEY: "envk" }, + ) + expect(env["foo" as ProviderID]).toBe(cached) + + const noEnv = currentProviders( + { + cachedProviders: { foo: cached } as Record, + cleanedDatabase: { foo: baseInfo(["FOO_KEY"]) } as Record, + }, + {}, + ) + expect(noEnv["foo" as ProviderID]).toBe(cached) +}) + +test("currentProviders: whitespace-only env value treated as absent", () => { + const r = currentProviders( + { + cachedProviders: {} as Record, + cleanedDatabase: { foo: baseInfo(["FOO_KEY"]) } as Record, + }, + { FOO_KEY: " " }, + ) + expect(r["foo" as ProviderID]).toBeUndefined() +}) + +test("currentProviders: empty-string env value treated as absent", () => { + const r = currentProviders( + { + cachedProviders: {} as Record, + cleanedDatabase: { foo: baseInfo(["FOO_KEY"]) } as Record, + }, + { FOO_KEY: "" }, + ) + expect(r["foo" as ProviderID]).toBeUndefined() +}) + +test("currentProviders: surrounding-whitespace env value preserved verbatim (not trimmed)", () => { + const r = currentProviders( + { + cachedProviders: {} as Record, + cleanedDatabase: { foo: baseInfo(["FOO_KEY"]) } as Record, + }, + { FOO_KEY: " abc " }, + ) + expect(r["foo" as ProviderID]?.key).toBe(" abc ") +}) + +test("currentProviders: 
blank env skipped, falls through to next env in multi-env list", () => { + const r = currentProviders( + { + cachedProviders: {} as Record, + cleanedDatabase: { foo: baseInfo(["A", "B"]) } as Record, + }, + { A: " ", B: "real-key" }, + ) + expect(r["foo" as ProviderID]?.source).toBe("env") + expect(r["foo" as ProviderID]?.key).toBeUndefined() +}) diff --git a/packages/opencode/test/provider/provider.test.ts b/packages/opencode/test/provider/provider.test.ts index 579867b2a2e5..22e652b83c8a 100644 --- a/packages/opencode/test/provider/provider.test.ts +++ b/packages/opencode/test/provider/provider.test.ts @@ -4,6 +4,8 @@ import path from "path" import { disposeAllInstances, tmpdir } from "../fixture/fixture" import { Global } from "@opencode-ai/core/global" +import { Log } from "@opencode-ai/core/util/log" +import { Instance } from "../../src/project/instance" import type { InstanceContext } from "../../src/project/instance-context" import { WithInstance } from "../../src/project/with-instance" import { Plugin } from "../../src/plugin/index" @@ -2814,3 +2816,657 @@ test("opencode loader keeps paid models when auth exists", async () => { } } }) + +test("list() reflects env var set after first call", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + const before = await list() + expect(before[ProviderID.anthropic]).toBeUndefined() + + set("ANTHROPIC_API_KEY", "late-key") + const after = await list() + expect(after[ProviderID.anthropic]).toBeDefined() + expect(after[ProviderID.anthropic].source).toBe("env") + expect(after[ProviderID.anthropic].key).toBe("late-key") + + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("list() reflects env var removed after detection", async () => { + await using tmp = await 
tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + set("ANTHROPIC_API_KEY", "ephemeral-key") + const before = await list() + expect(before[ProviderID.anthropic]).toBeDefined() + + remove("ANTHROPIC_API_KEY") + const after = await list() + expect(after[ProviderID.anthropic]).toBeUndefined() + }, + }) +}) + +test("list() refreshes key when env value changes", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + set("ANTHROPIC_API_KEY", "first-key") + const first = await list() + expect(first[ProviderID.anthropic].key).toBe("first-key") + + set("ANTHROPIC_API_KEY", "rotated-key") + const second = await list() + expect(second[ProviderID.anthropic].key).toBe("rotated-key") + + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("list() reflects direct process.env mutation", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + const before = await list() + expect(before[ProviderID.anthropic]).toBeUndefined() + + process.env["ANTHROPIC_API_KEY"] = "direct-key" + const after = await list() + expect(after[ProviderID.anthropic]).toBeDefined() + expect(after[ProviderID.anthropic].source).toBe("env") + expect(after[ProviderID.anthropic].key).toBe("direct-key") + + delete process.env["ANTHROPIC_API_KEY"] + }, + }) +}) + +test("list() respects whitelist for late-detected env provider", async () => { + await 
using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + anthropic: { whitelist: ["nonexistent-model"] }, + }, + }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + const before = await list() + expect(before[ProviderID.anthropic]).toBeUndefined() + + set("ANTHROPIC_API_KEY", "key-with-whitelist") + const after = await list() + expect(after[ProviderID.anthropic]).toBeUndefined() + + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("list() respects disabled_providers for late-detected env provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + disabled_providers: ["anthropic"], + }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + set("ANTHROPIC_API_KEY", "key-but-disabled") + const providers = await list() + expect(providers[ProviderID.anthropic]).toBeUndefined() + + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("getProvider() reflects env var set after first call", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + const before = await getProvider(ProviderID.anthropic) + expect(before).toBeUndefined() + + set("ANTHROPIC_API_KEY", "key-via-getProvider") + const after = await getProvider(ProviderID.anthropic) + expect(after).toBeDefined() + expect(after!.source).toBe("env") + + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("getModel() resolves late-detected env provider", async () => { + await using tmp = await 
tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + await list() + set("ANTHROPIC_API_KEY", "late-key") + const model = await getModel(ProviderID.anthropic, ModelID.make("claude-sonnet-4-20250514")) + expect(model).toBeDefined() + expect(model.providerID).toBe(ProviderID.anthropic) + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("getLanguage() resolves late-detected env provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + await list() + set("ANTHROPIC_API_KEY", "late-key") + const model = await getModel(ProviderID.anthropic, ModelID.make("claude-sonnet-4-20250514")) + const lang = await getLanguage(model) + expect(lang).toBeDefined() + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("closest() resolves late-detected env provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: "https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + await list() + set("ANTHROPIC_API_KEY", "late-key") + const r = await closest(ProviderID.anthropic, ["sonnet-4-20250514"]) + expect(r?.providerID).toBe(ProviderID.anthropic) + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("getSmallModel() resolves late-detected env provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ $schema: 
"https://opencode.ai/config.json" }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + await list() + set("ANTHROPIC_API_KEY", "late-key") + const small = await getSmallModel(ProviderID.anthropic) + expect(small).toBeDefined() + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("defaultModel() finds late-detected env provider", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + enabled_providers: ["anthropic"], + }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("ANTHROPIC_API_KEY") + const before = await list() + expect(before[ProviderID.anthropic]).toBeUndefined() + set("ANTHROPIC_API_KEY", "late-key") + const d = await defaultModel() + expect(d).toBeDefined() + expect(d.providerID).toBe(ProviderID.anthropic) + remove("ANTHROPIC_API_KEY") + }, + }) +}) + +test("late-detected env provider does NOT expose alpha models without flag", async () => { + await using tmp = await tmpdir({ + init: async (dir) => { + await Bun.write( + path.join(dir, "opencode.json"), + JSON.stringify({ + $schema: "https://opencode.ai/config.json", + provider: { + "alpha-only": { + name: "Alpha Only", + npm: "@ai-sdk/openai-compatible", + api: "https://api.example.com/v1", + env: ["CUSTOM_API_KEY"], + models: { + active: { name: "Active" }, + experimental: { name: "Experimental", status: "alpha" as const }, + }, + }, + }, + }), + ) + }, + }) + await WithInstance.provide({ + directory: tmp.path, + fn: async () => { + remove("CUSTOM_API_KEY") + await list() + set("CUSTOM_API_KEY", "late-key") + const after = await list() + const p = after[ProviderID.make("alpha-only")] + expect(p).toBeDefined() + expect(p.models["active"]).toBeDefined() + expect(p.models["experimental"]).toBeUndefined() + remove("CUSTOM_API_KEY") + }, + }) +}) + 
+// A provider configured purely via env can appear after boot ("late-detected");
+// blacklisted models must still be filtered out of the late-built provider entry.
+test("late-detected env provider respects blacklist", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({
+          $schema: "https://opencode.ai/config.json",
+          provider: {
+            anthropic: { blacklist: ["claude-sonnet-4-20250514"] },
+          },
+        }),
+      )
+    },
+  })
+  await WithInstance.provide({
+    directory: tmp.path,
+    fn: async () => {
+      remove("ANTHROPIC_API_KEY")
+      // First list() runs with no key so anthropic is not cached as available.
+      await list()
+      set("ANTHROPIC_API_KEY", "late-key")
+      const after = await list()
+      expect(after[ProviderID.anthropic]).toBeDefined()
+      expect(after[ProviderID.anthropic].models["claude-sonnet-4-20250514"]).toBeUndefined()
+      remove("ANTHROPIC_API_KEY")
+    },
+  })
+})
+
+// Providers that need several cooperating credentials (bedrock here) are NOT
+// live-promoted when their env appears after boot — they stay hidden until restart.
+test("late-detected multi-env provider is excluded (requires restart)", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({ $schema: "https://opencode.ai/config.json" }),
+      )
+    },
+  })
+  await WithInstance.provide({
+    directory: tmp.path,
+    fn: async () => {
+      // Scrub every AWS credential source so bedrock cannot be detected at init.
+      ;[
+        "AWS_ACCESS_KEY_ID",
+        "AWS_SECRET_ACCESS_KEY",
+        "AWS_REGION",
+        "AWS_BEARER_TOKEN_BEDROCK",
+        "AWS_PROFILE",
+        "AWS_ROLE_ARN",
+        "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI",
+        "AWS_CONTAINER_CREDENTIALS_FULL_URI",
+        "AWS_WEB_IDENTITY_TOKEN_FILE",
+      ].forEach(remove)
+      const before = await list()
+      expect(before[ProviderID.amazonBedrock]).toBeUndefined()
+      set("AWS_ACCESS_KEY_ID", "AKIA-LATE")
+      const after = await list()
+      // Multi-credential providers (bedrock, sap-ai-core, azure, vertex) require an
+      // opencode restart for newly-set credentials to take effect. The custom
+      // loader emits credentialProvider/deploymentId/etc. only when creds are
+      // present at init, and those options are not re-derivable post-init from
+      // env alone. Live-promoting them via the env overlay would yield an
+      // unauthenticated SDK that fails silently at first request, so we
+      // honestly hide them.
+      expect(after[ProviderID.amazonBedrock]).toBeUndefined()
+      remove("AWS_ACCESS_KEY_ID")
+    },
+  })
+})
+
+// Precedence: a credential stored in auth.json beats a key that only shows up
+// in the environment after boot.
+test("auth source wins over late-detected env", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({ $schema: "https://opencode.ai/config.json" }),
+      )
+    },
+  })
+  const authPath = path.join(Global.Path.data, "auth.json")
+  // Preserve any real auth.json so this test is hermetic on developer machines.
+  let prev: string | undefined
+  try {
+    prev = await Filesystem.readText(authPath)
+  } catch {}
+  try {
+    await mkdir(path.dirname(authPath), { recursive: true })
+    await Filesystem.write(
+      authPath,
+      JSON.stringify({ anthropic: { type: "api", key: "auth-key" } }),
+    )
+    // Force a fresh instance so the new auth.json is actually read.
+    await disposeAllInstances()
+    await WithInstance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        remove("ANTHROPIC_API_KEY")
+        set("ANTHROPIC_API_KEY", "env-key")
+        const providers = await list()
+        expect(providers[ProviderID.anthropic]).toBeDefined()
+        expect(providers[ProviderID.anthropic].source).toBe("api")
+        expect(providers[ProviderID.anthropic].key).toBe("auth-key")
+        remove("ANTHROPIC_API_KEY")
+      },
+    })
+  } finally {
+    // Restore (or delete) auth.json and drop the instance that saw the fixture.
+    if (prev !== undefined) {
+      await Filesystem.write(authPath, prev)
+    } else {
+      try {
+        await unlink(authPath)
+      } catch {}
+    }
+    await disposeAllInstances()
+  }
+})
+
+// A late-detected provider must be a first-class entry: model metadata
+// (capabilities, variants) must match what an env-at-boot provider would have.
+test("late-detected env provider has variants populated (parity with env-at-boot)", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({ $schema: "https://opencode.ai/config.json" }),
+      )
+    },
+  })
+  await WithInstance.provide({
+    directory: tmp.path,
+    fn: async () => {
+      remove("ANTHROPIC_API_KEY")
+      await list()
+      set("ANTHROPIC_API_KEY", "late-key")
+      const after = await list()
+      const model = after[ProviderID.anthropic].models["claude-sonnet-4-20250514"]
+      expect(model).toBeDefined()
+      expect(model.capabilities.reasoning).toBe(true)
+      expect(model.variants).toBeDefined()
+      expect(Object.keys(model.variants!).length).toBeGreaterThan(0)
+      remove("ANTHROPIC_API_KEY")
+    },
+  })
+})
+
+// Env reads are live: dropping the key between model resolution and language
+// construction must surface as a rejection, not a silently-unauthenticated SDK.
+test("getLanguage rejects when env removed between getModel and getLanguage", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({ $schema: "https://opencode.ai/config.json" }),
+      )
+    },
+  })
+  await WithInstance.provide({
+    directory: tmp.path,
+    fn: async () => {
+      set("ANTHROPIC_API_KEY", "key")
+      const model = await getModel(ProviderID.anthropic, ModelID.make("claude-sonnet-4-20250514"))
+      remove("ANTHROPIC_API_KEY")
+      await expect(getLanguage(model)).rejects.toThrow()
+    },
+  })
+})
+
+// Key rotation must invalidate the cached language model: a new key yields a
+// new (distinct) language instance rather than reusing the stale one.
+test("getLanguage rebuilds language model after env key rotation", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({ $schema: "https://opencode.ai/config.json" }),
+      )
+    },
+  })
+  await WithInstance.provide({
+    directory: tmp.path,
+    fn: async () => {
+      set("ANTHROPIC_API_KEY", "first-key")
+      const model = await getModel(ProviderID.anthropic, ModelID.make("claude-sonnet-4-20250514"))
+      const lang1 = await getLanguage(model)
+      set("ANTHROPIC_API_KEY", "rotated-key")
+      const lang2 = await getLanguage(model)
+      expect(lang1).not.toBe(lang2)
+      remove("ANTHROPIC_API_KEY")
+    },
+  })
+})
+
+// Table of multi-credential providers that are never live-promoted: `cleanup`
+// lists every env var that could make the provider detectable at init, and
+// `setKey`/`setValue` is the single credential set late to prove exclusion.
+const RESTART_CASES = [
+  {
+    id: "azure",
+    providerID: ProviderID.azure,
+    cleanup: ["AZURE_RESOURCE_NAME"],
+    setKey: "AZURE_RESOURCE_NAME",
+    setValue: "late-resource",
+  },
+  {
+    id: "azure-cognitive-services",
+    providerID: ProviderID.make("azure-cognitive-services"),
+    cleanup: ["AZURE_COGNITIVE_SERVICES_RESOURCE_NAME"],
+    setKey: "AZURE_COGNITIVE_SERVICES_RESOURCE_NAME",
+    setValue: "late-cognitive",
+  },
+  {
+    id: "google-vertex",
+    providerID: ProviderID.googleVertex,
+    cleanup: [
+      "GOOGLE_VERTEX_PROJECT",
+      "GOOGLE_CLOUD_PROJECT",
+      "GCP_PROJECT",
+      "GCLOUD_PROJECT",
+      "GOOGLE_VERTEX_LOCATION",
+      "GOOGLE_CLOUD_LOCATION",
+      "VERTEX_LOCATION",
+      "GOOGLE_APPLICATION_CREDENTIALS",
+    ],
+    setKey: "GOOGLE_VERTEX_PROJECT",
+    setValue: "late-project",
+  },
+  {
+    id: "google-vertex-anthropic",
+    providerID: ProviderID.make("google-vertex-anthropic"),
+    cleanup: [
+      "GOOGLE_VERTEX_PROJECT",
+      "GOOGLE_CLOUD_PROJECT",
+      "GCP_PROJECT",
+      "GCLOUD_PROJECT",
+      "GOOGLE_VERTEX_LOCATION",
+      "GOOGLE_CLOUD_LOCATION",
+      "VERTEX_LOCATION",
+      "GOOGLE_APPLICATION_CREDENTIALS",
+    ],
+    setKey: "GOOGLE_CLOUD_PROJECT",
+    setValue: "late-project",
+  },
+  {
+    id: "sap-ai-core",
+    providerID: ProviderID.make("sap-ai-core"),
+    cleanup: ["AICORE_SERVICE_KEY", "AICORE_DEPLOYMENT_ID", "AICORE_RESOURCE_GROUP"],
+    setKey: "AICORE_SERVICE_KEY",
+    setValue: '{"clientid":"x","clientsecret":"y","url":"https://x"}',
+  },
+  {
+    id: "cloudflare-workers-ai",
+    providerID: ProviderID.make("cloudflare-workers-ai"),
+    cleanup: ["CLOUDFLARE_ACCOUNT_ID", "CLOUDFLARE_API_TOKEN"],
+    setKey: "CLOUDFLARE_ACCOUNT_ID",
+    setValue: "late-account",
+  },
+  {
+    id: "cloudflare-ai-gateway",
+    providerID: ProviderID.make("cloudflare-ai-gateway"),
+    cleanup: ["CLOUDFLARE_ACCOUNT_ID", "CLOUDFLARE_GATEWAY_ID", "CLOUDFLARE_API_TOKEN"],
+    setKey: "CLOUDFLARE_ACCOUNT_ID",
+    setValue: "late-account",
+  },
+] as const
+
+// One generated test per table row: provider absent before AND after the late
+// credential appears.
+for (const c of RESTART_CASES) {
+  test(`late-detected ${c.id} is excluded (requires restart)`, async () => {
+    await using tmp = await tmpdir({
+      init: async (dir) => {
+        await Bun.write(
+          path.join(dir, "opencode.json"),
+          JSON.stringify({ $schema: "https://opencode.ai/config.json" }),
+        )
+      },
+    })
+    await WithInstance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        c.cleanup.forEach(remove)
+        const before = await list()
+        expect(before[c.providerID]).toBeUndefined()
+        set(c.setKey, c.setValue)
+        const after = await list()
+        expect(after[c.providerID]).toBeUndefined()
+        remove(c.setKey)
+      },
+    })
+  })
+}
+
+// The "requires restart" warning must be deduplicated: repeated list() calls
+// with the same late credential emit exactly one warn for that provider.
+test("warnRestartRequired emits at most one warn per provider per instance", async () => {
+  await using tmp = await tmpdir({
+    init: async (dir) => {
+      await Bun.write(
+        path.join(dir, "opencode.json"),
+        JSON.stringify({ $schema: "https://opencode.ai/config.json" }),
+      )
+    },
+  })
+  const providerLog = Log.create({ service: "provider" })
+  const calls: { providerID: string; env: string }[] = []
+  const original = providerLog.warn
+  // Monkeypatch warn to capture restart warnings; fix: bare `Record` (stripped
+  // type arguments) is invalid TypeScript — `Record<string, any>` restores it.
+  providerLog.warn = (message?: any, extra?: Record<string, any>) => {
+    if (typeof message === "string" && message.includes("requires restart")) {
+      calls.push({ providerID: extra?.providerID, env: extra?.env })
+    }
+    return original.call(providerLog, message, extra)
+  }
+  try {
+    await WithInstance.provide({
+      directory: tmp.path,
+      fn: async () => {
+        ;["AICORE_SERVICE_KEY", "AICORE_DEPLOYMENT_ID", "AICORE_RESOURCE_GROUP"].forEach(remove)
+        await list()
+        set("AICORE_SERVICE_KEY", "late")
+        // Three further list() calls must not produce additional warnings.
+        await list()
+        await list()
+        await list()
+        const sapWarns = calls.filter((c) => c.providerID === "sap-ai-core")
+        expect(sapWarns.length).toBe(1)
+        expect(sapWarns[0].env).toBe("AICORE_SERVICE_KEY")
+        remove("AICORE_SERVICE_KEY")
+      },
+    })
+  } finally {
+    // Always restore the real logger, even if an assertion above threw.
+    providerLog.warn = original
+  }
+})