Skip to content
This repository was archived by the owner on Mar 19, 2026. It is now read-only.

Commit 9c20af1

Browse files
committed
fix(core): enhance response metadata handling and optimize provider model caching
1 parent ad7dc63 commit 9c20af1

5 files changed

Lines changed: 53 additions & 7 deletions

File tree

packages/core/src/generate/stream-text.ts

Lines changed: 20 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,6 +144,25 @@ export function streamMessageChunks(
144144
})
145145

146146
await writer.write(chunk)
147+
} else if (value.type === 'response-metadata') {
148+
await writer.write({
149+
role,
150+
content: '',
151+
metadata: {
152+
response_metadata: value.metadata
153+
},
154+
chunk: true
155+
})
156+
} else if (value.type === 'finish') {
157+
await writer.write({
158+
role,
159+
content: '',
160+
metadata: {
161+
usage: value.usage,
162+
finish_reason: value.finishReason
163+
},
164+
chunk: true
165+
})
147166
} else {
148167
// Pass through other types directly
149168
// For example, source parts would be passed through here
@@ -384,7 +403,7 @@ export function streamText({
384403
const metadata = {
385404
type: 'response-metadata',
386405
metadata: {
387-
timestamp: new Date(),
406+
timestamp: Date.now(),
388407
model: settings.modelId ?? model.model,
389408
// eslint-disable-next-line @typescript-eslint/no-explicit-any
390409
responseType: currentPartType as any

packages/core/src/language-models/index.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -62,8 +62,8 @@ export type LanguageModelUsage = {
6262

6363
export type LanguageResponseMetadata = {
6464
id?: string
65-
timestamp?: Date
66-
model?: string
65+
timestamp: number
66+
model: string
6767
responseType: 'text' | 'reasoning' | 'source' | 'tool-call'
6868
}
6969

packages/core/src/provider/registry.ts

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,22 @@ export interface Provider<T extends ProviderConfig = ProviderConfig> {
1717
export class DefaultProviderRegistry implements Provider {
1818
private providers: Record<string, Provider> = {}
1919

20-
private _providerModels: Record<string, PlatformModelInfo[]> = {}
20+
private _providerModels: Record<string, PlatformModelInfo[]> = new Proxy(
21+
{},
22+
{
23+
get(target, prop) {
24+
return Reflect.get(target, prop)
25+
},
26+
set(target, prop, value) {
27+
try {
28+
throw new Error(`6667`)
29+
} catch (err) {
30+
console.error(err)
31+
}
32+
return Reflect.set(target, prop, value)
33+
}
34+
}
35+
)
2136

2237
providerName: string = 'default'
2338

@@ -77,7 +92,7 @@ export class DefaultProviderRegistry implements Provider {
7792
throw new Error(
7893
`No such language model: ${modelId}. Available models: ${Object.entries(
7994
this._providerModels
80-
).flatMap(([k, v]) => v.map((m) => `${k}/${m.name}`))})}`
95+
).flatMap(([k, v]) => v.map((m) => `${k}:${m.name}`))}`
8196
)
8297
}
8398
return model
@@ -106,6 +121,7 @@ export class DefaultProviderRegistry implements Provider {
106121
for (const providerId in this.providers) {
107122
if (this._providerModels[providerId]) {
108123
const cachedModels = this._providerModels[providerId]
124+
console.log(`[${providerId}] use cached models`, cachedModels)
109125
result.push(...cachedModels)
110126
promises.push(Promise.resolve(cachedModels))
111127
continue
@@ -121,16 +137,24 @@ export class DefaultProviderRegistry implements Provider {
121137
})
122138
)
123139

140+
console.log(
141+
`[${providerId}] use hardcode models`,
142+
cachePlatformModels
143+
)
144+
124145
result.push(...cachePlatformModels)
125146
this._providerModels[providerId] = cachePlatformModels
126147

127148
const latestPlatformModels = latestModels.then((models) => {
128149
const platformModels = models.map((model) => ({
129150
...model,
130151
provider: providerId
131-
})) as PlatformModelInfo[]
152+
}))
132153

133154
this._providerModels[providerId] = platformModels
155+
156+
console.log(`[${providerId}] use latest models`, platformModels)
157+
134158
return platformModels
135159
})
136160
promises.push(latestPlatformModels)

packages/openai-compatible/src/get-latest-models.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,9 @@ export async function getLatestModels(
4949
.map((model): ModelInfo => {
5050
// TODO: check gemini, deepseek, ... to get the correct context token
5151
const modelId = model.id
52+
53+
// Watch for modelId changes
54+
5255
return {
5356
name: modelId,
5457
type:

packages/openai-compatible/src/provider.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ export function createOpenAICompatibleProvider(
9292
modelId,
9393
provider as OpenAICompatibleProvider,
9494
{
95-
batchSize: 200
95+
batchSize: 20
9696
},
9797
fetch
9898
)

0 commit comments

Comments
 (0)