From 59ab6642bedc683bcabef27a7b8ca640e4fa8906 Mon Sep 17 00:00:00 2001 From: cepvor Date: Sat, 16 May 2026 12:02:18 +0800 Subject: [PATCH] =?UTF-8?q?fix:=20OpenAI=20=E8=B7=AF=E5=BE=84=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0=E9=98=B2=E5=BE=A1=E6=80=A7=20usage=20=E5=90=88?= =?UTF-8?q?=E5=B9=B6=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 将 message_delta 处理器中的 `{ ...usage, ...deltaUsage }` spread 替换为 updateOpenAIUsage() 函数,与 claude.ts 的 updateUsage() 模式 保持一致。 当前适配器发送的 message_delta 包含完整字段,spread 不会丢失数据。 但若未来适配器在某事件中省略了 cache 字段(传 explicit 0),spread 会无声覆盖有效值。新函数仅在 delta 字段有意义值时更新,否则保留 当前值。 Co-Authored-By: deepseek-v4-pro --- src/services/api/openai/index.ts | 45 +++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/src/services/api/openai/index.ts b/src/services/api/openai/index.ts index 520290b189..3e8225f516 100644 --- a/src/services/api/openai/index.ts +++ b/src/services/api/openai/index.ts @@ -130,6 +130,46 @@ function isOpenAIConvertibleMessage( return msg.type === 'assistant' || msg.type === 'user' } +/** + * Merge delta usage into accumulated usage, preserving cache fields from + * previous values when the delta carries explicit zeroes. + * + * Mirrors updateUsage() in claude.ts: Anthropic's streaming API may send + * explicit 0 for cache fields in message_delta events, which should not + * overwrite valid values from message_start. OpenAI-compatible endpoints + * don't currently exhibit this behavior, but defensive field-level merging + * prevents a future adapter change from silently zeroing cache data. 
+ */ +function updateOpenAIUsage( + current: { + input_tokens: number + output_tokens: number + cache_creation_input_tokens: number + cache_read_input_tokens: number + }, + delta: { + input_tokens?: number + output_tokens?: number + cache_creation_input_tokens?: number + cache_read_input_tokens?: number + }, +): typeof current { + return { + input_tokens: delta.input_tokens ?? current.input_tokens, + output_tokens: delta.output_tokens ?? current.output_tokens, + cache_creation_input_tokens: + delta.cache_creation_input_tokens !== undefined && + delta.cache_creation_input_tokens > 0 + ? delta.cache_creation_input_tokens + : current.cache_creation_input_tokens, + cache_read_input_tokens: + delta.cache_read_input_tokens !== undefined && + delta.cache_read_input_tokens > 0 + ? delta.cache_read_input_tokens + : current.cache_read_input_tokens, + } +} + /** * Assemble the final AssistantMessage (and optional max_tokens error) from * accumulated stream state. Extracted to avoid duplication between the @@ -449,7 +489,10 @@ export async function* queryModelOpenAI( case 'message_delta': { const deltaUsage = (event as any).usage if (deltaUsage) { - usage = { ...usage, ...deltaUsage } + // Defensive merge: only update fields that are present and meaningful. + // Matches the pattern in claude.ts updateUsage() — prevents a future + // adapter change from silently zeroing cache fields via spread. + usage = updateOpenAIUsage(usage, deltaUsage) } if ((event as any).delta?.stop_reason != null) { stopReason = (event as any).delta.stop_reason