From b6610e68c1f6ea473080377d779b3cc65e436dda Mon Sep 17 00:00:00 2001
From: longsizhuo
Date: Thu, 16 Apr 2026 19:40:37 +0000
Subject: [PATCH] =?UTF-8?q?fix(assistant):=20=E5=9B=9E=E5=BA=94=20PR=20#29?=
 =?UTF-8?q?3=20CR=20=E2=80=94=20=E5=8C=BF=E5=90=8D=E7=9F=AD=E8=B7=AF?=
 =?UTF-8?q?=E4=B8=8D=E6=8A=9B=E9=94=99=20+=20ZHIPU=5FAPI=5FKEY=20=E6=A0=A1?=
 =?UTF-8?q?=E9=AA=8C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Copilot 在 PR #293 指出两点,follow-up 修复:

- **app/api/chat/route.ts**:原先用 `throw new Error("Anonymous request")`
  触发 fallback,副作用是每个匿名请求都被 catch 打成带 stack 的
  "Java Backend unavailable" warn,生产日志会刷爆。改成显式 if-else 分支
  直接短路到本地推理,不抛错,只打一行 info 级日志。顺带清理了多余的
  x-satoken 三元展开。

- **lib/ai/providers/intern.ts**:原先把 process.env.ZHIPU_API_KEY 直接
  喂给 createOpenAICompatible,漏配时下游会 401/500,UI 上依旧表现为
  "unauthorized" —— 这正是 issue #285 要根治的症状,不能再放任。加了
  显式校验,缺失时抛出带 Vercel 配置指引的错误。
---
 app/api/chat/route.ts      | 99 ++++++++++++++++++++------------------
 lib/ai/providers/intern.ts | 14 +++++-
 2 files changed, 65 insertions(+), 48 deletions(-)

diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts
index fb144d0..af6e1bc 100644
--- a/app/api/chat/route.ts
+++ b/app/api/chat/route.ts
@@ -35,60 +35,65 @@ export async function POST(req: Request) {
   // ====== 尝试优雅降级代理到 Java 后端 ======
   // Java 后端 /openai/responses/stream 带 @SaCheckLogin,匿名请求必 401;
   // 直接跳过代理省掉 5s 超时,也避免 401 文案被上游误显示为"unauthorized"。
-  const hasAuthToken = Boolean(req.headers.get("x-satoken"));
-  try {
-    if (!hasAuthToken) {
-      throw new Error("Anonymous request, skip backend proxy.");
-    }
-    const backendUrl = process.env.BACKEND_URL;
-    if (!backendUrl) throw new Error("BACKEND_URL is not configured.");
+  // 匿名分支走显式 if 短路,不进 try/catch —— 否则每个匿名请求都会被 catch
+  // 打成 "Java Backend unavailable" 带 stack 的 warn,生产日志会刷爆
+  // (Copilot CR #1)。
+  const satoken = req.headers.get("x-satoken");
+  if (!satoken) {
+    console.log(
+      "[Chat Fallback Proxy] ⏭️ Anonymous request, skip backend proxy, use local inference.",
+    );
+  } else {
+    try {
+      const backendUrl = process.env.BACKEND_URL;
+      if (!backendUrl) throw new Error("BACKEND_URL is not configured.");
 
-    const controller = new AbortController();
-    const timeoutId = setTimeout(() => controller.abort(), 5000); // 5秒超时
+      const controller = new AbortController();
+      const timeoutId = setTimeout(() => controller.abort(), 5000); // 5秒超时
 
-    // 原封不动把前端的参数丢给 Java
-    let proxyRes: Response;
-    try {
-      proxyRes = await fetch(`${backendUrl}/openai/responses/stream`, {
-        method: "POST",
-        headers: {
-          "Content-Type": "application/json",
-          // 浏览器侧用 x-satoken 传递 token,转发给后端时改回后端期望的 satoken
-          ...(req.headers.get("x-satoken")
-            ? { satoken: req.headers.get("x-satoken")! }
-            : {}),
-        },
-        body: await proxyReq.text(),
-        signal: controller.signal,
-      });
-    } finally {
-      // 无论成功还是抛出(网络错误/超时中断),都清除定时器
-      clearTimeout(timeoutId);
-    }
+      // 原封不动把前端的参数丢给 Java
+      let proxyRes: Response;
+      try {
+        proxyRes = await fetch(`${backendUrl}/openai/responses/stream`, {
+          method: "POST",
+          headers: {
+            "Content-Type": "application/json",
+            // 浏览器侧用 x-satoken 传递 token,转发给后端时改回后端期望的 satoken
+            satoken,
+          },
+          body: await proxyReq.text(),
+          signal: controller.signal,
+        });
+      } finally {
+        // 无论成功还是抛出(网络错误/超时中断),都清除定时器
+        clearTimeout(timeoutId);
+      }
 
-    // 如果 Java 后端返回成功,则直接把它的流传回浏览器,提前结束
-    if (proxyRes.ok && proxyRes.body) {
-      console.log(
-        "[Chat Fallback Proxy] 🚀 Java Backend responded successfully. Piping stream...",
-      );
-      return new Response(proxyRes.body, {
-        headers: {
-          "Content-Type":
-            proxyRes.headers.get("Content-Type") || "text/plain; charset=utf-8",
-        },
-      });
-    } else {
+      // 如果 Java 后端返回成功,则直接把它的流传回浏览器,提前结束
+      if (proxyRes.ok && proxyRes.body) {
+        console.log(
+          "[Chat Fallback Proxy] 🚀 Java Backend responded successfully. Piping stream...",
+        );
+        return new Response(proxyRes.body, {
+          headers: {
+            "Content-Type":
+              proxyRes.headers.get("Content-Type") ||
+              "text/plain; charset=utf-8",
+          },
+        });
+      } else {
+        console.warn(
+          `[Chat Fallback Proxy] ⚠️ Java Backend returned status: ${proxyRes.status}, fallback to local Next.js inference.`,
+        );
+      }
+    } catch (error) {
       console.warn(
-        `[Chat Fallback Proxy] ⚠️ Java Backend returned status: ${proxyRes.status}, fallback to local Next.js inference.`,
+        `[Chat Fallback Proxy] ❌ Java Backend unavailable or timed out, fallback to local Next.js inference. Error:`,
+        error,
       );
     }
-  } catch (error) {
-    console.warn(
-      `[Chat Fallback Proxy] ❌ Java Backend unavailable or timed out, fallback to local Next.js inference. Error:`,
-      error,
-    );
   }
 
-  // ====== 代理失败,继续往下走,启用备选方案(本地直连 AI)======
+  // ====== 代理失败/匿名短路,继续往下走,启用备选方案(本地直连 AI)======
 
   try {
     // 先把 body 消费掉,再并行验证用户身份
diff --git a/lib/ai/providers/intern.ts b/lib/ai/providers/intern.ts
index 4e4956e..b9a8920 100644
--- a/lib/ai/providers/intern.ts
+++ b/lib/ai/providers/intern.ts
@@ -23,10 +23,22 @@ export function createInternModel() {
     return deepseek("deepseek-chat");
   }
 
+  // 显式校验 ZHIPU_API_KEY:若漏配,下游 401 又会在 UI 上变成 "unauthorized"
+  // 透传 —— 正好绕回 issue #285 原本要修的症状。在这里早抛出带指引的错误,
+  // 运维看日志一眼知道补哪个 env var,避免二次塌房(Copilot CR #2)。
+  const zhipuApiKey = process.env.ZHIPU_API_KEY;
+  if (!zhipuApiKey || zhipuApiKey.trim() === "") {
+    throw new Error(
+      "Missing required environment variable ZHIPU_API_KEY. " +
+        "配置位置:Vercel Project Settings → Environment Variables。" +
+        "免费 key 从 https://open.bigmodel.cn/ 获取。",
+    );
+  }
+
   const glm = createOpenAICompatible({
     name: "zhipu",
     baseURL: "https://open.bigmodel.cn/api/paas/v4/",
-    apiKey: process.env.ZHIPU_API_KEY,
+    apiKey: zhipuApiKey,
   });
 
   return glm("glm-4.6v-flash");