diff --git a/README.md b/README.md
index b9e994e50..fce62ba37 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
 [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
 [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
 
-[Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [Open in Gitpod](https://www.bt.cn/new/download.html)
+[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [BT Deploy Install](https://www.bt.cn/new/download.html) [Deploy to Alibaba Cloud](https://computenest.aliyun.com/market/service-f1c9b75e59814dc49d52)
 
 [](https://monica.im/?utm=nxcrp)
 
@@ -301,6 +301,14 @@ iflytek Api Key.
 
 iflytek Api Secret.
 
+### `CHATGLM_API_KEY` (optional)
+
+ChatGLM Api Key.
+
+### `CHATGLM_URL` (optional)
+
+ChatGLM Api Url.
+
 ### `HIDE_USER_API_KEY` (optional)
 
 > Default: Empty
@@ -397,6 +405,9 @@ yarn dev
 
 > [简体中文 > 如何部署到私人服务器](./README_CN.md#部署)
 
+### BT Install
+> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
+
 ### Docker (Recommended)
 
 ```shell
diff --git a/README_CN.md b/README_CN.md
index 3f339ea61..d4da8b9da 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -184,6 +184,13 @@ ByteDance Api Url.
 
 讯飞星火Api Secret.
 
+### `CHATGLM_API_KEY` (可选)
+
+ChatGLM Api Key.
+
+### `CHATGLM_URL` (可选)
+
+ChatGLM Api Url.
 
 ### `HIDE_USER_API_KEY` (可选)
 
@@ -264,6 +271,9 @@ BASE_URL=https://b.nextweb.fun/api/proxy
 
 ## 部署
 
+### 宝塔面板部署
+> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
+
 ### 容器部署 (推荐)
 
 > Docker 版本需要在 20 及其以上,否则会提示找不到镜像。
diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts
index 5ac248d0c..3017fd371 100644
--- a/app/api/[provider]/[...path]/route.ts
+++ b/app/api/[provider]/[...path]/route.ts
@@ -11,6 +11,7 @@ import { handle as moonshotHandler } from "../../moonshot";
 import { handle as stabilityHandler } from "../../stability";
 import { handle as iflytekHandler } from "../../iflytek";
 import { handle as xaiHandler } from "../../xai";
+import { handle as chatglmHandler } from "../../glm";
 import { handle as proxyHandler } from "../../proxy";
 
 async function handle(
@@ -41,6 +42,8 @@ async function handle(
       return iflytekHandler(req, { params });
     case ApiPath.XAI:
       return xaiHandler(req, { params });
+    case ApiPath.ChatGLM:
+      return chatglmHandler(req, { params });
     case ApiPath.OpenAI:
       return openaiHandler(req, { params });
     default:
diff --git a/app/api/auth.ts b/app/api/auth.ts
index d4ac66a11..6703b64bd 100644
--- a/app/api/auth.ts
+++ b/app/api/auth.ts
@@ -95,6 +95,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) {
     case ModelProvider.XAI:
       systemApiKey = serverConfig.xaiApiKey;
       break;
+    case ModelProvider.ChatGLM:
+      systemApiKey = serverConfig.chatglmApiKey;
+      break;
     case ModelProvider.GPT:
     default:
       if (req.nextUrl.pathname.includes("azure/deployments")) {
diff --git a/app/api/common.ts b/app/api/common.ts
index b4c792d6f..495a12ccd 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -1,8 +1,8 @@
 import { NextRequest, NextResponse } from "next/server";
 import { getServerSideConfig } from "../config/server";
 import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
-import { isModelAvailableInServer } from "../utils/model";
 import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
+import { getModelProvider, isModelAvailableInServer } from "../utils/model";
 
 const serverConfig = getServerSideConfig();
 
@@ -71,7 +71,7 @@ export async function requestOpenai(req: NextRequest) {
       .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName))
       .forEach((m) => {
         const [fullName, displayName] = m.split("=");
-        const [_, providerName] = fullName.split("@");
+        const [_, providerName] = getModelProvider(fullName);
         if (providerName === "azure" && !displayName) {
           const [_, deployId] = (serverConfig?.azureUrl ?? "").split(
             "deployments/",
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.ChatGLM as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[GLM] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/client/api.ts b/app/client/api.ts index 4238c2a26..1da81e964 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -21,6 +21,7 @@ import { HunyuanApi } from "./platforms/tencent"; import { MoonshotApi } from "./platforms/moonshot"; import { SparkApi } from "./platforms/iflytek"; import { XAIApi } from "./platforms/xai"; +import { ChatGLMApi } from "./platforms/glm"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -69,7 +70,7 @@ export interface ChatOptions { config: LLMConfig; onUpdate?: (message: string, chunk: string) => void; - onFinish: (message: string) => void; + onFinish: (message: string, responseRes: Response) => void; onError?: (err: Error) => void; onController?: (controller: AbortController) => void; onBeforeTool?: (tool: ChatMessageTool) => void; @@ -156,6 +157,9 @@ export class ClientApi { case ModelProvider.XAI: this.llm = new XAIApi(); break; + case ModelProvider.ChatGLM: + this.llm = new ChatGLMApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -244,6 +248,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot; const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek; const isXAI = modelConfig.providerName === ServiceProvider.XAI; + const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM; const isEnabledAccessControl = accessStore.enabledAccessControl(); const apiKey = isGoogle ? accessStore.googleApiKey @@ -259,6 +264,8 @@ export function getHeaders(ignoreHeaders: boolean = false) { ? accessStore.moonshotApiKey : isXAI ? accessStore.xaiApiKey + : isChatGLM + ? accessStore.chatglmApiKey : isIflytek ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret ? 
diff --git a/app/client/api.ts b/app/client/api.ts
index 4238c2a26..1da81e964 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -21,6 +21,7 @@ import { HunyuanApi } from "./platforms/tencent";
 import { MoonshotApi } from "./platforms/moonshot";
 import { SparkApi } from "./platforms/iflytek";
 import { XAIApi } from "./platforms/xai";
+import { ChatGLMApi } from "./platforms/glm";
 
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
@@ -69,7 +70,7 @@ export interface ChatOptions {
   config: LLMConfig;
 
   onUpdate?: (message: string, chunk: string) => void;
-  onFinish: (message: string) => void;
+  onFinish: (message: string, responseRes: Response) => void;
   onError?: (err: Error) => void;
   onController?: (controller: AbortController) => void;
   onBeforeTool?: (tool: ChatMessageTool) => void;
@@ -156,6 +157,9 @@ export class ClientApi {
       case ModelProvider.XAI:
         this.llm = new XAIApi();
         break;
+      case ModelProvider.ChatGLM:
+        this.llm = new ChatGLMApi();
+        break;
       default:
         this.llm = new ChatGPTApi();
     }
@@ -244,6 +248,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
   const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
   const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
   const isXAI = modelConfig.providerName === ServiceProvider.XAI;
+  const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
   const isEnabledAccessControl = accessStore.enabledAccessControl();
   const apiKey = isGoogle
     ? accessStore.googleApiKey
@@ -259,6 +264,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
           ? accessStore.moonshotApiKey
           : isXAI
             ? accessStore.xaiApiKey
+            : isChatGLM
+              ? accessStore.chatglmApiKey
             : isIflytek
               ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret
                 ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret
@@ -274,6 +281,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
     isMoonshot,
     isIflytek,
     isXAI,
+    isChatGLM,
     apiKey,
     isEnabledAccessControl,
   };
@@ -338,6 +346,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
       return new ClientApi(ModelProvider.Iflytek);
     case ServiceProvider.XAI:
       return new ClientApi(ModelProvider.XAI);
+    case ServiceProvider.ChatGLM:
+      return new ClientApi(ModelProvider.ChatGLM);
     default:
       return new ClientApi(ModelProvider.GPT);
   }
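Widening `onFinish` to `(message, responseRes)` is a breaking change to the `ChatOptions` contract: every `LLMApi` implementation in the hunks below is updated to pass the `Response` through, so callers can inspect the HTTP status of a finished request. A sketch of a caller under the new signature, with names and the config shape assumed:

```ts
import { ClientApi } from "@/app/client/api";
import { ModelProvider } from "@/app/constant";

// Sketch only: what the extra argument enables for ChatOptions consumers.
// The store code that actually inspects responseRes is not in this excerpt.
const api = new ClientApi(ModelProvider.GPT);
api.llm.chat({
  messages: [{ role: "user", content: "hi" }],
  config: { model: "gpt-4o-mini", stream: true } as any, // LLMConfig shape assumed
  onUpdate(message, chunk) {
    console.log("partial:", chunk);
  },
  onFinish(message, responseRes) {
    if (responseRes.status !== 200) {
      console.error("upstream returned", responseRes.status, message);
      return;
    }
    console.log("done:", message);
  },
  onError(err) {
    console.error(err);
  },
});
```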
diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts
index 86229a147..6fe69e87a 100644
--- a/app/client/platforms/alibaba.ts
+++ b/app/client/platforms/alibaba.ts
@@ -143,6 +143,7 @@ export class QwenApi implements LLMApi {
       let responseText = "";
       let remainText = "";
       let finished = false;
+      let responseRes: Response;
 
       // animate response to make it looks smooth
       function animateResponseText() {
@@ -172,7 +173,7 @@ export class QwenApi implements LLMApi {
       const finish = () => {
         if (!finished) {
           finished = true;
-          options.onFinish(responseText + remainText);
+          options.onFinish(responseText + remainText, responseRes);
         }
       };
 
@@ -188,6 +189,7 @@ export class QwenApi implements LLMApi {
             "[Alibaba] request response content type: ",
             contentType,
           );
+          responseRes = res;
 
           if (contentType?.startsWith("text/plain")) {
             responseText = await res.clone().text();
@@ -254,7 +256,7 @@ export class QwenApi implements LLMApi {
 
         const resJson = await res.json();
         const message = this.extractMessage(resJson);
-        options.onFinish(message);
+        options.onFinish(message, res);
       }
     } catch (e) {
       console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts
index 3645cbe6e..6747221a8 100644
--- a/app/client/platforms/anthropic.ts
+++ b/app/client/platforms/anthropic.ts
@@ -317,13 +317,14 @@ export class ClaudeApi implements LLMApi {
       };
 
       try {
-        controller.signal.onabort = () => options.onFinish("");
+        controller.signal.onabort = () =>
+          options.onFinish("", new Response(null, { status: 400 }));
 
         const res = await fetch(path, payload);
         const resJson = await res.json();
 
         const message = this.extractMessage(resJson);
-        options.onFinish(message);
+        options.onFinish(message, res);
       } catch (e) {
         console.error("failed to chat", e);
         options.onError?.(e as Error);
diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts
index 2511a696b..9e8c2f139 100644
--- a/app/client/platforms/baidu.ts
+++ b/app/client/platforms/baidu.ts
@@ -162,6 +162,7 @@ export class ErnieApi implements LLMApi {
       let responseText = "";
       let remainText = "";
       let finished = false;
+      let responseRes: Response;
 
       // animate response to make it looks smooth
       function animateResponseText() {
@@ -191,7 +192,7 @@ export class ErnieApi implements LLMApi {
       const finish = () => {
         if (!finished) {
           finished = true;
-          options.onFinish(responseText + remainText);
+          options.onFinish(responseText + remainText, responseRes);
         }
       };
 
@@ -204,7 +205,7 @@ export class ErnieApi implements LLMApi {
           clearTimeout(requestTimeoutId);
           const contentType = res.headers.get("content-type");
           console.log("[Baidu] request response content type: ", contentType);
-
+          responseRes = res;
           if (contentType?.startsWith("text/plain")) {
             responseText = await res.clone().text();
             return finish();
@@ -267,7 +268,7 @@ export class ErnieApi implements LLMApi {
 
         const resJson = await res.json();
         const message = resJson?.result;
-        options.onFinish(message);
+        options.onFinish(message, res);
       }
     } catch (e) {
       console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts
index 000a9e278..a2f0660d8 100644
--- a/app/client/platforms/bytedance.ts
+++ b/app/client/platforms/bytedance.ts
@@ -130,6 +130,7 @@ export class DoubaoApi implements LLMApi {
       let responseText = "";
       let remainText = "";
       let finished = false;
+      let responseRes: Response;
 
       // animate response to make it looks smooth
       function animateResponseText() {
@@ -159,7 +160,7 @@ export class DoubaoApi implements LLMApi {
       const finish = () => {
         if (!finished) {
           finished = true;
-          options.onFinish(responseText + remainText);
+          options.onFinish(responseText + remainText, responseRes);
         }
       };
 
@@ -175,7 +176,7 @@ export class DoubaoApi implements LLMApi {
             "[ByteDance] request response content type: ",
             contentType,
           );
-
+          responseRes = res;
           if (contentType?.startsWith("text/plain")) {
             responseText = await res.clone().text();
             return finish();
@@ -241,7 +242,7 @@ export class DoubaoApi implements LLMApi {
 
         const resJson = await res.json();
         const message = this.extractMessage(resJson);
-        options.onFinish(message);
+        options.onFinish(message, res);
       }
     } catch (e) {
       console.log("[Request] failed to make a chat request", e);
""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + console.log("[Request] glm payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(ChatGLM.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message, res); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index a4b594ddf..53ff00aee 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -274,7 +274,7 @@ export class GeminiProApi implements LLMApi { ); } const message = apiClient.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { 
console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts index 55a39d0cc..cfc37b3b2 100644 --- a/app/client/platforms/iflytek.ts +++ b/app/client/platforms/iflytek.ts @@ -117,6 +117,7 @@ export class SparkApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // Animate response text to make it look smooth function animateResponseText() { @@ -143,7 +144,7 @@ export class SparkApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -156,7 +157,7 @@ export class SparkApi implements LLMApi { clearTimeout(requestTimeoutId); const contentType = res.headers.get("content-type"); console.log("[Spark] request response content type: ", contentType); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -231,7 +232,7 @@ export class SparkApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index 22a34b2e2..b6812c0d7 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -180,7 +180,7 @@ export class MoonshotApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 30f7415c1..7c1588440 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -65,6 +65,7 @@ export interface RequestPayload { frequency_penalty: number; top_p: number; max_tokens?: number; + max_completion_tokens?: number; } export interface DalleRequestPayload { @@ -233,6 +234,11 @@ export class ChatGPTApi implements LLMApi { // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. 
}; + // O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs) + if (isO1) { + requestPayload["max_completion_tokens"] = modelConfig.max_tokens; + } + // add max_tokens to vision model if (visionModel) { requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); @@ -361,7 +367,7 @@ export class ChatGPTApi implements LLMApi { const resJson = await res.json(); const message = await this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index 3610fac0a..580844a5b 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts @@ -142,6 +142,7 @@ export class HunyuanApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -171,7 +172,7 @@ export class HunyuanApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -187,7 +188,7 @@ export class HunyuanApi implements LLMApi { "[Tencent] request response content type: ", contentType, ); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -253,7 +254,7 @@ export class HunyuanApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts index deb74e66c..06dbaaa29 100644 --- a/app/client/platforms/xai.ts +++ b/app/client/platforms/xai.ts @@ -173,7 +173,7 @@ export class XAIApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/components/auth.tsx b/app/components/auth.tsx index 539a52eec..5375bda3f 100644 --- a/app/components/auth.tsx +++ b/app/components/auth.tsx @@ -18,6 +18,8 @@ import { trackSettingsPageGuideToCPaymentClick, trackAuthorizationPageButtonToCPaymentClick, } from "../utils/auth-settings-events"; +import clsx from "clsx"; + const storage = safeLocalStorage(); export function AuthPage() { @@ -54,7 +56,7 @@ export function AuthPage() { onClick={() => navigate(Path.Home)} > -
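The openai.ts hunk above moves the token cap for o1-family models from `max_tokens` to `max_completion_tokens`, which also budgets the model's hidden reasoning tokens per OpenAI's reasoning guide. An illustrative payload comparison, with the values and `isO1` check assumed:

```ts
// Illustrative only: where the cap ends up after the openai.ts change above.
const modelConfig = { max_tokens: 4000 }; // assumed shape
const isO1 = true; // e.g. options.config.model.startsWith("o1")

const payload: Record<string, unknown> = {
  model: "o1-mini",
  messages: [{ role: "user", content: "hi" }],
};
if (isO1) {
  // reasoning models: one cap covers completion plus hidden reasoning tokens
  payload.max_completion_tokens = modelConfig.max_tokens;
} else {
  payload.max_tokens = modelConfig.max_tokens;
}
```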
diff --git a/app/components/auth.tsx b/app/components/auth.tsx
index 539a52eec..5375bda3f 100644
--- a/app/components/auth.tsx
+++ b/app/components/auth.tsx
@@ -18,6 +18,8 @@ import {
   trackSettingsPageGuideToCPaymentClick,
   trackAuthorizationPageButtonToCPaymentClick,
 } from "../utils/auth-settings-events";
+import clsx from "clsx";
+
 const storage = safeLocalStorage();
 
 export function AuthPage() {
@@ -54,7 +56,7 @@ export function AuthPage() {
         onClick={() => navigate(Path.Home)}
       >
-
+
@@ -163,7 +165,7 @@ function TopBanner() {
       onMouseEnter={handleMouseEnter}
       onMouseLeave={handleMouseLeave}
     >
-
+
       {Locale.Auth.TopTips}
diff --git a/app/components/button.tsx b/app/components/button.tsx
index 87b4abd30..157d5d73d 100644
--- a/app/components/button.tsx
+++ b/app/components/button.tsx
@@ -2,6 +2,7 @@ import * as React from "react";
 import styles from "./button.module.scss";
 import { CSSProperties } from "react";
+import clsx from "clsx";
 
 export type ButtonType = "primary" | "danger" | null;
 
@@ -22,12 +23,16 @@ export function IconButton(props: {
 }) {
   return (
diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx
index 03b1a5c88..63dc4d5ff 100644
--- a/app/components/chat-list.tsx
+++ b/app/components/chat-list.tsx
@@ -18,6 +18,7 @@ import { Mask } from "../store/mask";
 import { useRef, useEffect } from "react";
 import { showConfirm } from "./ui-lib";
 import { useMobileScreen } from "../utils";
+import clsx from "clsx";
 
 export function ChatItem(props: {
   onClick?: () => void;
@@ -45,11 +46,11 @@ export function ChatItem(props: {
       {(provided) => (
         {
           draggableRef.current = ele;
@@ -63,7 +64,7 @@ export function ChatItem(props: {
         >
           {props.narrow ? (
-
+
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
 void }) {
           text={Locale.Chat.Config.Reset}
           onClick={async () => {
             if (await showConfirm(Locale.Memory.ResetConfirm)) {
-              chatStore.updateCurrentSession(
+              chatStore.updateTargetSession(
+                session,
                 (session) => (session.memoryPrompt = ""),
               );
             }
@@ -173,7 +176,10 @@ export function SessionConfigModel(props: { onClose: () => void }) {
         updateMask={(updater) => {
           const mask = { ...session.mask };
           updater(mask);
-          chatStore.updateCurrentSession((session) => (session.mask = mask));
+          chatStore.updateTargetSession(
+            session,
+            (session) => (session.mask = mask),
+          );
         }}
         shouldSyncFromGlobal
         extraListItems={
@@ -206,7 +212,7 @@ function PromptToast(props: {
       {props.showToast && context.length > 0 && (
         onClick={() => props.setShowModal(true)}
       >
@@ -327,10 +333,9 @@ export function PromptHints(props: {
       {props.prompts.map((prompt, i) => (
           onClick={() => props.onPromptSelect(prompt)}
           onMouseEnter={() => setSelectIndex(i)}
@@ -345,12 +350,14 @@ function ClearContextDivider() {
   const chatStore = useChatStore();
+  const session = chatStore.currentSession();
 
   return (
-        chatStore.updateCurrentSession(
+        chatStore.updateTargetSession(
+          session,
           (session) => (session.clearContextIndex = undefined),
         )
       }
@@ -388,7 +395,7 @@ export function ChatAction(props: {
   return (
         {
           props.onClick();
           setTimeout(updateWidth, 1);
@@ -460,6 +467,7 @@ export function ChatActions(props: {
   const navigate = useNavigate();
   const chatStore = useChatStore();
   const pluginStore = usePluginStore();
+  const session = chatStore.currentSession();
 
   // switch themes
   const theme = config.theme;
@@ -476,10 +484,9 @@ export function ChatActions(props: {
   const stopAll = () => ChatControllerPool.stopAll();
 
   // switch model
-  const currentModel = chatStore.currentSession().mask.modelConfig.model;
+  const currentModel = session.mask.modelConfig.model;
   const currentProviderName =
-    chatStore.currentSession().mask.modelConfig?.providerName ||
-    ServiceProvider.OpenAI;
+    session.mask.modelConfig?.providerName || ServiceProvider.OpenAI;
   const allModels = useAllModels();
   const models = useMemo(() => {
     const filteredModels = allModels.filter((m) => m.available);
@@ -513,12 +520,9 @@ export function ChatActions(props: {
   const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"];
   const dalle3Qualitys: DalleQuality[] = ["standard", "hd"];
   const dalle3Styles: DalleStyle[] = ["vivid", "natural"];
-  const currentSize =
-    chatStore.currentSession().mask.modelConfig?.size ?? "1024x1024";
-  const currentQuality =
-    chatStore.currentSession().mask.modelConfig?.quality ?? "standard";
-  const currentStyle =
-    chatStore.currentSession().mask.modelConfig?.style ?? "vivid";
+  const currentSize = session.mask.modelConfig?.size ?? "1024x1024";
+  const currentQuality = session.mask.modelConfig?.quality ?? "standard";
+  const currentStyle = session.mask.modelConfig?.style ?? "vivid";
 
   const isMobileScreen = useMobileScreen();
 
@@ -536,7 +540,7 @@ export function ChatActions(props: {
     if (isUnavailableModel && models.length > 0) {
       // show next model to default model if exist
       let nextModel = models.find((model) => model.isDefault) || models[0];
-      chatStore.updateCurrentSession((session) => {
+      chatStore.updateTargetSession(session, (session) => {
         session.mask.modelConfig.model = nextModel.name;
         session.mask.modelConfig.providerName = nextModel?.provider
           ?.providerName as ServiceProvider;
@@ -547,7 +551,7 @@ export function ChatActions(props: {
           : nextModel.name,
       );
     }
-  }, [chatStore, currentModel, models]);
+  }, [chatStore, currentModel, models, session]);
 
   return (
@@ -614,7 +618,7 @@ export function ChatActions(props: {
           text={Locale.Chat.InputActions.Clear}
           icon={}
           onClick={() => {
-            chatStore.updateCurrentSession((session) => {
+            chatStore.updateTargetSession(session, (session) => {
               if (session.clearContextIndex === session.messages.length) {
                 session.clearContextIndex = undefined;
               } else {
@@ -645,8 +649,8 @@ export function ChatActions(props: {
           onClose={() => setShowModelSelector(false)}
          onSelection={(s) => {
             if (s.length === 0) return;
-            const [model, providerName] = s[0].split("@");
-            chatStore.updateCurrentSession((session) => {
+            const [model, providerName] = getModelProvider(s[0]);
+            chatStore.updateTargetSession(session, (session) => {
               session.mask.modelConfig.model = model as ModelType;
               session.mask.modelConfig.providerName =
                 providerName as ServiceProvider;
@@ -684,7 +688,7 @@ export function ChatActions(props: {
           onSelection={(s) => {
             if (s.length === 0) return;
             const size = s[0];
-            chatStore.updateCurrentSession((session) => {
+            chatStore.updateTargetSession(session, (session) => {
               session.mask.modelConfig.size = size;
             });
             showToast(size);
@@ -711,7 +715,7 @@ export function ChatActions(props: {
           onSelection={(q) => {
             if (q.length === 0) return;
             const quality = q[0];
-            chatStore.updateCurrentSession((session) => {
+            chatStore.updateTargetSession(session, (session) => {
               session.mask.modelConfig.quality = quality;
             });
             showToast(quality);
@@ -738,7 +742,7 @@ export function ChatActions(props: {
           onSelection={(s) => {
             if (s.length === 0) return;
             const style = s[0];
-            chatStore.updateCurrentSession((session) => {
+            chatStore.updateTargetSession(session, (session) => {
               session.mask.modelConfig.style = style;
             });
             showToast(style);
@@ -769,7 +773,7 @@ export function ChatActions(props: {
           }))}
           onClose={() => setShowPluginSelector(false)}
           onSelection={(s) => {
-            chatStore.updateCurrentSession((session) => {
+            chatStore.updateTargetSession(session, (session) => {
               session.mask.plugin = s as string[];
             });
           }}
@@ -812,7 +816,8 @@ export function EditMessageModal(props: { onClose: () => void }) {
           icon={}
           key="ok"
           onClick={() => {
-            chatStore.updateCurrentSession(
+            chatStore.updateTargetSession(
+              session,
               (session) => (session.messages = messages),
             );
             props.onClose();
@@ -829,7 +834,8 @@ export function EditMessageModal(props: { onClose: () => void }) {
           type="text"
           value={session.topic}
           onInput={(e) =>
-            chatStore.updateCurrentSession(
+            chatStore.updateTargetSession(
+              session,
               (session) => (session.topic = e.currentTarget.value),
             )
           }
@@ -990,7 +996,8 @@ function _Chat() {
     prev: () => chatStore.nextSession(-1),
     next: () => chatStore.nextSession(1),
     clear: () =>
-      chatStore.updateCurrentSession(
+      chatStore.updateTargetSession(
+        session,
        (session) => (session.clearContextIndex = session.messages.length),
      ),
     fork: () => chatStore.forkSession(),
@@ -1061,7 +1068,7 @@ function _Chat() {
   };
 
   useEffect(() => {
-    chatStore.updateCurrentSession((session) => {
+    chatStore.updateTargetSession(session, (session) => {
       const stopTiming = Date.now() - REQUEST_TIMEOUT_MS;
       session.messages.forEach((m) => {
         // check if should stop all stale messages
@@ -1087,7 +1094,7 @@ function _Chat() {
       }
     });
     // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, []);
+  }, [session]);
 
   // check if should send message
   const onInputKeyDown = (e: React.KeyboardEvent) => {
@@ -1118,7 +1125,8 @@ function _Chat() {
   };
 
   const deleteMessage = (msgId?: string) => {
-    chatStore.updateCurrentSession(
+    chatStore.updateTargetSession(
+      session,
       (session) =>
         (session.messages = session.messages.filter((m) => m.id !== msgId)),
     );
@@ -1185,7 +1193,7 @@ function _Chat() {
   };
 
   const onPinMessage = (message: ChatMessage) => {
-    chatStore.updateCurrentSession((session) =>
+    chatStore.updateTargetSession(session, (session) =>
       session.mask.context.push(message),
     );
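These chat.tsx hunks replace `updateCurrentSession(updater)` with `updateTargetSession(session, updater)`, binding each update to the session the callback was created for, so a slow or queued update cannot land on whatever session happens to be current when it finally fires. The store method itself is not part of this excerpt; a plausible sketch, with names and types assumed:

```ts
import type { ChatSession } from "@/app/store";

// Hedged sketch of the store method these hunks rely on; the real
// implementation lives in app/store/chat.ts, which is not shown here.
// The idea: look the session up by its id instead of trusting whichever
// session is "current" at callback time.
function updateTargetSession(
  sessions: ChatSession[],
  targetSession: ChatSession,
  updater: (session: ChatSession) => void,
): ChatSession[] {
  const index = sessions.findIndex((s) => s.id === targetSession.id);
  if (index < 0) return sessions; // the session was deleted in the meantime
  updater(sessions[index]);
  return [...sessions]; // fresh array reference so React re-renders
}
```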
@@ -1588,9 +1596,12 @@ function _Chat() {
           )}
-
+
           setIsEditingMessage(true)}
         >
           {!session.topic ? DEFAULT_TOPIC : session.topic}
@@ -1607,7 +1618,7 @@ function _Chat() {
               title={Locale.Chat.Actions.RefreshTitle}
               onClick={() => {
                 showToast(Locale.Chat.Actions.RefreshToast);
-                chatStore.summarizeSession(true);
+                chatStore.summarizeSession(true, session);
               }}
             />
@@ -1711,14 +1722,17 @@ function _Chat() {
                       });
                     }
                   }
-                  chatStore.updateCurrentSession((session) => {
-                    const m = session.mask.context
-                      .concat(session.messages)
-                      .find((m) => m.id === message.id);
-                    if (m) {
-                      m.content = newContent;
-                    }
-                  });
+                  chatStore.updateTargetSession(
+                    session,
+                    (session) => {
+                      const m = session.mask.context
+                        .concat(session.messages)
+                        .find((m) => m.id === message.id);
+                      if (m) {
+                        m.content = newContent;
+                      }
+                    },
+                  );
                 }}
               >
@@ -1861,7 +1875,7 @@ function _Chat() {
                   )}
                   {getMessageImages(message).length > 1 && (