diff --git a/README.md b/README.md index b9e994e50..fce62ba37 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu -[Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [Open in Gitpod](https://www.bt.cn/new/download.html) +[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [BT Deploy Install](https://www.bt.cn/new/download.html) [Deploy to Alibaba Cloud](https://computenest.aliyun.com/market/service-f1c9b75e59814dc49d52) [](https://monica.im/?utm=nxcrp) @@ -301,6 +301,14 @@ iflytek Api Key. iflytek Api Secret. +### `CHATGLM_API_KEY` (optional) + +ChatGLM Api Key. + +### `CHATGLM_URL` (optional) + +ChatGLM Api Url. + ### `HIDE_USER_API_KEY` (optional) > Default: Empty @@ -397,6 +405,9 @@ yarn dev > [简体中文 > 如何部署到私人服务器](./README_CN.md#部署) +### BT Install +> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md) + ### Docker (Recommended) ```shell diff --git a/README_CN.md b/README_CN.md index 3f339ea61..d4da8b9da 100644 --- a/README_CN.md +++ b/README_CN.md @@ -184,6 +184,13 @@ ByteDance Api Url. 讯飞星火Api Secret. +### `CHATGLM_API_KEY` (可选) + +ChatGLM Api Key. + +### `CHATGLM_URL` (可选) + +ChatGLM Api Url. 
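The two variables documented above are consumed server-side. For reference, a minimal sketch of the pickup logic, mirroring the `app/config/server.ts` and `app/api/glm.ts` hunks later in this patch (`CHATGLM_BASE_URL` is the constant this patch adds to `app/constant.ts`; the warning is illustrative, not part of the patch):

```typescript
// Sketch: how the new ChatGLM environment variables surface in the server config.
// CHATGLM_API_KEY enables the provider; CHATGLM_URL optionally overrides the endpoint.
const CHATGLM_BASE_URL = "https://open.bigmodel.cn"; // fallback added by this patch

const isChatGLM = !!process.env.CHATGLM_API_KEY;
const chatglmUrl = process.env.CHATGLM_URL ?? CHATGLM_BASE_URL;

if (!isChatGLM) {
  // hypothetical log line for clarity; the real config simply leaves the provider off
  console.warn("[Config] CHATGLM_API_KEY is not set; the ChatGLM provider stays disabled");
}
```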
### `HIDE_USER_API_KEY` (可选) @@ -264,6 +271,9 @@ BASE_URL=https://b.nextweb.fun/api/proxy ## 部署 +### 宝塔面板部署 +> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md) + ### 容器部署 (推荐) > Docker 版本需要在 20 及其以上,否则会提示找不到镜像。 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index 100c43714..f5f051d77 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -12,6 +12,7 @@ import { handle as moonshotHandler } from "../../moonshot"; import { handle as stabilityHandler } from "../../stability"; import { handle as iflytekHandler } from "../../iflytek"; import { handle as xaiHandler } from "../../xai"; +import { handle as chatglmHandler } from "../../glm"; import { handle as proxyHandler } from "../../proxy"; async function handle( @@ -45,6 +46,8 @@ async function handle( return iflytekHandler(req, { params }); case ApiPath.XAI: return xaiHandler(req, { params }); + case ApiPath.ChatGLM: + return chatglmHandler(req, { params }); case ApiPath.OpenAI: return openaiHandler(req, { params }); default: diff --git a/app/api/auth.ts b/app/api/auth.ts index bb8ee1474..e56f85820 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -117,6 +117,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { case ModelProvider.XAI: systemApiKey = serverConfig.xaiApiKey; break; + case ModelProvider.ChatGLM: + systemApiKey = serverConfig.chatglmApiKey; + break; case ModelProvider.GPT: default: if (req.nextUrl.pathname.includes("azure/deployments")) { diff --git a/app/api/glm.ts b/app/api/glm.ts new file mode 100644 index 000000000..3625b9f7b --- /dev/null +++ b/app/api/glm.ts @@ -0,0 +1,129 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + CHATGLM_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[GLM Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.ChatGLM); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[GLM] ", e); + return NextResponse.json(prettyObject(e)); + } } + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // use the configured base url, or just strip the /api/chatglm prefix + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, ""); + + let baseUrl = serverConfig.chatglmUrl || CHATGLM_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + console.log("[Fetch Url] ", fetchUrl); + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.ChatGLM as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[GLM] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/client/api.ts b/app/client/api.ts index e7ba2fd5d..3079a9276 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -22,6 +22,7 @@ import { HunyuanApi } from "./platforms/tencent"; import { MoonshotApi } from "./platforms/moonshot"; import { SparkApi } from "./platforms/iflytek"; import { XAIApi } from "./platforms/xai"; +import { ChatGLMApi } from "./platforms/glm"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -78,7 +79,7 @@ export interface ChatOptions { config: LLMConfig; onUpdate?: (message: string, chunk: string) => void; - onFinish: (message: string) => void; + onFinish: (message: string, responseRes: Response) => void; onError?: (err: Error) => void; onController?: (controller: AbortController) => void; onBeforeTool?: (tool: ChatMessageTool) => void; @@ -168,6 +169,9 @@ export class ClientApi { case ModelProvider.XAI: this.llm = new XAIApi(); break; + case ModelProvider.ChatGLM: + this.llm = new ChatGLMApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -257,6 +261,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot; const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek; const isXAI = modelConfig.providerName === ServiceProvider.XAI; + const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM; const isEnabledAccessControl = accessStore.enabledAccessControl(); const apiKey = isGoogle ? accessStore.googleApiKey @@ -274,6 +279,8 @@ export function getHeaders(ignoreHeaders: boolean = false) { ? accessStore.moonshotApiKey : isXAI ? accessStore.xaiApiKey + : isChatGLM + ? accessStore.chatglmApiKey : isIflytek ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret ? 
accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret @@ -290,6 +297,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { isMoonshot, isIflytek, isXAI, + isChatGLM, apiKey, isEnabledAccessControl, }; @@ -372,6 +380,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi { return new ClientApi(ModelProvider.Iflytek); case ServiceProvider.XAI: return new ClientApi(ModelProvider.XAI); + case ServiceProvider.ChatGLM: + return new ClientApi(ModelProvider.ChatGLM); default: return new ClientApi(ModelProvider.GPT); } diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index 86229a147..6fe69e87a 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -143,6 +143,7 @@ export class QwenApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -172,7 +173,7 @@ export class QwenApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -188,6 +189,7 @@ export class QwenApi implements LLMApi { "[Alibaba] request response content type: ", contentType, ); + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); @@ -254,7 +256,7 @@ export class QwenApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 3645cbe6e..6747221a8 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -317,13 +317,14 @@ export class ClaudeApi implements LLMApi { }; try { - controller.signal.onabort = () => options.onFinish(""); + controller.signal.onabort = () => + options.onFinish("", new Response(null, { status: 400 })); const res = await fetch(path, payload); const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } catch (e) { console.error("failed to chat", e); options.onError?.(e as Error); diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 2511a696b..9e8c2f139 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -162,6 +162,7 @@ export class ErnieApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -191,7 +192,7 @@ export class ErnieApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -204,7 +205,7 @@ export class ErnieApi implements LLMApi { clearTimeout(requestTimeoutId); const contentType = res.headers.get("content-type"); console.log("[Baidu] request response content type: ", contentType); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -267,7 +268,7 @@ export class ErnieApi implements LLMApi { const resJson = await res.json(); const message = resJson?.result; - options.onFinish(message); + 
options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index 000a9e278..a2f0660d8 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -130,6 +130,7 @@ export class DoubaoApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -159,7 +160,7 @@ export class DoubaoApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -175,7 +176,7 @@ export class DoubaoApi implements LLMApi { "[ByteDance] request response content type: ", contentType, ); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -241,7 +242,7 @@ export class DoubaoApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/glm.ts b/app/client/platforms/glm.ts new file mode 100644 index 000000000..a7965947f --- /dev/null +++ b/app/client/platforms/glm.ts @@ -0,0 +1,197 @@ +"use client"; +import { + ApiPath, + CHATGLM_BASE_URL, + ChatGLM, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { stream } from "@/app/utils/chat"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; + +export class ChatGLMApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.chatglmUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.ChatGLM; + baseUrl = isApp ? CHATGLM_BASE_URL : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + console.log("[Request] glm payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(ChatGLM.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message, res); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index a4b594ddf..53ff00aee 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -274,7 +274,7 @@ export class GeminiProApi implements LLMApi { ); } const message = apiClient.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { 
console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts index 55a39d0cc..cfc37b3b2 100644 --- a/app/client/platforms/iflytek.ts +++ b/app/client/platforms/iflytek.ts @@ -117,6 +117,7 @@ export class SparkApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // Animate response text to make it look smooth function animateResponseText() { @@ -143,7 +144,7 @@ export class SparkApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -156,7 +157,7 @@ export class SparkApi implements LLMApi { clearTimeout(requestTimeoutId); const contentType = res.headers.get("content-type"); console.log("[Spark] request response content type: ", contentType); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -231,7 +232,7 @@ export class SparkApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index 22a34b2e2..b6812c0d7 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -180,7 +180,7 @@ export class MoonshotApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 30f7415c1..6e893ed14 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -361,7 +361,7 @@ export class ChatGPTApi implements LLMApi { const resJson = await res.json(); const message = await this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index 3610fac0a..580844a5b 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts @@ -142,6 +142,7 @@ export class HunyuanApi implements LLMApi { let responseText = ""; let remainText = ""; let finished = false; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -171,7 +172,7 @@ export class HunyuanApi implements LLMApi { const finish = () => { if (!finished) { finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); } }; @@ -187,7 +188,7 @@ export class HunyuanApi implements LLMApi { "[Tencent] request response content type: ", contentType, ); - + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); return finish(); @@ -253,7 +254,7 @@ export class HunyuanApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts 
index deb74e66c..06dbaaa29 100644 --- a/app/client/platforms/xai.ts +++ b/app/client/platforms/xai.ts @@ -173,7 +173,7 @@ export class XAIApi implements LLMApi { const resJson = await res.json(); const message = this.extractMessage(resJson); - options.onFinish(message); + options.onFinish(message, res); } } catch (e) { console.log("[Request] failed to make a chat request", e); diff --git a/app/components/chat.tsx b/app/components/chat.tsx index 3d5b6a4f2..a62021db3 100644 --- a/app/components/chat.tsx +++ b/app/components/chat.tsx @@ -1607,7 +1607,7 @@ function _Chat() { title={Locale.Chat.Actions.RefreshTitle} onClick={() => { showToast(Locale.Chat.Actions.RefreshToast); - chatStore.summarizeSession(true); + chatStore.summarizeSession(true, session); }} /> diff --git a/app/components/settings.tsx b/app/components/settings.tsx index ddd6a5c15..0bd570826 100644 --- a/app/components/settings.tsx +++ b/app/components/settings.tsx @@ -72,6 +72,7 @@ import { Stability, Iflytek, SAAS_CHAT_URL, + ChatGLM, } from "../constant"; import { Prompt, SearchService, usePromptStore } from "../store/prompt"; import { ErrorBoundary } from "./error"; @@ -1302,6 +1303,47 @@ export function Settings() { ); + const chatglmConfigComponent = accessStore.provider === + ServiceProvider.ChatGLM && ( + <> + <ListItem + title={Locale.Settings.Access.ChatGLM.Endpoint.Title} + subTitle={ + Locale.Settings.Access.ChatGLM.Endpoint.SubTitle + + ChatGLM.ExampleEndpoint + } + > + <input + type="text" + value={accessStore.chatglmUrl} + placeholder={ChatGLM.ExampleEndpoint} + onChange={(e) => + accessStore.update( + (access) => (access.chatglmUrl = e.currentTarget.value), + ) + } + ></input> + </ListItem> + <ListItem + title={Locale.Settings.Access.ChatGLM.ApiKey.Title} + subTitle={Locale.Settings.Access.ChatGLM.ApiKey.SubTitle} + > + <PasswordInput + value={accessStore.chatglmApiKey} + type="text" + placeholder={Locale.Settings.Access.ChatGLM.ApiKey.Placeholder} + onChange={(e) => { + accessStore.update( + (access) => (access.chatglmApiKey = e.currentTarget.value), + ); + }} + /> + </ListItem> + </> + ); + const stabilityConfigComponent = accessStore.provider === ServiceProvider.Stability && ( <> @@ -1762,6 +1804,7 @@ {stabilityConfigComponent} {lflytekConfigComponent} {XAIConfigComponent} + {chatglmConfigComponent} )} diff --git a/app/config/server.ts b/app/config/server.ts index 5250b0610..0437ed534 100644 --- a/app/config/server.ts +++ b/app/config/server.ts @@ -80,6 +80,10 @@ XAI_URL?: string; XAI_API_KEY?: string; + // chatglm only + CHATGLM_URL?: string; + CHATGLM_API_KEY?: string; + // custom template for preprocessing user input DEFAULT_INPUT_TEMPLATE?: string; } @@ -156,6 +160,7 @@ export const getServerSideConfig = () => { const isMoonshot = !!process.env.MOONSHOT_API_KEY; const isIflytek = !!process.env.IFLYTEK_API_KEY; const isXAI = !!process.env.XAI_API_KEY; + const isChatGLM = !!process.env.CHATGLM_API_KEY; // const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? 
""; // const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim()); // const randomIndex = Math.floor(Math.random() * apiKeys.length); @@ -227,6 +232,10 @@ export const getServerSideConfig = () => { xaiUrl: process.env.XAI_URL, xaiApiKey: getApiKey(process.env.XAI_API_KEY), + isChatGLM, + chatglmUrl: process.env.CHATGLM_URL, + chatglmApiKey: getApiKey(process.env.CHATGLM_API_KEY), + cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID, cloudflareKVNamespaceId: process.env.CLOUDFLARE_KV_NAMESPACE_ID, cloudflareKVApiKey: getApiKey(process.env.CLOUDFLARE_KV_API_KEY), diff --git a/app/constant.ts b/app/constant.ts index 1b8aa49d8..d15a2fe79 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -32,6 +32,8 @@ export const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com"; export const XAI_BASE_URL = "https://api.x.ai"; +export const CHATGLM_BASE_URL = "https://open.bigmodel.cn"; + export const CACHE_URL_PREFIX = "/api/cache"; export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`; @@ -65,6 +67,7 @@ export enum ApiPath { Stability = "/api/stability", Artifacts = "/api/artifacts", XAI = "/api/xai", + ChatGLM = "/api/chatglm", } export enum SlotID { @@ -119,6 +122,7 @@ export enum ServiceProvider { Iflytek = "Iflytek", XAI = "XAI", Bedrock = "Bedrock", + ChatGLM = "ChatGLM", } // Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings @@ -143,6 +147,7 @@ export enum ModelProvider { Moonshot = "Moonshot", Iflytek = "Iflytek", XAI = "XAI", + ChatGLM = "ChatGLM", } export const Stability = { @@ -230,6 +235,11 @@ export const XAI = { ChatPath: "v1/chat/completions", }; +export const ChatGLM = { + ExampleEndpoint: CHATGLM_BASE_URL, + ChatPath: "/api/paas/v4/chat/completions", +}; + export const Bedrock = { ChatPath: "converse", }; @@ -342,11 +352,12 @@ const anthropicModels = [ "claude-2.1", "claude-3-sonnet-20240229", "claude-3-opus-20240229", + "claude-3-opus-latest", "claude-3-haiku-20240307", "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", - "claude-3-opus-latest", + "claude-3-5-haiku-latest", ]; const baiduModels = [ @@ -404,6 +415,17 @@ const iflytekModels = [ const xAIModes = ["grok-beta"]; +const chatglmModels = [ + "glm-4-plus", + "glm-4-0520", + "glm-4", + "glm-4-air", + "glm-4-airx", + "glm-4-long", + "glm-4-flashx", + "glm-4-flash", +]; + let seq = 1000; // 内置的模型序号生成器从1000开始 export const DEFAULT_MODELS = [ ...openaiModels.map((name) => ({ @@ -527,6 +549,19 @@ export const DEFAULT_MODELS = [ sorted: 11, }, })), + + ...chatglmModels.map((name) => ({ + name, + available: true, + sorted: seq++, + provider: { + id: "chatglm", + providerName: "ChatGLM", + providerType: "chatglm", + sorted: 12, + }, + })), + ...bedrockModels.map((name) => ({ name, available: true, diff --git a/app/locales/cn.ts b/app/locales/cn.ts index cdf99f197..13ebdc580 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -500,6 +500,17 @@ const cn = { SubTitle: "样例:", }, }, + ChatGLM: { + ApiKey: { + Title: "接口密钥", + SubTitle: "使用自定义 ChatGLM API Key", + Placeholder: "ChatGLM API Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + }, Stability: { ApiKey: { Title: "接口密钥", diff --git a/app/locales/en.ts b/app/locales/en.ts index 12107ac7c..146f7c6cb 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -484,6 +484,17 @@ const en: LocaleType = { SubTitle: "Example: ", }, }, + ChatGLM: { + ApiKey: { + Title: "ChatGLM API Key", + SubTitle: "Use a custom ChatGLM API Key", + Placeholder: "ChatGLM API Key", + }, + Endpoint: { + 
Title: "Endpoint Address", + SubTitle: "Example: ", + }, + }, Stability: { ApiKey: { Title: "Stability API Key", diff --git a/app/store/access.ts b/app/store/access.ts index b5f765cfc..7c8f64da8 100644 --- a/app/store/access.ts +++ b/app/store/access.ts @@ -15,6 +15,7 @@ import { STABILITY_BASE_URL, IFLYTEK_BASE_URL, XAI_BASE_URL, + CHATGLM_BASE_URL, } from "../constant"; import { getHeaders } from "../client/api"; import { getClientConfig } from "../config/client"; @@ -49,6 +50,8 @@ const DEFAULT_IFLYTEK_URL = isApp ? IFLYTEK_BASE_URL : ApiPath.Iflytek; const DEFAULT_XAI_URL = isApp ? XAI_BASE_URL : ApiPath.XAI; +const DEFAULT_CHATGLM_URL = isApp ? CHATGLM_BASE_URL : ApiPath.ChatGLM; + const DEFAULT_ACCESS_STATE = { accessCode: "", useCustomConfig: false, @@ -117,6 +120,10 @@ const DEFAULT_ACCESS_STATE = { xaiUrl: DEFAULT_XAI_URL, xaiApiKey: "", + // chatglm + chatglmUrl: DEFAULT_CHATGLM_URL, + chatglmApiKey: "", + // server config needCode: true, hideUserApiKey: false, @@ -193,6 +200,10 @@ export const useAccessStore = createPersistStore( return ensure(get(), ["xaiApiKey"]); }, + isValidChatGLM() { + return ensure(get(), ["chatglmApiKey"]); + }, + isAuthorized() { this.fetch(); @@ -210,6 +221,7 @@ export const useAccessStore = createPersistStore( this.isValidMoonshot() || this.isValidIflytek() || this.isValidXAI() || + this.isValidChatGLM() || !this.enabledAccessControl() || (this.enabledAccessControl() && ensure(get(), ["accessCode"])) ); diff --git a/app/store/chat.ts b/app/store/chat.ts index 6900899e1..4208692e2 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -352,13 +352,13 @@ export const useChatStore = createPersistStore( return session; }, - onNewMessage(message: ChatMessage) { - get().updateCurrentSession((session) => { + onNewMessage(message: ChatMessage, targetSession: ChatSession) { + get().updateTargetSession(targetSession, (session) => { session.messages = session.messages.concat(); session.lastUpdate = Date.now(); }); get().updateStat(message); - get().summarizeSession(); + get().summarizeSession(false, targetSession); }, async onUserInput(content: string, attachImages?: string[]) { @@ -428,7 +428,7 @@ export const useChatStore = createPersistStore( botMessage.streaming = false; if (message) { botMessage.content = message; - get().onNewMessage(botMessage); + get().onNewMessage(botMessage, session); } ChatControllerPool.remove(session.id, botMessage.id); }, @@ -598,9 +598,12 @@ export const useChatStore = createPersistStore( }); }, - summarizeSession(refreshTitle: boolean = false) { + summarizeSession( + refreshTitle: boolean = false, + targetSession: ChatSession, + ) { const config = useAppConfig.getState(); - const session = get().currentSession(); + const session = targetSession; const modelConfig = session.mask.modelConfig; // skip summarize when using dalle3? if (isDalle3(modelConfig.model)) { @@ -649,13 +652,15 @@ export const useChatStore = createPersistStore( stream: false, providerName, }, - onFinish(message) { - if (!isValidMessage(message)) return; - get().updateCurrentSession( - (session) => - (session.topic = - message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC), - ); + onFinish(message, responseRes) { + if (responseRes?.status === 200) { + get().updateTargetSession( + session, + (session) => + (session.topic = + message.length > 0 ? 
trimTopic(message) : DEFAULT_TOPIC), + ); + } }, }); } @@ -669,7 +674,7 @@ const historyMsgLength = countMessages(toBeSummarizedMsgs); - if (historyMsgLength > modelConfig?.max_tokens ?? 4000) { + if (historyMsgLength > (modelConfig?.max_tokens || 4000)) { const n = toBeSummarizedMsgs.length; toBeSummarizedMsgs = toBeSummarizedMsgs.slice( Math.max(0, n - modelConfig.historyMessageCount), @@ -715,22 +720,20 @@ onUpdate(message) { session.memoryPrompt = message; }, - onFinish(message) { - console.log("[Memory] ", message); - get().updateCurrentSession((session) => { - session.lastSummarizeIndex = lastSummarizeIndex; - session.memoryPrompt = message; // Update the memory prompt for stored it in local storage - }); + onFinish(message, responseRes) { + if (responseRes?.status === 200) { + console.log("[Memory] ", message); + get().updateTargetSession(session, (session) => { + session.lastSummarizeIndex = lastSummarizeIndex; + session.memoryPrompt = message; // update the memory prompt and store it in local storage + }); + } }, onError(err) { console.error("[Summarize] ", err); }, }); } - - function isValidMessage(message: any): boolean { - return typeof message === "string" && !message.startsWith("```json"); - } }, updateStat(message: ChatMessage) { @@ -746,7 +749,16 @@ updater(sessions[index]); set(() => ({ sessions })); }, - + updateTargetSession( + targetSession: ChatSession, + updater: (session: ChatSession) => void, + ) { + const sessions = get().sessions; + const index = sessions.findIndex((s) => s.id === targetSession.id); + if (index < 0) return; + updater(sessions[index]); + set(() => ({ sessions })); + }, async clearAllData() { await indexedDBStorage.clear(); localStorage.clear(); diff --git a/app/utils.ts b/app/utils.ts index 50cb4b0da..96228da59 100644 --- a/app/utils.ts +++ b/app/utils.ts @@ -266,7 +266,9 @@ export function isVisionModel(model: string) { model.includes("gpt-4-turbo") && !model.includes("preview"); return ( - visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo + visionKeywords.some((keyword) => model.includes(keyword)) || + isGpt4Turbo || + isDalle3(model) ); } @@ -278,7 +280,8 @@ export function showPlugins(provider: ServiceProvider, model: string) { if ( provider == ServiceProvider.OpenAI || provider == ServiceProvider.Azure || - provider == ServiceProvider.Moonshot + provider == ServiceProvider.Moonshot || + provider == ServiceProvider.ChatGLM ) { return true; } diff --git a/app/utils/chat.ts b/app/utils/chat.ts index ba1904625..9209b5da5 100644 --- a/app/utils/chat.ts +++ b/app/utils/chat.ts @@ -174,6 +174,7 @@ export function stream( let finished = false; let running = false; let runTools: any[] = []; + let responseRes: Response; // animate response to make it looks smooth function animateResponseText() { @@ -272,7 +273,7 @@ } console.debug("[ChatAPI] end"); finished = true; - options.onFinish(responseText + remainText); + options.onFinish(responseText + remainText, responseRes); // pass the response to onFinish } }; @@ -304,6 +305,7 @@ clearTimeout(requestTimeoutId); const contentType = res.headers.get("content-type"); console.log("[Request] response content type: ", contentType); + responseRes = res; if (contentType?.startsWith("text/plain")) { responseText = await res.clone().text(); diff --git a/app/utils/stream.ts b/app/utils/stream.ts index 
782634595..f186730f6 100644 --- a/app/utils/stream.ts +++ b/app/utils/stream.ts @@ -19,7 +19,7 @@ type StreamResponse = { headers: Record<string, string>; }; -export function fetch(url: string, options?: RequestInit): Promise<any> { +export function fetch(url: string, options?: RequestInit): Promise<Response> { if (window.__TAURI__) { const { signal, diff --git a/docs/bt-cn.md b/docs/bt-cn.md new file mode 100644 index 000000000..115fbbd70 --- /dev/null +++ b/docs/bt-cn.md @@ -0,0 +1,29 @@ +# 宝塔面板的部署说明 + +## 拥有自己的宝塔 +在通过 宝塔面板 部署本项目之前,需要先在服务器上安装好 宝塔面板工具。接下来的 部署流程 都建立在已有宝塔面板的前提下。宝塔安装请参考 [宝塔官网](https://www.bt.cn/new/download.html)。 + +> 注意:本项目需要宝塔面板版本 9.2.0 及以上 + +## 一键安装 +![bt-install-1](./images/bt/bt-install-1.jpeg) +1. 在 宝塔面板 -> Docker -> 应用商店 页面,搜索 ChatGPT-Next-Web 找到本项目的 Docker 应用; +2. 点击 安装,开始部署本项目。 + +![bt-install-2](./images/bt/bt-install-2.jpeg) +1. 在项目配置页,根据要求开始配置环境变量; +2. 如勾选 允许外部访问 配置,请注意为配置的 web端口 开放安全组端口访问权限; +3. 请确保你添加了正确的 OpenAI API Key,否则无法使用;若配置 OpenAI官方 提供的 key(国内无法访问),请同时配置代理地址; +4. 建议配置 访问权限密码,否则部署后所有人均可使用已配置的 OpenAI API Key(当允许外部访问时); +5. 点击 确认 开始自动部署。 + +## 如何访问 +![bt-install-3](./images/bt/bt-install-3.jpeg) +根据 服务器IP地址 和配置的 web端口(http://$(host):$(port)),在浏览器中打开 ChatGPT-Next-Web。 + +![bt-install-4](./images/bt/bt-install-4.jpeg) +若配置了 访问权限密码,访问大模型前需要登录,请点击 登录,获取访问权限。 + +![bt-install-5](./images/bt/bt-install-5.jpeg) + +![bt-install-6](./images/bt/bt-install-6.jpeg) diff --git a/docs/images/bt/bt-install-1.jpeg b/docs/images/bt/bt-install-1.jpeg new file mode 100644 index 000000000..fff3406d6 Binary files /dev/null and b/docs/images/bt/bt-install-1.jpeg differ diff --git a/docs/images/bt/bt-install-2.jpeg b/docs/images/bt/bt-install-2.jpeg new file mode 100644 index 000000000..77256ef8d Binary files /dev/null and b/docs/images/bt/bt-install-2.jpeg differ diff --git a/docs/images/bt/bt-install-3.jpeg b/docs/images/bt/bt-install-3.jpeg new file mode 100644 index 000000000..7790f89e8 Binary files /dev/null and b/docs/images/bt/bt-install-3.jpeg differ diff --git a/docs/images/bt/bt-install-4.jpeg b/docs/images/bt/bt-install-4.jpeg new file mode 100644 index 000000000..38d7caee4 Binary files /dev/null and b/docs/images/bt/bt-install-4.jpeg differ diff --git a/docs/images/bt/bt-install-5.jpeg b/docs/images/bt/bt-install-5.jpeg new file mode 100644 index 000000000..aa1a7963c Binary files /dev/null and b/docs/images/bt/bt-install-5.jpeg differ diff --git a/docs/images/bt/bt-install-6.jpeg b/docs/images/bt/bt-install-6.jpeg new file mode 100644 index 000000000..42359e65b Binary files /dev/null and b/docs/images/bt/bt-install-6.jpeg differ diff --git a/package.json b/package.json index d1c8e8278..69196b10a 100644 --- a/package.json +++ b/package.json @@ -59,7 +59,7 @@ "@tauri-apps/cli": "1.5.11", "@testing-library/dom": "^10.4.0", "@testing-library/jest-dom": "^6.6.2", - "@testing-library/react": "^16.0.0", + "@testing-library/react": "^16.0.1", "@types/jest": "^29.5.14", "@types/js-yaml": "4.0.9", "@types/lodash-es": "^4.17.12", diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index 415825b13..7e08d9070 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -9,7 +9,7 @@ }, "package": { "productName": "NextChat", - "version": "2.15.6" + "version": "2.15.7" }, "tauri": { "allowlist": { diff --git a/yarn.lock b/yarn.lock index 9f8aa9f61..16b8b872e 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1201,14 +1201,7 @@ resolved "https://registry.yarnpkg.com/@babel/regjsgen/-/regjsgen-0.8.0.tgz#f0ba69b075e1f05fb2825b7fad991e7adbb18310" integrity 
sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA== -"@babel/runtime@^7.12.1", "@babel/runtime@^7.20.7", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": - version "7.23.6" - resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.6.tgz#c05e610dc228855dc92ef1b53d07389ed8ab521d" - integrity sha512-zHd0eUrf5GZoOWVCXp6koAKQTfZV07eit6bGPmJgnZdnSAvvZee6zniW2XMF7Cmc4ISOOnPy3QaSiIJGJkVEDQ== - dependencies: - regenerator-runtime "^0.14.0" - -"@babel/runtime@^7.12.5", "@babel/runtime@^7.21.0": +"@babel/runtime@^7.12.1", "@babel/runtime@^7.12.5", "@babel/runtime@^7.20.7", "@babel/runtime@^7.21.0", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": version "7.25.0" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.25.0.tgz#3af9a91c1b739c569d5d80cc917280919c544ecb" integrity sha512-7dRy4DwXwtzBrPbZflqxnvfxLF8kdZXPkhymtDeFoFqE6ldzjQFgYTtYIFARcLEYDrqfBfYcZt1WqFxRoyC9Rw== @@ -2134,10 +2127,10 @@ lodash "^4.17.21" redent "^3.0.0" -"@testing-library/react@^16.0.0": - version "16.0.0" - resolved "https://registry.npmmirror.com/@testing-library/react/-/react-16.0.0.tgz#0a1e0c7a3de25841c3591b8cb7fb0cf0c0a27321" - integrity sha512-guuxUKRWQ+FgNX0h0NS0FIq3Q3uLtWVpBzcLOggmfMoUpgBnzBzvLLd4fbm6yS8ydJd94cIfY4yP9qUQjM2KwQ== +"@testing-library/react@^16.0.1": + version "16.0.1" + resolved "https://registry.yarnpkg.com/@testing-library/react/-/react-16.0.1.tgz#29c0ee878d672703f5e7579f239005e4e0faa875" + integrity sha512-dSmwJVtJXmku+iocRhWOUFbrERC76TX2Mnf0ATODz8brzAZrMBbzLwQixlBSanZxR6LddK3eiwpSFZgDET1URg== dependencies: "@babel/runtime" "^7.12.5"
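A note on the cross-cutting change in this patch: every provider's `onFinish` callback now receives the underlying `Response` as a second argument, and `summarizeSession` uses it to skip failed requests instead of string-sniffing the message (the removed `isValidMessage` hack). A minimal sketch of the new contract from a caller's side, under the assumption that side effects should only run on HTTP 200; `logTopic` is a hypothetical helper, not part of the patch:

```typescript
// Sketch of the updated ChatOptions.onFinish contract.
type OnFinish = (message: string, responseRes: Response) => void;

// A consumer in the style of summarizeSession: gate persistence on the
// response status rather than inspecting the message text.
const onFinish: OnFinish = (message, responseRes) => {
  if (responseRes?.status === 200) {
    logTopic(message); // hypothetical helper; the store updates the session topic here
  }
};

// Hoisted helper used above, purely for illustration.
function logTopic(topic: string) {
  console.log("[Topic] ", topic);
}

// Providers resolve the callback with the fetch Response they already hold,
// e.g. options.onFinish(message, res) in the non-streaming branches above;
// the Anthropic abort path synthesizes one with new Response(null, { status: 400 }).
onFinish("example topic", new Response(null, { status: 200 }));
```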