From ad9ab9d45afa384718a59bce23d9b70e3e8ed08a Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Tue, 4 Feb 2025 15:02:18 +0800 Subject: [PATCH 01/37] New provider SiliconFlow and Its Latest DeekSeek Models Update README.md Update constant.ts Update README_CN.md --- README.md | 7 + README_CN.md | 7 + app/api/[provider]/[...path]/route.ts | 3 + app/api/auth.ts | 3 + app/api/siliconflow.ts | 128 ++++++++++++++ app/client/api.ts | 12 ++ app/client/platforms/siliconflow.ts | 243 ++++++++++++++++++++++++++ app/components/settings.tsx | 42 +++++ app/config/server.ts | 9 + app/constant.ts | 38 +++- app/locales/cn.ts | 11 ++ app/locales/en.ts | 11 ++ app/store/access.ts | 14 ++ 13 files changed, 527 insertions(+), 1 deletion(-) create mode 100644 app/api/siliconflow.ts create mode 100644 app/client/platforms/siliconflow.ts diff --git a/README.md b/README.md index d6e99fca9..d391bdbff 100644 --- a/README.md +++ b/README.md @@ -352,6 +352,13 @@ Customize Stability API url. Enable MCP(Model Context Protocol)Feature +### `SILICONFLOW_API_KEY` (optional) + +SiliconFlow API Key. + +### `SILICONFLOW_URL` (optional) + +SiliconFlow API URL. ## Requirements diff --git a/README_CN.md b/README_CN.md index b23ea790d..f6f4c0be5 100644 --- a/README_CN.md +++ b/README_CN.md @@ -267,6 +267,13 @@ Stability API密钥 启用MCP(Model Context Protocol)功能 +### `SILICONFLOW_API_KEY` (optional) + +SiliconFlow API Key. + +### `SILICONFLOW_URL` (optional) + +SiliconFlow API URL. ## 开发 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index 3b5833d7e..8975bf971 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -11,6 +11,7 @@ import { handle as moonshotHandler } from "../../moonshot"; import { handle as stabilityHandler } from "../../stability"; import { handle as iflytekHandler } from "../../iflytek"; import { handle as deepseekHandler } from "../../deepseek"; +import { handle as siliconflowHandler } from "../../siliconflow"; import { handle as xaiHandler } from "../../xai"; import { handle as chatglmHandler } from "../../glm"; import { handle as proxyHandler } from "../../proxy"; @@ -47,6 +48,8 @@ async function handle( return xaiHandler(req, { params }); case ApiPath.ChatGLM: return chatglmHandler(req, { params }); + case ApiPath.SiliconFlow: + return siliconflowHandler(req, { params }); case ApiPath.OpenAI: return openaiHandler(req, { params }); default: diff --git a/app/api/auth.ts b/app/api/auth.ts index 1760c249c..8c78c70c8 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -101,6 +101,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { case ModelProvider.ChatGLM: systemApiKey = serverConfig.chatglmApiKey; break; + case ModelProvider.SiliconFlow: + systemApiKey = serverConfig.siliconFlowApiKey; + break; case ModelProvider.GPT: default: if (req.nextUrl.pathname.includes("azure/deployments")) { diff --git a/app/api/siliconflow.ts b/app/api/siliconflow.ts new file mode 100644 index 000000000..e298a21d4 --- /dev/null +++ b/app/api/siliconflow.ts @@ -0,0 +1,128 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + SILICONFLOW_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelNotavailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +export async function 
handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[SiliconFlow Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.SiliconFlow); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[SiliconFlow] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.SiliconFlow, ""); + + let baseUrl = serverConfig.siliconFlowUrl || SILICONFLOW_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? "", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelNotavailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.SiliconFlow as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[SiliconFlow] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/client/api.ts b/app/client/api.ts index 8f263763b..64ac82b2a 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -23,6 +23,7 @@ import { SparkApi } from "./platforms/iflytek"; import { DeepSeekApi } from "./platforms/deepseek"; import { XAIApi } from "./platforms/xai"; import { ChatGLMApi } from "./platforms/glm"; +import { SiliconflowApi } from "./platforms/siliconflow"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -164,6 +165,9 @@ export class ClientApi { case ModelProvider.ChatGLM: this.llm = new ChatGLMApi(); break; + case ModelProvider.SiliconFlow: + this.llm = new SiliconflowApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -254,6 +258,8 @@ export function getHeaders(ignoreHeaders: boolean = false) { const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek; const isXAI = modelConfig.providerName === ServiceProvider.XAI; const isChatGLM = 
modelConfig.providerName === ServiceProvider.ChatGLM; + const isSiliconFlow = + modelConfig.providerName === ServiceProvider.SiliconFlow; const isEnabledAccessControl = accessStore.enabledAccessControl(); const apiKey = isGoogle ? accessStore.googleApiKey @@ -273,6 +279,8 @@ export function getHeaders(ignoreHeaders: boolean = false) { ? accessStore.deepseekApiKey : isChatGLM ? accessStore.chatglmApiKey + : isSiliconFlow + ? accessStore.siliconflowApiKey : isIflytek ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret @@ -290,6 +298,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { isDeepSeek, isXAI, isChatGLM, + isSiliconFlow, apiKey, isEnabledAccessControl, }; @@ -317,6 +326,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { isDeepSeek, isXAI, isChatGLM, + isSiliconFlow, apiKey, isEnabledAccessControl, } = getConfig(); @@ -365,6 +375,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi { return new ClientApi(ModelProvider.XAI); case ServiceProvider.ChatGLM: return new ClientApi(ModelProvider.ChatGLM); + case ServiceProvider.SiliconFlow: + return new ClientApi(ModelProvider.SiliconFlow); default: return new ClientApi(ModelProvider.GPT); } diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts new file mode 100644 index 000000000..fe2f9862b --- /dev/null +++ b/app/client/platforms/siliconflow.ts @@ -0,0 +1,243 @@ +"use client"; +// azure and openai, using same models. so using same LLMApi. +import { + ApiPath, + SILICONFLOW_BASE_URL, + SiliconFlow, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { streamWithThink } from "@/app/utils/chat"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import { getClientConfig } from "@/app/config/client"; +import { + getMessageTextContent, + getMessageTextContentWithoutThinking, +} from "@/app/utils"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; + +export class SiliconflowApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.siliconflowUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.SiliconFlow; + baseUrl = isApp ? SILICONFLOW_BASE_URL : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if ( + !baseUrl.startsWith("http") && + !baseUrl.startsWith(ApiPath.SiliconFlow) + ) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + if (v.role === "assistant") { + const content = getMessageTextContentWithoutThinking(v); + messages.push({ role: v.role, content }); + } else { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + }; + + console.log("[Request] openai payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(SiliconFlow.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // console.log(chatPayload); + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return streamWithThink( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string | null; + tool_calls: ChatMessageTool[]; + reasoning_content: string | null; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + const reasoning = choices[0]?.delta?.reasoning_content; + const content = choices[0]?.delta?.content; + + // Skip if both content and reasoning_content are empty or null + if ( + (!reasoning || reasoning.trim().length === 0) && + (!content || content.trim().length === 0) + ) { + return { + isThinking: false, + content: "", + }; + } + + if (reasoning && reasoning.trim().length > 0) { + return { + isThinking: true, + content: reasoning, + }; + } else if (content && content.trim().length > 0) { + return { + isThinking: false, + content: content, + }; + } + + return { + isThinking: false, + content: "", + }; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + 
requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message, res); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/components/settings.tsx b/app/components/settings.tsx index 3b990ed2c..68ebcf084 100644 --- a/app/components/settings.tsx +++ b/app/components/settings.tsx @@ -74,6 +74,7 @@ import { SAAS_CHAT_URL, ChatGLM, DeepSeek, + SiliconFlow, } from "../constant"; import { Prompt, SearchService, usePromptStore } from "../store/prompt"; import { ErrorBoundary } from "./error"; @@ -1318,6 +1319,46 @@ export function Settings() { ); + const siliconflowConfigComponent = accessStore.provider === + ServiceProvider.SiliconFlow && ( + <> + + + accessStore.update( + (access) => (access.siliconflowUrl = e.currentTarget.value), + ) + } + > + + + { + accessStore.update( + (access) => (access.siliconflowApiKey = e.currentTarget.value), + ); + }} + /> + + + ); const stabilityConfigComponent = accessStore.provider === ServiceProvider.Stability && ( @@ -1780,6 +1821,7 @@ export function Settings() { {lflytekConfigComponent} {XAIConfigComponent} {chatglmConfigComponent} + {siliconflowConfigComponent} )} diff --git a/app/config/server.ts b/app/config/server.ts index 1166805b5..43d4ff833 100644 --- a/app/config/server.ts +++ b/app/config/server.ts @@ -84,6 +84,10 @@ declare global { CHATGLM_URL?: string; CHATGLM_API_KEY?: string; + // siliconflow only + SILICONFLOW_URL?: string; + SILICONFLOW_API_KEY?: string; + // custom template for preprocessing user input DEFAULT_INPUT_TEMPLATE?: string; @@ -158,6 +162,7 @@ export const getServerSideConfig = () => { const isDeepSeek = !!process.env.DEEPSEEK_API_KEY; const isXAI = !!process.env.XAI_API_KEY; const isChatGLM = !!process.env.CHATGLM_API_KEY; + const isSiliconFlow = !!process.env.SILICONFLOW_API_KEY; // const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? 
""; // const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim()); // const randomIndex = Math.floor(Math.random() * apiKeys.length); @@ -237,6 +242,10 @@ export const getServerSideConfig = () => { cloudflareKVApiKey: getApiKey(process.env.CLOUDFLARE_KV_API_KEY), cloudflareKVTTL: process.env.CLOUDFLARE_KV_TTL, + isSiliconFlow, + siliconFlowUrl: process.env.SILICONFLOW_URL, + siliconFlowApiKey: getApiKey(process.env.SILICONFLOW_API_KEY), + gtmId: process.env.GTM_ID, gaId: process.env.GA_ID || DEFAULT_GA_ID, diff --git a/app/constant.ts b/app/constant.ts index 60200af41..32e5a2263 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -34,6 +34,8 @@ export const XAI_BASE_URL = "https://api.x.ai"; export const CHATGLM_BASE_URL = "https://open.bigmodel.cn"; +export const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn"; + export const CACHE_URL_PREFIX = "/api/cache"; export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`; @@ -69,6 +71,7 @@ export enum ApiPath { XAI = "/api/xai", ChatGLM = "/api/chatglm", DeepSeek = "/api/deepseek", + SiliconFlow = "/api/siliconflow", } export enum SlotID { @@ -125,6 +128,7 @@ export enum ServiceProvider { XAI = "XAI", ChatGLM = "ChatGLM", DeepSeek = "DeepSeek", + SiliconFlow = "SiliconFlow", } // Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings @@ -150,6 +154,7 @@ export enum ModelProvider { XAI = "XAI", ChatGLM = "ChatGLM", DeepSeek = "DeepSeek", + SiliconFlow = "SiliconFlow", } export const Stability = { @@ -249,6 +254,11 @@ export const ChatGLM = { VideoPath: "api/paas/v4/videos/generations", }; +export const SiliconFlow = { + ExampleEndpoint: SILICONFLOW_BASE_URL, + ChatPath: "v1/chat/completions", +}; + export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang // export const DEFAULT_SYSTEM_TEMPLATE = ` // You are ChatGPT, a large language model trained by {{ServiceProvider}}. 
@@ -413,7 +423,7 @@ export const KnowledgeCutOffDate: Record = { "o1-preview-2024-09-12": "2023-10", "o1-preview": "2023-10", "o1-2024-12-17": "2023-10", - "o1": "2023-10", + o1: "2023-10", "o3-mini-2025-01-31": "2023-10", "o3-mini": "2023-10", // After improvements, @@ -597,6 +607,21 @@ const chatglmModels = [ // "cogvideox-flash", // free ]; +const siliconflowModels = [ + "Qwen/Qwen2.5-7B-Instruct", + "Qwen/Qwen2.5-72B-Instruct", + "deepseek-ai/DeepSeek-R1", + "deepseek-ai/DeepSeek-R1-Distill-Llama-70B", + "deepseek-ai/DeepSeek-R1-Distill-Llama-8B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-14B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", + "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", + "deepseek-ai/DeepSeek-V3", + "meta-llama/Llama-3.3-70B-Instruct", + "THUDM/glm-4-9b-chat", +]; + let seq = 1000; // 内置的模型序号生成器从1000开始 export const DEFAULT_MODELS = [ ...openaiModels.map((name) => ({ @@ -742,6 +767,17 @@ export const DEFAULT_MODELS = [ sorted: 13, }, })), + ...siliconflowModels.map((name) => ({ + name, + available: true, + sorted: seq++, + provider: { + id: "siliconflow", + providerName: "SiliconFlow", + providerType: "siliconflow", + sorted: 14, + }, + })), ] as const; export const CHAT_PAGE_SIZE = 15; diff --git a/app/locales/cn.ts b/app/locales/cn.ts index 39498f662..81b609cde 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -496,6 +496,17 @@ const cn = { SubTitle: "样例:", }, }, + SiliconFlow: { + ApiKey: { + Title: "接口密钥", + SubTitle: "使用自定义硅基流动 API Key", + Placeholder: "硅基流动 API Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + }, Stability: { ApiKey: { Title: "接口密钥", diff --git a/app/locales/en.ts b/app/locales/en.ts index 8c2c19f18..8fecf8bf7 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -480,6 +480,17 @@ const en: LocaleType = { SubTitle: "Example: ", }, }, + SiliconFlow: { + ApiKey: { + Title: "SiliconFlow API Key", + SubTitle: "Use a custom SiliconFlow API Key", + Placeholder: "SiliconFlow API Key", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "Example: ", + }, + }, Stability: { ApiKey: { Title: "Stability API Key", diff --git a/app/store/access.ts b/app/store/access.ts index 1fed5dfed..7025a1814 100644 --- a/app/store/access.ts +++ b/app/store/access.ts @@ -16,6 +16,7 @@ import { DEEPSEEK_BASE_URL, XAI_BASE_URL, CHATGLM_BASE_URL, + SILICONFLOW_BASE_URL, } from "../constant"; import { getHeaders } from "../client/api"; import { getClientConfig } from "../config/client"; @@ -54,6 +55,10 @@ const DEFAULT_XAI_URL = isApp ? XAI_BASE_URL : ApiPath.XAI; const DEFAULT_CHATGLM_URL = isApp ? CHATGLM_BASE_URL : ApiPath.ChatGLM; +const DEFAULT_SILICONFLOW_URL = isApp + ? 
SILICONFLOW_BASE_URL + : ApiPath.SiliconFlow; + const DEFAULT_ACCESS_STATE = { accessCode: "", useCustomConfig: false, @@ -123,6 +128,10 @@ const DEFAULT_ACCESS_STATE = { chatglmUrl: DEFAULT_CHATGLM_URL, chatglmApiKey: "", + // siliconflow + siliconflowUrl: DEFAULT_SILICONFLOW_URL, + siliconflowApiKey: "", + // server config needCode: true, hideUserApiKey: false, @@ -206,6 +215,10 @@ export const useAccessStore = createPersistStore( return ensure(get(), ["chatglmApiKey"]); }, + isValidSiliconFlow() { + return ensure(get(), ["siliconflowApiKey"]); + }, + isAuthorized() { this.fetch(); @@ -224,6 +237,7 @@ export const useAccessStore = createPersistStore( this.isValidDeepSeek() || this.isValidXAI() || this.isValidChatGLM() || + this.isValidSiliconFlow() || !this.enabledAccessControl() || (this.enabledAccessControl() && ensure(get(), ["accessCode"])) ); From 5225a6e1921d170803ab11aa8ba09957cf0b678b Mon Sep 17 00:00:00 2001 From: Eric-2369 Date: Wed, 5 Feb 2025 12:34:00 +0800 Subject: [PATCH 02/37] feat: add more llm icons --- app/components/emoji.tsx | 65 ++++++++++++++++++---- app/components/ui-lib.tsx | 2 + app/icons/llm-icons/chatglm.svg | 14 +++++ app/icons/llm-icons/claude.svg | 8 +++ app/icons/llm-icons/deepseek.svg | 8 +++ app/icons/llm-icons/default.svg | 27 ++++++++++ app/icons/llm-icons/doubao.svg | 14 +++++ app/icons/llm-icons/gemini.svg | 15 ++++++ app/icons/llm-icons/gemma.svg | 15 ++++++ app/icons/llm-icons/grok.svg | 8 +++ app/icons/llm-icons/hunyuan.svg | 17 ++++++ app/icons/llm-icons/meta.svg | 93 ++++++++++++++++++++++++++++++++ app/icons/llm-icons/mistral.svg | 15 ++++++ app/icons/llm-icons/moonshot.svg | 8 +++ app/icons/llm-icons/openai.svg | 8 +++ app/icons/llm-icons/qwen.svg | 14 +++++ app/icons/llm-icons/wenxin.svg | 18 +++++++ 17 files changed, 339 insertions(+), 10 deletions(-) create mode 100644 app/icons/llm-icons/chatglm.svg create mode 100644 app/icons/llm-icons/claude.svg create mode 100644 app/icons/llm-icons/deepseek.svg create mode 100644 app/icons/llm-icons/default.svg create mode 100644 app/icons/llm-icons/doubao.svg create mode 100644 app/icons/llm-icons/gemini.svg create mode 100644 app/icons/llm-icons/gemma.svg create mode 100644 app/icons/llm-icons/grok.svg create mode 100644 app/icons/llm-icons/hunyuan.svg create mode 100644 app/icons/llm-icons/meta.svg create mode 100644 app/icons/llm-icons/mistral.svg create mode 100644 app/icons/llm-icons/moonshot.svg create mode 100644 app/icons/llm-icons/openai.svg create mode 100644 app/icons/llm-icons/qwen.svg create mode 100644 app/icons/llm-icons/wenxin.svg diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index 54d1c1c99..6686d8731 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -6,8 +6,21 @@ import EmojiPicker, { import { ModelType } from "../store"; -import BotIcon from "../icons/bot.svg"; -import BlackBotIcon from "../icons/black-bot.svg"; +import BotIconDefault from "../icons/llm-icons/default.svg"; +import BotIconOpenAI from "../icons/llm-icons/openai.svg"; +import BotIconGemini from "../icons/llm-icons/gemini.svg"; +import BotIconGemma from "../icons/llm-icons/gemma.svg"; +import BotIconClaude from "../icons/llm-icons/claude.svg"; +import BotIconMeta from "../icons/llm-icons/meta.svg"; +import BotIconMistral from "../icons/llm-icons/mistral.svg"; +import BotIconDeepseek from "../icons/llm-icons/deepseek.svg"; +import BotIconMoonshot from "../icons/llm-icons/moonshot.svg"; +import BotIconQwen from "../icons/llm-icons/qwen.svg"; +import BotIconWenxin from 
"../icons/llm-icons/wenxin.svg"; +import BotIconGrok from "../icons/llm-icons/grok.svg"; +import BotIconHunyuan from "../icons/llm-icons/hunyuan.svg"; +import BotIconDoubao from "../icons/llm-icons/doubao.svg"; +import BotIconChatglm from "../icons/llm-icons/chatglm.svg"; export function getEmojiUrl(unified: string, style: EmojiStyle) { // Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis @@ -33,17 +46,49 @@ export function AvatarPicker(props: { } export function Avatar(props: { model?: ModelType; avatar?: string }) { + let LlmIcon = BotIconDefault; + if (props.model) { + const modelName = props.model.toLowerCase(); + + if ( + modelName.startsWith("gpt") || + modelName.startsWith("chatgpt") || + modelName.startsWith("o1") || + modelName.startsWith("o3") + ) { + LlmIcon = BotIconOpenAI; + } else if (modelName.startsWith("gemini")) { + LlmIcon = BotIconGemini; + } else if (modelName.startsWith("gemma")) { + LlmIcon = BotIconGemma; + } else if (modelName.startsWith("claude")) { + LlmIcon = BotIconClaude; + } else if (modelName.startsWith("llama")) { + LlmIcon = BotIconMeta; + } else if (modelName.startsWith("mixtral")) { + LlmIcon = BotIconMistral; + } else if (modelName.startsWith("deepseek")) { + LlmIcon = BotIconDeepseek; + } else if (modelName.startsWith("moonshot")) { + LlmIcon = BotIconMoonshot; + } else if (modelName.startsWith("qwen")) { + LlmIcon = BotIconQwen; + } else if (modelName.startsWith("ernie")) { + LlmIcon = BotIconWenxin; + } else if (modelName.startsWith("grok")) { + LlmIcon = BotIconGrok; + } else if (modelName.startsWith("hunyuan")) { + LlmIcon = BotIconHunyuan; + } else if (modelName.startsWith("doubao")) { + LlmIcon = BotIconDoubao; + } else if (modelName.startsWith("glm")) { + LlmIcon = BotIconChatglm; + } + return (
- {props.model?.startsWith("gpt-4") || - props.model?.startsWith("chatgpt-4o") || - props.model?.startsWith("o1") || - props.model?.startsWith("o3") ? ( - - ) : ( - - )} +
); } diff --git a/app/components/ui-lib.tsx b/app/components/ui-lib.tsx index a64265235..7b9f5ace0 100644 --- a/app/components/ui-lib.tsx +++ b/app/components/ui-lib.tsx @@ -23,6 +23,7 @@ import React, { useRef, } from "react"; import { IconButton } from "./button"; +import { Avatar } from "./emoji"; import clsx from "clsx"; export function Popover(props: { @@ -522,6 +523,7 @@ export function Selector(props: { key={i} title={item.title} subTitle={item.subTitle} + icon={} onClick={(e) => { if (item.disable) { e.stopPropagation(); diff --git a/app/icons/llm-icons/chatglm.svg b/app/icons/llm-icons/chatglm.svg new file mode 100644 index 000000000..642750f3e --- /dev/null +++ b/app/icons/llm-icons/chatglm.svg @@ -0,0 +1,14 @@ + + ChatGLM + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/claude.svg b/app/icons/llm-icons/claude.svg new file mode 100644 index 000000000..ca8e447bb --- /dev/null +++ b/app/icons/llm-icons/claude.svg @@ -0,0 +1,8 @@ + + Claude + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/deepseek.svg b/app/icons/llm-icons/deepseek.svg new file mode 100644 index 000000000..30440e316 --- /dev/null +++ b/app/icons/llm-icons/deepseek.svg @@ -0,0 +1,8 @@ + + DeepSeek + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/default.svg b/app/icons/llm-icons/default.svg new file mode 100644 index 000000000..2ebff6b3f --- /dev/null +++ b/app/icons/llm-icons/default.svg @@ -0,0 +1,27 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/doubao.svg b/app/icons/llm-icons/doubao.svg new file mode 100644 index 000000000..79b1b822a --- /dev/null +++ b/app/icons/llm-icons/doubao.svg @@ -0,0 +1,14 @@ + + Doubao + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/gemini.svg b/app/icons/llm-icons/gemini.svg new file mode 100644 index 000000000..587669135 --- /dev/null +++ b/app/icons/llm-icons/gemini.svg @@ -0,0 +1,15 @@ + + Gemini + + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/gemma.svg b/app/icons/llm-icons/gemma.svg new file mode 100644 index 000000000..daf1a035c --- /dev/null +++ b/app/icons/llm-icons/gemma.svg @@ -0,0 +1,15 @@ + + Gemma + + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/grok.svg b/app/icons/llm-icons/grok.svg new file mode 100644 index 000000000..335786777 --- /dev/null +++ b/app/icons/llm-icons/grok.svg @@ -0,0 +1,8 @@ + + Grok + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/hunyuan.svg b/app/icons/llm-icons/hunyuan.svg new file mode 100644 index 000000000..f67930c98 --- /dev/null +++ b/app/icons/llm-icons/hunyuan.svg @@ -0,0 +1,17 @@ + + Hunyuan + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/meta.svg b/app/icons/llm-icons/meta.svg new file mode 100644 index 000000000..75dc40df7 --- /dev/null +++ b/app/icons/llm-icons/meta.svg @@ -0,0 +1,93 @@ + + Meta + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/mistral.svg b/app/icons/llm-icons/mistral.svg new file mode 100644 index 000000000..e577faca5 --- /dev/null +++ b/app/icons/llm-icons/mistral.svg @@ -0,0 +1,15 @@ + + Mistral + + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/moonshot.svg b/app/icons/llm-icons/moonshot.svg new file mode 100644 index 
000000000..8ab682d37 --- /dev/null +++ b/app/icons/llm-icons/moonshot.svg @@ -0,0 +1,8 @@ + + MoonshotAI + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/openai.svg b/app/icons/llm-icons/openai.svg new file mode 100644 index 000000000..ac4567f87 --- /dev/null +++ b/app/icons/llm-icons/openai.svg @@ -0,0 +1,8 @@ + + OpenAI + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/qwen.svg b/app/icons/llm-icons/qwen.svg new file mode 100644 index 000000000..857ce2186 --- /dev/null +++ b/app/icons/llm-icons/qwen.svg @@ -0,0 +1,14 @@ + + Qwen + + + + + + + + + + + \ No newline at end of file diff --git a/app/icons/llm-icons/wenxin.svg b/app/icons/llm-icons/wenxin.svg new file mode 100644 index 000000000..0030b0e01 --- /dev/null +++ b/app/icons/llm-icons/wenxin.svg @@ -0,0 +1,18 @@ + + Wenxin + + + + + + + + + + + + + + \ No newline at end of file From e5e5fde924a7598a6c447c079cce7337294b9d81 Mon Sep 17 00:00:00 2001 From: dupl <67990457+dupl@users.noreply.github.com> Date: Fri, 7 Feb 2025 06:50:31 +0800 Subject: [PATCH 03/37] update the lastest Gemini models --- app/constant.ts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/app/constant.ts b/app/constant.ts index 32e5a2263..226cd4046 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -509,10 +509,14 @@ const googleModels = [ "gemini-exp-1114", "gemini-exp-1121", "gemini-exp-1206", + "gemini-2.0-flash", "gemini-2.0-flash-exp", + "gemini-2.0-flash-lite-preview-02-05", "gemini-2.0-flash-thinking-exp", "gemini-2.0-flash-thinking-exp-1219", "gemini-2.0-flash-thinking-exp-01-21", + "gemini-2.0-pro-exp", + "gemini-2.0-pro-exp-02-05", ]; const anthropicModels = [ From 51384ddc5feff6ca31028c77cf6b17b751a0ab24 Mon Sep 17 00:00:00 2001 From: ZhangYichi Date: Fri, 7 Feb 2025 11:13:22 +0800 Subject: [PATCH 04/37] Fix: Set consistent fill color for OpenAI/MoonShot/Grok SVG to prevent color inversion in dark mode --- app/icons/llm-icons/grok.svg | 2 +- app/icons/llm-icons/moonshot.svg | 2 +- app/icons/llm-icons/openai.svg | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/app/icons/llm-icons/grok.svg b/app/icons/llm-icons/grok.svg index 335786777..8125cd610 100644 --- a/app/icons/llm-icons/grok.svg +++ b/app/icons/llm-icons/grok.svg @@ -1,4 +1,4 @@ - Grok diff --git a/app/icons/llm-icons/moonshot.svg b/app/icons/llm-icons/moonshot.svg index 8ab682d37..5206e0f12 100644 --- a/app/icons/llm-icons/moonshot.svg +++ b/app/icons/llm-icons/moonshot.svg @@ -1,4 +1,4 @@ - MoonshotAI diff --git a/app/icons/llm-icons/openai.svg b/app/icons/llm-icons/openai.svg index ac4567f87..564cd5e87 100644 --- a/app/icons/llm-icons/openai.svg +++ b/app/icons/llm-icons/openai.svg @@ -1,4 +1,4 @@ - OpenAI From 1010db834ce52f6a832bf50d3645527f3b42697e Mon Sep 17 00:00:00 2001 From: xiexin12138 Date: Fri, 7 Feb 2025 15:41:40 +0800 Subject: [PATCH 05/37] =?UTF-8?q?fix:=20=E8=A1=A5=E5=85=85=E7=A1=85?= =?UTF-8?q?=E5=9F=BA=E6=B5=81=E5=8A=A8=E7=9A=84=20env=20=E7=8E=AF=E5=A2=83?= =?UTF-8?q?=E5=8F=98=E9=87=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .env.template | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.env.template b/.env.template index 907ec9dfe..4efaa2ff8 100644 --- a/.env.template +++ b/.env.template @@ -73,6 +73,11 @@ ANTHROPIC_API_VERSION= ### anthropic claude Api url (optional) ANTHROPIC_URL= - ### (optional) WHITE_WEBDAV_ENDPOINTS= + +### siliconflow Api key (optional) +SILICONFLOW_API_KEY= + +### siliconflow Api url (optional) 
+SILICONFLOW_URL= From a780b39c17a271eb44421ac2f027fcf91c3b77cf Mon Sep 17 00:00:00 2001 From: xiexin12138 Date: Fri, 7 Feb 2025 15:43:50 +0800 Subject: [PATCH 06/37] =?UTF-8?q?fix:=20=E8=A1=A5=E5=85=85=E7=A1=85?= =?UTF-8?q?=E5=9F=BA=E6=B5=81=E5=8A=A8=E5=AF=B9=20DeepSeek=20=E6=94=AF?= =?UTF-8?q?=E6=8C=81=E7=9A=84=E4=BB=98=E8=B4=B9=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/constant.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/app/constant.ts b/app/constant.ts index 32e5a2263..dd478c5e7 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -620,6 +620,8 @@ const siliconflowModels = [ "deepseek-ai/DeepSeek-V3", "meta-llama/Llama-3.3-70B-Instruct", "THUDM/glm-4-9b-chat", + "Pro/deepseek-ai/DeepSeek-R1", + "Pro/deepseek-ai/DeepSeek-V3", ]; let seq = 1000; // 内置的模型序号生成器从1000开始 From f30c6a4348fb25fead1d1ba4f4ff6717a45496fb Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Fri, 7 Feb 2025 16:14:19 +0800 Subject: [PATCH 07/37] fix doubao and grok not upload image --- app/client/platforms/bytedance.ts | 11 ++++++----- app/client/platforms/xai.ts | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index a2f0660d8..c2f128128 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -22,7 +22,7 @@ import { } from "@fortaine/fetch-event-source"; import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; +import { preProcessImageContent } from "@/app/utils/chat"; import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { @@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi { } async chat(options: ChatOptions) { - const messages = options.messages.map((v) => ({ - role: v.role, - content: getMessageTextContent(v), - })); + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = await preProcessImageContent(v.content); + messages.push({ role: v.role, content }); + } const modelConfig = { ...useAppConfig.getState().modelConfig, diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts index 06dbaaa29..8c41c2d98 100644 --- a/app/client/platforms/xai.ts +++ b/app/client/platforms/xai.ts @@ -17,7 +17,7 @@ import { SpeechOptions, } from "../api"; import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; +import { preProcessImageContent } from "@/app/utils/chat"; import { RequestPayload } from "./openai"; import { fetch } from "@/app/utils/stream"; @@ -62,7 +62,7 @@ export class XAIApi implements LLMApi { async chat(options: ChatOptions) { const messages: ChatOptions["messages"] = []; for (const v of options.messages) { - const content = getMessageTextContent(v); + const content = await preProcessImageContent(v.content); messages.push({ role: v.role, content }); } From f156430cc5f9451618b13e6432148d1d0dd35c5c Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Fri, 7 Feb 2025 16:18:15 +0800 Subject: [PATCH 08/37] fix emoji issue for doubao and glm's congview & congvideox --- app/components/emoji.tsx | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index 6686d8731..6cefe3497 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -80,9 +80,13 @@ export function Avatar(props: { 
model?: ModelType; avatar?: string }) { LlmIcon = BotIconGrok; } else if (modelName.startsWith("hunyuan")) { LlmIcon = BotIconHunyuan; - } else if (modelName.startsWith("doubao")) { + } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) { LlmIcon = BotIconDoubao; - } else if (modelName.startsWith("glm")) { + } else if ( + modelName.startsWith("glm") || + modelName.startsWith("cogview-") || + modelName.startsWith("cogvideox-") + ) { LlmIcon = BotIconChatglm; } From 3fe55b4f7ff1791cf6e8c5d9da02b69a240e98a8 Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Fri, 7 Feb 2025 16:20:07 +0800 Subject: [PATCH 09/37] fix bug that gemini has multiple candidates part --- app/client/platforms/google.ts | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 5ca8e1071..22c89b13f 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -69,9 +69,16 @@ export class GeminiProApi implements LLMApi { .join("\n\n"); }; + let content = ""; + if (Array.isArray(res)) { + res.map((item) => { + content += getTextFromParts(item?.candidates?.at(0)?.content?.parts); + }); + } + return ( getTextFromParts(res?.candidates?.at(0)?.content?.parts) || - getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) || + content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) || res?.error?.message || "" ); From a5a976824591a7e2c228dbb257616b98fd7a53ed Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Fri, 7 Feb 2025 16:34:14 +0800 Subject: [PATCH 10/37] change request timeout for thinking mode --- app/client/platforms/deepseek.ts | 7 ++++++- app/client/platforms/google.ts | 10 ++++++++-- app/client/platforms/openai.ts | 9 +++++++-- app/constant.ts | 1 + 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts index 2bf3b2338..c436ae61d 100644 --- a/app/client/platforms/deepseek.ts +++ b/app/client/platforms/deepseek.ts @@ -5,6 +5,7 @@ import { DEEPSEEK_BASE_URL, DeepSeek, REQUEST_TIMEOUT_MS, + REQUEST_TIMEOUT_MS_FOR_THINKING, } from "@/app/constant"; import { useAccessStore, @@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi { // console.log(chatPayload); + const isR1 = + options.config.model.endsWith("-reasoner") || + options.config.model.endsWith("-r1"); + // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS, ); if (shouldStream) { diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 22c89b13f..1e593dd42 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,4 +1,9 @@ -import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { + ApiPath, + Google, + REQUEST_TIMEOUT_MS, + REQUEST_TIMEOUT_MS_FOR_THINKING, +} from "@/app/constant"; import { ChatOptions, getHeaders, @@ -197,10 +202,11 @@ export class GeminiProApi implements LLMApi { headers: getHeaders(), }; + const isThinking = options.config.model.includes("-thinking"); // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + isThinking ? 
REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS, ); if (shouldStream) { diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 467bb82e0..fbe533cad 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -8,6 +8,7 @@ import { Azure, REQUEST_TIMEOUT_MS, ServiceProvider, + REQUEST_TIMEOUT_MS_FOR_THINKING, } from "@/app/constant"; import { ChatMessageTool, @@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi { let requestPayload: RequestPayload | DalleRequestPayload; const isDalle3 = _isDalle3(options.config.model); - const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3"); + const isO1OrO3 = + options.config.model.startsWith("o1") || + options.config.model.startsWith("o3"); if (isDalle3) { const prompt = getMessageTextContent( options.messages.slice(-1)?.pop() as any, @@ -359,7 +362,9 @@ export class ChatGPTApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. + isDalle3 || isO1OrO3 + ? REQUEST_TIMEOUT_MS_FOR_THINKING + : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. ); const res = await fetch(chatPath, chatPayload); diff --git a/app/constant.ts b/app/constant.ts index 32e5a2263..64aa734f4 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -110,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id; export const STORAGE_KEY = "chatgpt-next-web"; export const REQUEST_TIMEOUT_MS = 60000; +export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5; export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown"; From c4e9cb03a92751b37ec0b9615ef5ec056fa20bde Mon Sep 17 00:00:00 2001 From: itsevin <2720269770@qq.com> Date: Fri, 7 Feb 2025 20:29:21 +0800 Subject: [PATCH 11/37] Add Xai model --- app/constant.ts | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/app/constant.ts b/app/constant.ts index 32e5a2263..e04152d0f 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -585,7 +585,16 @@ const iflytekModels = [ const deepseekModels = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"]; -const xAIModes = ["grok-beta"]; +const xAIModes = [ + "grok-beta", + "grok-2", + "grok-2-1212", + "grok-2-latest", + "grok-vision-beta", + "grok-2-vision-1212", + "grok-2-vision", + "grok-2-vision-latest", +]; const chatglmModels = [ "glm-4-plus", From 2a3996e0d66e41a99bfd4373c2bd9dec4d78652a Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Sat, 8 Feb 2025 14:38:12 +0800 Subject: [PATCH 12/37] Update siliconflow.ts --- app/client/platforms/siliconflow.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts index fe2f9862b..1bdf587e6 100644 --- a/app/client/platforms/siliconflow.ts +++ b/app/client/platforms/siliconflow.ts @@ -121,10 +121,10 @@ export class SiliconflowApi implements LLMApi { // console.log(chatPayload); // make a fetch request - const requestTimeoutId = setTimeout( - () => controller.abort(), - REQUEST_TIMEOUT_MS, - ); + const requestTimeoutId = setTimeout(() => { + console.error("[Request] SiliconFlow API timeout"); + controller.abort(); + }, 10 * REQUEST_TIMEOUT_MS); if (shouldStream) { const [tools, funcs] = usePluginStore From 1ae5fdbf013349a2c32e6083b41500cbf2c4000d Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Sat, 8 Feb 2025 16:15:10 +0800 Subject: 
[PATCH 13/37] mini optimizations --- app/client/platforms/siliconflow.ts | 4 ++-- app/components/emoji.tsx | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts index fe2f9862b..d6d51fe93 100644 --- a/app/client/platforms/siliconflow.ts +++ b/app/client/platforms/siliconflow.ts @@ -4,7 +4,7 @@ import { ApiPath, SILICONFLOW_BASE_URL, SiliconFlow, - REQUEST_TIMEOUT_MS, + REQUEST_TIMEOUT_MS_FOR_THINKING, } from "@/app/constant"; import { useAccessStore, @@ -123,7 +123,7 @@ export class SiliconflowApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + REQUEST_TIMEOUT_MS_FOR_THINKING, ); if (shouldStream) { diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index 6cefe3497..ecb1c6581 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -54,6 +54,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) { if ( modelName.startsWith("gpt") || modelName.startsWith("chatgpt") || + modelName.startsWith("dall-e") || + modelName.startsWith("dalle") || modelName.startsWith("o1") || modelName.startsWith("o3") ) { From acf75ce68f7152972fe5924b4880b3ae06c0ca65 Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Sat, 8 Feb 2025 16:34:17 +0800 Subject: [PATCH 14/37] Remove unnecessary trimming --- app/client/platforms/siliconflow.ts | 8 ++++---- app/utils/chat.ts | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts index fe2f9862b..90dc13511 100644 --- a/app/client/platforms/siliconflow.ts +++ b/app/client/platforms/siliconflow.ts @@ -174,8 +174,8 @@ export class SiliconflowApi implements LLMApi { // Skip if both content and reasoning_content are empty or null if ( - (!reasoning || reasoning.trim().length === 0) && - (!content || content.trim().length === 0) + (!reasoning || reasoning.length === 0) && + (!content || content.length === 0) ) { return { isThinking: false, @@ -183,12 +183,12 @@ export class SiliconflowApi implements LLMApi { }; } - if (reasoning && reasoning.trim().length > 0) { + if (reasoning && reasoning.length > 0) { return { isThinking: true, content: reasoning, }; - } else if (content && content.trim().length > 0) { + } else if (content && content.length > 0) { return { isThinking: false, content: content, diff --git a/app/utils/chat.ts b/app/utils/chat.ts index c04d33cbf..b77955e6e 100644 --- a/app/utils/chat.ts +++ b/app/utils/chat.ts @@ -576,7 +576,7 @@ export function streamWithThink( try { const chunk = parseSSE(text, runTools); // Skip if content is empty - if (!chunk?.content || chunk.content.trim().length === 0) { + if (!chunk?.content || chunk.content.length === 0) { return; } // Check if thinking mode changed From 2842b264e06b08de9cfdcb84982ee6571fa45881 Mon Sep 17 00:00:00 2001 From: RiverRay Date: Sun, 9 Feb 2025 11:05:32 +0800 Subject: [PATCH 15/37] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 047f9431e..4864ab00d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023-2024 Zhang Yifei +Copyright (c) 2023-2025 NextChat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal From 9f91c2d05c21c7fea604a88a0974679a07293c81 Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Sun, 9 Feb 
2025 16:52:46 +0800 Subject: [PATCH 16/37] fix avatar for export message preview and saved image --- app/components/exporter.tsx | 37 ++++++++++++++----------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx index 79ae87be2..69a73062a 100644 --- a/app/components/exporter.tsx +++ b/app/components/exporter.tsx @@ -23,7 +23,6 @@ import CopyIcon from "../icons/copy.svg"; import LoadingIcon from "../icons/three-dots.svg"; import ChatGptIcon from "../icons/chatgpt.png"; import ShareIcon from "../icons/share.svg"; -import BotIcon from "../icons/bot.png"; import DownloadIcon from "../icons/download.svg"; import { useEffect, useMemo, useRef, useState } from "react"; @@ -33,13 +32,13 @@ import dynamic from "next/dynamic"; import NextImage from "next/image"; import { toBlob, toPng } from "html-to-image"; -import { DEFAULT_MASK_AVATAR } from "../store/mask"; import { prettyObject } from "../utils/format"; import { EXPORT_MESSAGE_CLASS_NAME } from "../constant"; import { getClientConfig } from "../config/client"; import { type ClientApi, getClientApi } from "../client/api"; import { getMessageTextContent } from "../utils"; +import { MaskAvatar } from "./mask"; import clsx from "clsx"; const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { @@ -407,22 +406,6 @@ export function PreviewActions(props: { ); } -function ExportAvatar(props: { avatar: string }) { - if (props.avatar === DEFAULT_MASK_AVATAR) { - return ( - bot - ); - } - - return ; -} - export function ImagePreviewer(props: { messages: ChatMessage[]; topic: string; @@ -546,9 +529,12 @@ export function ImagePreviewer(props: { github.com/ChatGPTNextWeb/ChatGPT-Next-Web
- + & - +
@@ -576,9 +562,14 @@ export function ImagePreviewer(props: { key={i} >
- + {m.role === "user" ? ( + + ) : ( + + )}
From 0bfc6480855640032ec3593960b434fc5e1c1de5 Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Sun, 9 Feb 2025 18:47:57 +0800 Subject: [PATCH 17/37] fix model icon on siliconflow --- app/components/emoji.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index ecb1c6581..19fb1400e 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) { LlmIcon = BotIconGemma; } else if (modelName.startsWith("claude")) { LlmIcon = BotIconClaude; - } else if (modelName.startsWith("llama")) { + } else if (modelName.includes("llama")) { LlmIcon = BotIconMeta; } else if (modelName.startsWith("mixtral")) { LlmIcon = BotIconMistral; - } else if (modelName.startsWith("deepseek")) { + } else if (modelName.includes("deepseek")) { LlmIcon = BotIconDeepseek; } else if (modelName.startsWith("moonshot")) { LlmIcon = BotIconMoonshot; @@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) { } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) { LlmIcon = BotIconDoubao; } else if ( - modelName.startsWith("glm") || + modelName.includes("glm") || modelName.startsWith("cogview-") || modelName.startsWith("cogvideox-") ) { From 18fa2cc30d96fbb452efd9226db7ca6021cacb3e Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Sun, 9 Feb 2025 18:49:26 +0800 Subject: [PATCH 18/37] fix model icon on siliconflow --- app/components/emoji.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index 19fb1400e..1bf39ac1d 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) { LlmIcon = BotIconGemma; } else if (modelName.startsWith("claude")) { LlmIcon = BotIconClaude; - } else if (modelName.includes("llama")) { + } else if (modelName.toLowerCase().includes("llama")) { LlmIcon = BotIconMeta; } else if (modelName.startsWith("mixtral")) { LlmIcon = BotIconMistral; - } else if (modelName.includes("deepseek")) { + } else if (modelName.toLowerCase().includes("deepseek")) { LlmIcon = BotIconDeepseek; } else if (modelName.startsWith("moonshot")) { LlmIcon = BotIconMoonshot; @@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) { } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) { LlmIcon = BotIconDoubao; } else if ( - modelName.includes("glm") || + modelName.toLowerCase().includes("glm") || modelName.startsWith("cogview-") || modelName.startsWith("cogvideox-") ) { From 2137aa65bfaeda33bdbfad7f1ae36bfdde8c9edf Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Mon, 10 Feb 2025 11:03:49 +0800 Subject: [PATCH 19/37] Model listing of SiliconFlow --- app/client/platforms/siliconflow.ts | 44 +++++++++++++++++++++++++++-- app/constant.ts | 1 + 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts index 1ad316a61..8cf9ad3b1 100644 --- a/app/client/platforms/siliconflow.ts +++ b/app/client/platforms/siliconflow.ts @@ -5,6 +5,7 @@ import { SILICONFLOW_BASE_URL, SiliconFlow, REQUEST_TIMEOUT_MS_FOR_THINKING, + DEFAULT_MODELS, } from "@/app/constant"; import { useAccessStore, @@ -27,10 +28,19 @@ import { getMessageTextContentWithoutThinking, } from "@/app/utils"; import { RequestPayload } from "./openai"; + import 
{ fetch } from "@/app/utils/stream"; +export interface SiliconFlowListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} export class SiliconflowApi implements LLMApi { - private disableListModels = true; + private disableListModels = false; path(path: string): string { const accessStore = useAccessStore.getState(); @@ -238,6 +248,36 @@ export class SiliconflowApi implements LLMApi { } async models(): Promise { - return []; + if (this.disableListModels) { + return DEFAULT_MODELS.slice(); + } + + const res = await fetch(this.path(SiliconFlow.ListModelPath), { + method: "GET", + headers: { + ...getHeaders(), + }, + }); + + const resJson = (await res.json()) as SiliconFlowListModelResponse; + const chatModels = resJson.data; + console.log("[Models]", chatModels); + + if (!chatModels) { + return []; + } + + let seq = 1000; //同 Constant.ts 中的排序保持一致 + return chatModels.map((m) => ({ + name: m.id, + available: true, + sorted: seq++, + provider: { + id: "siliconflow", + providerName: "SiliconFlow", + providerType: "siliconflow", + sorted: 14, + }, + })); } } diff --git a/app/constant.ts b/app/constant.ts index 09eec44b6..5d0640d1c 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -258,6 +258,7 @@ export const ChatGLM = { export const SiliconFlow = { ExampleEndpoint: SILICONFLOW_BASE_URL, ChatPath: "v1/chat/completions", + ListModelPath: "v1/models?&sub_type=chat", }; export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang From 86f86962fb0725b888cee6ebd9eb9f818a0c9cee Mon Sep 17 00:00:00 2001 From: Shenghang Tsai Date: Mon, 10 Feb 2025 13:37:48 +0800 Subject: [PATCH 20/37] Support VLM on SiliconFlow --- app/client/platforms/siliconflow.ts | 8 ++++++-- app/constant.ts | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts index 1ad316a61..17650a9c6 100644 --- a/app/client/platforms/siliconflow.ts +++ b/app/client/platforms/siliconflow.ts @@ -13,7 +13,7 @@ import { ChatMessageTool, usePluginStore, } from "@/app/store"; -import { streamWithThink } from "@/app/utils/chat"; +import { preProcessImageContent, streamWithThink } from "@/app/utils/chat"; import { ChatOptions, getHeaders, @@ -25,6 +25,7 @@ import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, getMessageTextContentWithoutThinking, + isVisionModel, } from "@/app/utils"; import { RequestPayload } from "./openai"; import { fetch } from "@/app/utils/stream"; @@ -71,13 +72,16 @@ export class SiliconflowApi implements LLMApi { } async chat(options: ChatOptions) { + const visionModel = isVisionModel(options.config.model); const messages: ChatOptions["messages"] = []; for (const v of options.messages) { if (v.role === "assistant") { const content = getMessageTextContentWithoutThinking(v); messages.push({ role: v.role, content }); } else { - const content = getMessageTextContent(v); + const content = visionModel + ? 
await preProcessImageContent(v.content) + : getMessageTextContent(v); messages.push({ role: v.role, content }); } } diff --git a/app/constant.ts b/app/constant.ts index 09eec44b6..d9cb62bf9 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -462,6 +462,7 @@ export const VISION_MODEL_REGEXES = [ /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview" /^dall-e-3$/, // Matches exactly "dall-e-3" /glm-4v/, + /vl/i, ]; export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/]; From 98a11e56d2c55d7d89dfc4c8905045781863bf98 Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Tue, 11 Feb 2025 12:46:46 +0800 Subject: [PATCH 21/37] support alibaba and bytedance's reasoning_content --- app/client/platforms/alibaba.ts | 220 ++++++++++++++---------------- app/client/platforms/bytedance.ts | 205 +++++++++++++--------------- 2 files changed, 200 insertions(+), 225 deletions(-) diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index 6fe69e87a..13cb558f9 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -5,8 +5,14 @@ import { ALIBABA_BASE_URL, REQUEST_TIMEOUT_MS, } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; - +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { streamWithThink } from "@/app/utils/chat"; import { ChatOptions, getHeaders, @@ -15,14 +21,11 @@ import { SpeechOptions, MultimodalContent, } from "../api"; -import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; +import { + getMessageTextContent, + getMessageTextContentWithoutThinking, +} from "@/app/utils"; import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { @@ -92,7 +95,10 @@ export class QwenApi implements LLMApi { async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ role: v.role, - content: getMessageTextContent(v), + content: + v.role === "assistant" + ? getMessageTextContentWithoutThinking(v) + : getMessageTextContent(v), })); const modelConfig = { @@ -122,15 +128,17 @@ export class QwenApi implements LLMApi { options.onController?.(controller); try { + const headers = { + ...getHeaders(), + "X-DashScope-SSE": shouldStream ? "enable" : "disable", + }; + const chatPath = this.path(Alibaba.ChatPath); const chatPayload = { method: "POST", body: JSON.stringify(requestPayload), signal: controller.signal, - headers: { - ...getHeaders(), - "X-DashScope-SSE": shouldStream ? 
"enable" : "disable", - }, + headers: headers, }; // make a fetch request @@ -140,116 +148,96 @@ export class QwenApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - let responseRes: Response; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); - } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText, responseRes); - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - fetch: fetch as any, - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[Alibaba] request response content type: ", - contentType, - ); - responseRes = res; - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return streamWithThink( + chatPath, + requestPayload, + headers, + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.output.choices as Array<{ + message: { + content: string | null; + tool_calls: ChatMessageTool[]; + reasoning_content: string | null; + }; + }>; + const tool_calls = choices[0]?.message?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } } + const reasoning = choices[0]?.message?.reasoning_content; + const content = choices[0]?.message?.content; + // Skip if both content and reasoning_content are empty or null if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 + (!reasoning || reasoning.trim().length === 0) && + (!content || content.trim().length === 0) ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); + return { + isThinking: false, + content: "", + }; } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const 
choices = json.output.choices as Array<{ - message: { content: string }; - }>; - const delta = choices[0]?.message?.content; - if (delta) { - remainText += delta; - } - } catch (e) { - console.error("[Request] parse error", text, msg); + + if (reasoning && reasoning.trim().length > 0) { + return { + isThinking: true, + content: reasoning, + }; + } else if (content && content.trim().length > 0) { + return { + isThinking: false, + content: content, + }; } + + return { + isThinking: false, + content: "", + }; }, - onclose() { - finish(); + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); + options, + ); } else { const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index c2f128128..5d7ddebeb 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -5,7 +5,13 @@ import { BYTEDANCE_BASE_URL, REQUEST_TIMEOUT_MS, } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; import { ChatOptions, @@ -15,14 +21,11 @@ import { MultimodalContent, SpeechOptions, } from "../api"; -import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; + +import { streamWithThink } from "@/app/utils/chat"; import { getClientConfig } from "@/app/config/client"; import { preProcessImageContent } from "@/app/utils/chat"; +import { getMessageTextContentWithoutThinking } from "@/app/utils"; import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { @@ -86,7 +89,10 @@ export class DoubaoApi implements LLMApi { async chat(options: ChatOptions) { const messages: ChatOptions["messages"] = []; for (const v of options.messages) { - const content = await preProcessImageContent(v.content); + const content = + v.role === "assistant" + ? 
getMessageTextContentWithoutThinking(v) + : await preProcessImageContent(v.content); messages.push({ role: v.role, content }); } @@ -128,115 +134,96 @@ export class DoubaoApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - let responseRes: Response; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); - } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText, responseRes); - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - fetch: fetch as any, - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[ByteDance] request response content type: ", - contentType, - ); - responseRes = res; - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return streamWithThink( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string | null; + tool_calls: ChatMessageTool[]; + reasoning_content: string | null; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } } + const reasoning = choices[0]?.delta?.reasoning_content; + const content = choices[0]?.delta?.content; + // Skip if both content and reasoning_content are empty or null if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 + (!reasoning || reasoning.trim().length === 0) && + (!content || content.trim().length === 0) ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); + return { + isThinking: false, + content: "", + }; } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { 
- const json = JSON.parse(text); - const choices = json.choices as Array<{ - delta: { content: string }; - }>; - const delta = choices[0]?.delta?.content; - if (delta) { - remainText += delta; - } - } catch (e) { - console.error("[Request] parse error", text, msg); + + if (reasoning && reasoning.trim().length > 0) { + return { + isThinking: true, + content: reasoning, + }; + } else if (content && content.trim().length > 0) { + return { + isThinking: false, + content: content, + }; } + + return { + isThinking: false, + content: "", + }; }, - onclose() { - finish(); + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); + options, + ); } else { const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); From b0758cccde8709af7fa31aed8c019029c97be82b Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Tue, 11 Feb 2025 16:08:30 +0800 Subject: [PATCH 22/37] optimization --- app/client/platforms/alibaba.ts | 10 ++++++---- app/client/platforms/bytedance.ts | 11 ++++++----- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index 13cb558f9..44dbd847a 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -171,6 +171,9 @@ export class QwenApi implements LLMApi { reasoning_content: string | null; }; }>; + + if (!choices?.length) return { isThinking: false, content: "" }; + const tool_calls = choices[0]?.message?.tool_calls; if (tool_calls?.length > 0) { const index = tool_calls[0]?.index; @@ -190,6 +193,7 @@ export class QwenApi implements LLMApi { runTools[index]["function"]["arguments"] += args; } } + const reasoning = choices[0]?.message?.reasoning_content; const content = choices[0]?.message?.content; @@ -227,10 +231,8 @@ export class QwenApi implements LLMApi { toolCallMessage: any, toolCallResult: any[], ) => { - // @ts-ignore - requestPayload?.messages?.splice( - // @ts-ignore - requestPayload?.messages?.length, + requestPayload?.input?.messages?.splice( + requestPayload?.input?.messages?.length, 0, toolCallMessage, ...toolCallResult, diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index 5d7ddebeb..5e2e63f58 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -37,7 +37,7 @@ export interface OpenAIListModelResponse { }>; } -interface RequestPayload { +interface RequestPayloadForByteDance { messages: { role: "system" | "user" | "assistant"; content: string | MultimodalContent[]; @@ -105,7 +105,7 @@ export class DoubaoApi implements LLMApi { }; const shouldStream = !!options.config.stream; - const requestPayload: RequestPayload = { + const requestPayload: RequestPayloadForByteDance = { messages, stream: shouldStream, model: modelConfig.model, @@ -157,6 +157,9 @@ export class DoubaoApi implements LLMApi { reasoning_content: string | null; }; }>; + + if (!choices?.length) return { isThinking: false, content: "" }; + const tool_calls = choices[0]?.delta?.tool_calls; if (tool_calls?.length > 0) { const index = tool_calls[0]?.index; @@ -209,13 +212,11 @@ export class DoubaoApi implements LLMApi { }, // processToolMessage, include tool_calls message and tool 
call results ( - requestPayload: RequestPayload, + requestPayload: RequestPayloadForByteDance, toolCallMessage: any, toolCallResult: any[], ) => { - // @ts-ignore requestPayload?.messages?.splice( - // @ts-ignore requestPayload?.messages?.length, 0, toolCallMessage, From 97142583224faa28e7cdd43eba75b77828f280af Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Tue, 11 Feb 2025 18:57:16 +0800 Subject: [PATCH 23/37] support deepseek-r1@OpenAI's reasoning_content, parse from stream --- app/client/platforms/openai.ts | 40 +++++++++++++++++++++++++++++++--- app/utils/chat.ts | 18 +++++++++++++++ 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index fbe533cad..9d43c8161 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -22,7 +22,7 @@ import { preProcessImageContent, uploadImage, base64Image2Blob, - stream, + streamWithThink, } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing"; @@ -294,7 +294,7 @@ export class ChatGPTApi implements LLMApi { useChatStore.getState().currentSession().mask?.plugin || [], ); // console.log("getAsTools", tools, funcs); - stream( + streamWithThink( chatPath, requestPayload, getHeaders(), @@ -309,8 +309,12 @@ export class ChatGPTApi implements LLMApi { delta: { content: string; tool_calls: ChatMessageTool[]; + reasoning_content: string | null; }; }>; + + if (!choices?.length) return { isThinking: false, content: "" }; + const tool_calls = choices[0]?.delta?.tool_calls; if (tool_calls?.length > 0) { const id = tool_calls[0]?.id; @@ -330,7 +334,37 @@ export class ChatGPTApi implements LLMApi { runTools[index]["function"]["arguments"] += args; } } - return choices[0]?.delta?.content; + + const reasoning = choices[0]?.delta?.reasoning_content; + const content = choices[0]?.delta?.content; + + // Skip if both content and reasoning_content are empty or null + if ( + (!reasoning || reasoning.trim().length === 0) && + (!content || content.trim().length === 0) + ) { + return { + isThinking: false, + content: "", + }; + } + + if (reasoning && reasoning.trim().length > 0) { + return { + isThinking: true, + content: reasoning, + }; + } else if (content && content.trim().length > 0) { + return { + isThinking: false, + content: content, + }; + } + + return { + isThinking: false, + content: "", + }; }, // processToolMessage, include tool_calls message and tool call results ( diff --git a/app/utils/chat.ts b/app/utils/chat.ts index b77955e6e..efc496f2c 100644 --- a/app/utils/chat.ts +++ b/app/utils/chat.ts @@ -400,6 +400,7 @@ export function streamWithThink( let responseRes: Response; let isInThinkingMode = false; let lastIsThinking = false; + let lastIsThinkingTagged = false; //between and tags // animate response to make it looks smooth function animateResponseText() { @@ -579,6 +580,23 @@ export function streamWithThink( if (!chunk?.content || chunk.content.length === 0) { return; } + + // deal with and tags start + if (!chunk.isThinking) { + if (chunk.content.startsWith("")) { + chunk.isThinking = true; + chunk.content = chunk.content.slice(7).trim(); + lastIsThinkingTagged = true; + } else if (chunk.content.endsWith("")) { + chunk.isThinking = false; + chunk.content = chunk.content.slice(0, -8).trim(); + lastIsThinkingTagged = false; + } else if (lastIsThinkingTagged) { + chunk.isThinking = true; + } + } + // deal with and tags start + // Check if thinking mode 
changed const isThinkingChanged = lastIsThinking !== chunk.isThinking; lastIsThinking = chunk.isThinking; From 476d946f961a551ffedc7734dcce28faa7dc30fe Mon Sep 17 00:00:00 2001 From: suruiqiang Date: Wed, 12 Feb 2025 17:49:54 +0800 Subject: [PATCH 24/37] fix bug (trim eats space or \n mistakenly), optimize timeout by model --- app/client/platforms/alibaba.ts | 18 +++++++----------- app/client/platforms/baidu.ts | 11 +++-------- app/client/platforms/bytedance.ts | 22 ++++++++++------------ app/client/platforms/deepseek.ts | 25 +++++++------------------ app/client/platforms/glm.ts | 15 +++++++-------- app/client/platforms/google.ts | 10 +++------- app/client/platforms/openai.ts | 14 ++++++-------- app/client/platforms/siliconflow.ts | 10 +++------- app/client/platforms/tencent.ts | 10 +++++++--- app/client/platforms/xai.ts | 5 +++-- app/utils.ts | 20 +++++++++++++++++++- 11 files changed, 75 insertions(+), 85 deletions(-) diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index 44dbd847a..88511768c 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -1,10 +1,5 @@ "use client"; -import { - ApiPath, - Alibaba, - ALIBABA_BASE_URL, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; +import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant"; import { useAccessStore, useAppConfig, @@ -25,6 +20,7 @@ import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, getMessageTextContentWithoutThinking, + getTimeoutMSByModel, } from "@/app/utils"; import { fetch } from "@/app/utils/stream"; @@ -144,7 +140,7 @@ export class QwenApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { @@ -199,8 +195,8 @@ export class QwenApi implements LLMApi { // Skip if both content and reasoning_content are empty or null if ( - (!reasoning || reasoning.trim().length === 0) && - (!content || content.trim().length === 0) + (!reasoning || reasoning.length === 0) && + (!content || content.length === 0) ) { return { isThinking: false, @@ -208,12 +204,12 @@ export class QwenApi implements LLMApi { }; } - if (reasoning && reasoning.trim().length > 0) { + if (reasoning && reasoning.length > 0) { return { isThinking: true, content: reasoning, }; - } else if (content && content.trim().length > 0) { + } else if (content && content.length > 0) { return { isThinking: false, content: content, diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 9e8c2f139..dc990db41 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -1,10 +1,5 @@ "use client"; -import { - ApiPath, - Baidu, - BAIDU_BASE_URL, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; +import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { getAccessToken } from "@/app/utils/baidu"; @@ -23,7 +18,7 @@ import { } from "@fortaine/fetch-event-source"; import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent } from "@/app/utils"; +import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils"; import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { @@ -155,7 +150,7 @@ export class ErnieApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => 
controller.abort(), - REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index 5e2e63f58..f9524cba2 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -1,10 +1,5 @@ "use client"; -import { - ApiPath, - ByteDance, - BYTEDANCE_BASE_URL, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; +import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant"; import { useAccessStore, useAppConfig, @@ -25,7 +20,10 @@ import { import { streamWithThink } from "@/app/utils/chat"; import { getClientConfig } from "@/app/config/client"; import { preProcessImageContent } from "@/app/utils/chat"; -import { getMessageTextContentWithoutThinking } from "@/app/utils"; +import { + getMessageTextContentWithoutThinking, + getTimeoutMSByModel, +} from "@/app/utils"; import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { @@ -130,7 +128,7 @@ export class DoubaoApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { @@ -184,8 +182,8 @@ export class DoubaoApi implements LLMApi { // Skip if both content and reasoning_content are empty or null if ( - (!reasoning || reasoning.trim().length === 0) && - (!content || content.trim().length === 0) + (!reasoning || reasoning.length === 0) && + (!content || content.length === 0) ) { return { isThinking: false, @@ -193,12 +191,12 @@ export class DoubaoApi implements LLMApi { }; } - if (reasoning && reasoning.trim().length > 0) { + if (reasoning && reasoning.length > 0) { return { isThinking: true, content: reasoning, }; - } else if (content && content.trim().length > 0) { + } else if (content && content.length > 0) { return { isThinking: false, content: content, diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts index c436ae61d..b21d24cef 100644 --- a/app/client/platforms/deepseek.ts +++ b/app/client/platforms/deepseek.ts @@ -1,12 +1,6 @@ "use client"; // azure and openai, using same models. so using same LLMApi. -import { - ApiPath, - DEEPSEEK_BASE_URL, - DeepSeek, - REQUEST_TIMEOUT_MS, - REQUEST_TIMEOUT_MS_FOR_THINKING, -} from "@/app/constant"; +import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant"; import { useAccessStore, useAppConfig, @@ -26,6 +20,7 @@ import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, getMessageTextContentWithoutThinking, + getTimeoutMSByModel, } from "@/app/utils"; import { RequestPayload } from "./openai"; import { fetch } from "@/app/utils/stream"; @@ -116,16 +111,10 @@ export class DeepSeekApi implements LLMApi { headers: getHeaders(), }; - // console.log(chatPayload); - - const isR1 = - options.config.model.endsWith("-reasoner") || - options.config.model.endsWith("-r1"); - // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - isR1 ? 
REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { @@ -176,8 +165,8 @@ export class DeepSeekApi implements LLMApi { // Skip if both content and reasoning_content are empty or null if ( - (!reasoning || reasoning.trim().length === 0) && - (!content || content.trim().length === 0) + (!reasoning || reasoning.length === 0) && + (!content || content.length === 0) ) { return { isThinking: false, @@ -185,12 +174,12 @@ export class DeepSeekApi implements LLMApi { }; } - if (reasoning && reasoning.trim().length > 0) { + if (reasoning && reasoning.length > 0) { return { isThinking: true, content: reasoning, }; - } else if (content && content.trim().length > 0) { + } else if (content && content.length > 0) { return { isThinking: false, content: content, diff --git a/app/client/platforms/glm.ts b/app/client/platforms/glm.ts index a8d1869e3..98b10277d 100644 --- a/app/client/platforms/glm.ts +++ b/app/client/platforms/glm.ts @@ -1,10 +1,5 @@ "use client"; -import { - ApiPath, - CHATGLM_BASE_URL, - ChatGLM, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; +import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant"; import { useAccessStore, useAppConfig, @@ -21,7 +16,11 @@ import { SpeechOptions, } from "../api"; import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent, isVisionModel } from "@/app/utils"; +import { + getMessageTextContent, + isVisionModel, + getTimeoutMSByModel, +} from "@/app/utils"; import { RequestPayload } from "./openai"; import { fetch } from "@/app/utils/stream"; import { preProcessImageContent } from "@/app/utils/chat"; @@ -191,7 +190,7 @@ export class ChatGLMApi implements LLMApi { const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (modelType === "image" || modelType === "video") { diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 1e593dd42..654f0e3e4 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,9 +1,4 @@ -import { - ApiPath, - Google, - REQUEST_TIMEOUT_MS, - REQUEST_TIMEOUT_MS_FOR_THINKING, -} from "@/app/constant"; +import { ApiPath, Google } from "@/app/constant"; import { ChatOptions, getHeaders, @@ -27,6 +22,7 @@ import { getMessageTextContent, getMessageImages, isVisionModel, + getTimeoutMSByModel, } from "@/app/utils"; import { preProcessImageContent } from "@/app/utils/chat"; import { nanoid } from "nanoid"; @@ -206,7 +202,7 @@ export class GeminiProApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - isThinking ? 
REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 9d43c8161..c6f3fc425 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -8,7 +8,6 @@ import { Azure, REQUEST_TIMEOUT_MS, ServiceProvider, - REQUEST_TIMEOUT_MS_FOR_THINKING, } from "@/app/constant"; import { ChatMessageTool, @@ -42,6 +41,7 @@ import { getMessageTextContent, isVisionModel, isDalle3 as _isDalle3, + getTimeoutMSByModel, } from "@/app/utils"; import { fetch } from "@/app/utils/stream"; @@ -340,8 +340,8 @@ export class ChatGPTApi implements LLMApi { // Skip if both content and reasoning_content are empty or null if ( - (!reasoning || reasoning.trim().length === 0) && - (!content || content.trim().length === 0) + (!reasoning || reasoning.length === 0) && + (!content || content.length === 0) ) { return { isThinking: false, @@ -349,12 +349,12 @@ export class ChatGPTApi implements LLMApi { }; } - if (reasoning && reasoning.trim().length > 0) { + if (reasoning && reasoning.length > 0) { return { isThinking: true, content: reasoning, }; - } else if (content && content.trim().length > 0) { + } else if (content && content.length > 0) { return { isThinking: false, content: content, @@ -396,9 +396,7 @@ export class ChatGPTApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - isDalle3 || isO1OrO3 - ? REQUEST_TIMEOUT_MS_FOR_THINKING - : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. + getTimeoutMSByModel(options.config.model), ); const res = await fetch(chatPath, chatPayload); diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts index 1ad316a61..92c0261c4 100644 --- a/app/client/platforms/siliconflow.ts +++ b/app/client/platforms/siliconflow.ts @@ -1,11 +1,6 @@ "use client"; // azure and openai, using same models. so using same LLMApi. 
-import { - ApiPath, - SILICONFLOW_BASE_URL, - SiliconFlow, - REQUEST_TIMEOUT_MS_FOR_THINKING, -} from "@/app/constant"; +import { ApiPath, SILICONFLOW_BASE_URL, SiliconFlow } from "@/app/constant"; import { useAccessStore, useAppConfig, @@ -25,6 +20,7 @@ import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, getMessageTextContentWithoutThinking, + getTimeoutMSByModel, } from "@/app/utils"; import { RequestPayload } from "./openai"; import { fetch } from "@/app/utils/stream"; @@ -123,7 +119,7 @@ export class SiliconflowApi implements LLMApi { // Use extended timeout for thinking models as they typically require more processing time const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS_FOR_THINKING, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index 580844a5b..8adeb1b3e 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts @@ -1,5 +1,5 @@ "use client"; -import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { ApiPath, TENCENT_BASE_URL } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { @@ -17,7 +17,11 @@ import { } from "@fortaine/fetch-event-source"; import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; -import { getMessageTextContent, isVisionModel } from "@/app/utils"; +import { + getMessageTextContent, + isVisionModel, + getTimeoutMSByModel, +} from "@/app/utils"; import mapKeys from "lodash-es/mapKeys"; import mapValues from "lodash-es/mapValues"; import isArray from "lodash-es/isArray"; @@ -135,7 +139,7 @@ export class HunyuanApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts index 8c41c2d98..830ad4778 100644 --- a/app/client/platforms/xai.ts +++ b/app/client/platforms/xai.ts @@ -1,6 +1,6 @@ "use client"; // azure and openai, using same models. so using same LLMApi. 
-import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant"; import { useAccessStore, useAppConfig, @@ -17,6 +17,7 @@ import { SpeechOptions, } from "../api"; import { getClientConfig } from "@/app/config/client"; +import { getTimeoutMSByModel } from "@/app/utils"; import { preProcessImageContent } from "@/app/utils/chat"; import { RequestPayload } from "./openai"; import { fetch } from "@/app/utils/stream"; @@ -103,7 +104,7 @@ export class XAIApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + getTimeoutMSByModel(options.config.model), ); if (shouldStream) { diff --git a/app/utils.ts b/app/utils.ts index f23378019..6183e03b0 100644 --- a/app/utils.ts +++ b/app/utils.ts @@ -2,7 +2,11 @@ import { useEffect, useState } from "react"; import { showToast } from "./components/ui-lib"; import Locale from "./locales"; import { RequestMessage } from "./client/api"; -import { ServiceProvider } from "./constant"; +import { + REQUEST_TIMEOUT_MS, + REQUEST_TIMEOUT_MS_FOR_THINKING, + ServiceProvider, +} from "./constant"; // import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http"; import { fetch as tauriStreamFetch } from "./utils/stream"; import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant"; @@ -292,6 +296,20 @@ export function isDalle3(model: string) { return "dall-e-3" === model; } +export function getTimeoutMSByModel(model: string) { + model = model.toLowerCase(); + if ( + model.startsWith("dall-e") || + model.startsWith("dalle") || + model.startsWith("o1") || + model.startsWith("o3") || + model.includes("deepseek-r") || + model.includes("-thinking") + ) + return REQUEST_TIMEOUT_MS_FOR_THINKING; + return REQUEST_TIMEOUT_MS; +} + export function getModelSizes(model: string): ModelSize[] { if (isDalle3(model)) { return ["1024x1024", "1792x1024", "1024x1792"]; From 008e339b6d1c227c47a9cb4877ba8bb064f41043 Mon Sep 17 00:00:00 2001 From: Rasmus Erik Voel Jensen Date: Sat, 15 Feb 2025 12:52:44 +0100 Subject: [PATCH 25/37] danish locale --- app/locales/da.ts | 832 +++++++++++++++++++++++++++++++++++++++++++ app/locales/index.ts | 4 + 2 files changed, 836 insertions(+) create mode 100644 app/locales/da.ts diff --git a/app/locales/da.ts b/app/locales/da.ts new file mode 100644 index 000000000..e4d74eab8 --- /dev/null +++ b/app/locales/da.ts @@ -0,0 +1,832 @@ +import { getClientConfig } from "../config/client"; +import { SubmitKey } from "../store/config"; +import { SAAS_CHAT_UTM_URL } from "@/app/constant"; +import { PartialLocaleType } from "./index"; + +const isApp = !!getClientConfig()?.isApp; +const da: PartialLocaleType = { + WIP: "Der kommer snart mere...", + Error: { + Unauthorized: isApp + ? `Hov, der skete en fejl. Sådan kan du komme videre: + \\ 1️⃣ Er du ny her? [Tryk for at starte nu 🚀](${SAAS_CHAT_UTM_URL}) + \\ 2️⃣ Vil du bruge dine egne OpenAI-nøgler? [Tryk her](/#/settings) for at ændre indstillinger ⚙️` + : `Hov, der skete en fejl. Lad os løse det: + \\ 1️⃣ Er du ny her? [Tryk for at starte nu 🚀](${SAAS_CHAT_UTM_URL}) + \\ 2️⃣ Bruger du en privat opsætning? [Tryk her](/#/auth) for at taste din nøgle 🔑 + \\ 3️⃣ Vil du bruge dine egne OpenAI-nøgler? 
[Tryk her](/#/settings) for at ændre indstillinger ⚙️ + `, + }, + Auth: { + Return: "Tilbage", + Title: "Adgangskode", + Tips: "Skriv venligst koden herunder", + SubTips: "Eller brug din egen OpenAI- eller Google-nøgle", + Input: "Adgangskode", + Confirm: "OK", + Later: "Senere", + SaasTips: "Hvis det er for svært, kan du starte nu", + }, + ChatItem: { + ChatItemCount: (count: number) => `${count} beskeder`, + }, + Chat: { + SubTitle: (count: number) => `${count} beskeder`, + EditMessage: { + Title: "Rediger beskeder", + Topic: { + Title: "Emne", + SubTitle: "Skift emne for denne chat", + }, + }, + Actions: { + ChatList: "Gå til chatliste", + CompressedHistory: "Komprimeret historie", + Export: "Eksporter alle beskeder som Markdown", + Copy: "Kopiér", + Stop: "Stop", + Retry: "Prøv igen", + Pin: "Fastgør", + PinToastContent: "1 besked er nu fastgjort", + PinToastAction: "Se", + Delete: "Slet", + Edit: "Rediger", + FullScreen: "Fuld skærm", + RefreshTitle: "Opdatér titel", + RefreshToast: "Anmodning om ny titel sendt", + Speech: "Afspil", + StopSpeech: "Stop", + }, + Commands: { + new: "Ny chat", + newm: "Ny chat med persona", + next: "Næste chat", + prev: "Forrige chat", + clear: "Ryd alt før", + fork: "Kopiér chat", + del: "Slet chat", + }, + InputActions: { + Stop: "Stop", + ToBottom: "Ned til nyeste", + Theme: { + auto: "Automatisk", + light: "Lyst tema", + dark: "Mørkt tema", + }, + Prompt: "Prompts", + Masks: "Personaer", + Clear: "Ryd kontekst", + Settings: "Indstillinger", + UploadImage: "Upload billeder", + }, + Rename: "Omdøb chat", + Typing: "Skriver…", + Input: (submitKey: string) => { + let inputHints = `${submitKey} for at sende`; + if (submitKey === String(SubmitKey.Enter)) { + inputHints += ", Shift + Enter for ny linje"; + } + return ( + inputHints + ", / for at søge i prompts, : for at bruge kommandoer" + ); + }, + Send: "Send", + StartSpeak: "Start oplæsning", + StopSpeak: "Stop oplæsning", + Config: { + Reset: "Nulstil til standard", + SaveAs: "Gem som persona", + }, + IsContext: "Ekstra prompt til baggrund", + ShortcutKey: { + Title: "Hurtigtaster", + newChat: "Åbn ny chat", + focusInput: "Fokus på tekstfeltet", + copyLastMessage: "Kopiér sidste svar", + copyLastCode: "Kopiér sidste kodeblok", + showShortcutKey: "Vis hurtigtaster", + clearContext: "Ryd kontekst", + }, + }, + Export: { + Title: "Eksportér beskeder", + Copy: "Kopiér alt", + Download: "Download", + MessageFromYou: "Fra dig", + MessageFromChatGPT: "Fra ChatGPT", + Share: "Del til ShareGPT", + Format: { + Title: "Filformat", + SubTitle: "Vælg enten Markdown eller PNG-billede", + }, + IncludeContext: { + Title: "Tag baggrund med", + SubTitle: "Skal ekstra baggrund (persona) med i eksporten?", + }, + Steps: { + Select: "Vælg", + Preview: "Forhåndsvis", + }, + Image: { + Toast: "Laver billede...", + Modal: "Tryk længe eller højreklik for at gemme", + }, + Artifacts: { + Title: "Del side", + Error: "Fejl ved deling", + }, + }, + Select: { + Search: "Søg", + All: "Vælg alle", + Latest: "Vælg nyeste", + Clear: "Ryd alt", + }, + Memory: { + Title: "Huskesætning", + EmptyContent: "Ingenting lige nu.", + Send: "Send huskesætning", + Copy: "Kopiér huskesætning", + Reset: "Nulstil chat", + ResetConfirm: + "Dette sletter nuværende samtale og hukommelse. 
Er du sikker?", + }, + Home: { + NewChat: "Ny Chat", + DeleteChat: "Vil du slette den valgte chat?", + DeleteToast: "Chat slettet", + Revert: "Fortryd", + }, + Settings: { + Title: "Indstillinger", + SubTitle: "Alle indstillinger", + ShowPassword: "Vis kodeord", + Danger: { + Reset: { + Title: "Nulstil alle indstillinger", + SubTitle: "Gendan alt til standard", + Action: "Nulstil", + Confirm: "Vil du virkelig nulstille alt?", + }, + Clear: { + Title: "Slet alle data", + SubTitle: "Sletter alt om beskeder og indstillinger", + Action: "Slet", + Confirm: "Er du sikker på, at du vil slette alt?", + }, + }, + Lang: { + Name: "Language", + All: "Alle sprog", + }, + Avatar: "Avatar", + FontSize: { + Title: "Skriftstørrelse", + SubTitle: "Vælg, hvor stor teksten skal være", + }, + FontFamily: { + Title: "Skrifttype", + SubTitle: "Hvis tom, bruger den standard skrifttype", + Placeholder: "Skrifttype-navn", + }, + InjectSystemPrompts: { + Title: "Tilføj system-prompt", + SubTitle: "Læg altid en ekstra prompt først i anmodninger", + }, + InputTemplate: { + Title: "Tekstskabelon", + SubTitle: "Den seneste besked placeres i denne skabelon", + }, + Update: { + Version: (x: string) => `Version: ${x}`, + IsLatest: "Du har nyeste version", + CheckUpdate: "Tjek efter opdatering", + IsChecking: "Tjekker...", + FoundUpdate: (x: string) => `Ny version fundet: ${x}`, + GoToUpdate: "Opdatér", + Success: "Opdatering lykkedes.", + Failed: "Opdatering mislykkedes.", + }, + SendKey: "Tast for send", + Theme: "Tema", + TightBorder: "Stram kant", + SendPreviewBubble: { + Title: "Forhåndsvisnings-boble", + SubTitle: "Vis tekst, før den sendes", + }, + AutoGenerateTitle: { + Title: "Lav titel automatisk", + SubTitle: "Foreslå en titel ud fra chatten", + }, + Sync: { + CloudState: "Seneste opdatering", + NotSyncYet: "Endnu ikke synkroniseret", + Success: "Synkronisering lykkedes", + Fail: "Synkronisering mislykkedes", + Config: { + Modal: { + Title: "Indstil synk", + Check: "Tjek forbindelse", + }, + SyncType: { + Title: "Synk-type", + SubTitle: "Vælg en synk-tjeneste", + }, + Proxy: { + Title: "Aktivér proxy", + SubTitle: "Brug proxy for at undgå netværksproblemer", + }, + ProxyUrl: { + Title: "Proxy-adresse", + SubTitle: "Bruges kun til projektets egen proxy", + }, + WebDav: { + Endpoint: "WebDAV-adresse", + UserName: "Brugernavn", + Password: "Kodeord", + }, + UpStash: { + Endpoint: "UpStash Redis REST URL", + UserName: "Backup-navn", + Password: "UpStash Redis REST Token", + }, + }, + LocalState: "Lokale data", + Overview: (overview: any) => + `${overview.chat} chats, ${overview.message} beskeder, ${overview.prompt} prompts, ${overview.mask} personaer`, + ImportFailed: "Import mislykkedes", + }, + Mask: { + Splash: { + Title: "Persona-forside", + SubTitle: "Vis denne side, når du opretter ny chat", + }, + Builtin: { + Title: "Skjul indbyggede personaer", + SubTitle: "Vis ikke de indbyggede personaer i listen", + }, + }, + Prompt: { + Disable: { + Title: "Slå auto-forslag fra", + SubTitle: "Tast / for at få forslag", + }, + List: "Prompt-liste", + ListCount: (builtin: number, custom: number) => + `${builtin} indbygget, ${custom} brugerdefineret`, + Edit: "Rediger", + Modal: { + Title: "Prompt-liste", + Add: "Tilføj", + Search: "Søg prompts", + }, + EditModal: { + Title: "Rediger prompt", + }, + }, + HistoryCount: { + Title: "Antal beskeder, der følger med", + SubTitle: "Hvor mange af de tidligere beskeder, der sendes hver gang", + }, + CompressThreshold: { + Title: "Komprimeringsgrænse", + SubTitle: + "Hvis chatten 
bliver for lang, vil den komprimeres efter dette antal tegn", + }, + Usage: { + Title: "Brug og saldo", + SubTitle(used: any, total: any) { + return `Du har brugt $${used} i denne måned, og din grænse er $${total}.`; + }, + IsChecking: "Tjekker...", + Check: "Tjek igen", + NoAccess: "Indtast API-nøgle for at se forbrug", + }, + Access: { + AccessCode: { + Title: "Adgangskode", + SubTitle: "Adgangskontrol er slået til", + Placeholder: "Skriv kode her", + }, + CustomEndpoint: { + Title: "Brugerdefineret adresse", + SubTitle: "Brug Azure eller OpenAI fra egen server", + }, + Provider: { + Title: "Model-udbyder", + SubTitle: "Vælg Azure eller OpenAI", + }, + OpenAI: { + ApiKey: { + Title: "OpenAI API-nøgle", + SubTitle: "Brug din egen nøgle", + Placeholder: "sk-xxx", + }, + Endpoint: { + Title: "OpenAI Endpoint", + SubTitle: "Skal starte med http(s):// eller /api/openai som standard", + }, + }, + Azure: { + ApiKey: { + Title: "Azure Api Key", + SubTitle: "Hent din nøgle fra Azure-portalen", + Placeholder: "Azure Api Key", + }, + Endpoint: { + Title: "Azure Endpoint", + SubTitle: "F.eks.: ", + }, + ApiVerion: { + Title: "Azure Api Version", + SubTitle: "Hentet fra Azure-portalen", + }, + }, + Anthropic: { + ApiKey: { + Title: "Anthropic API-nøgle", + SubTitle: "Brug din egen Anthropic-nøgle", + Placeholder: "Anthropic API Key", + }, + Endpoint: { + Title: "Endpoint-adresse", + SubTitle: "F.eks.: ", + }, + ApiVerion: { + Title: "API-version (Claude)", + SubTitle: "Vælg den ønskede version", + }, + }, + Baidu: { + ApiKey: { + Title: "Baidu-nøgle", + SubTitle: "Din egen Baidu-nøgle", + Placeholder: "Baidu API Key", + }, + SecretKey: { + Title: "Baidu hemmelig nøgle", + SubTitle: "Din egen hemmelige nøgle fra Baidu", + Placeholder: "Baidu Secret Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "Kan ikke ændres, se .env", + }, + }, + Tencent: { + ApiKey: { + Title: "Tencent-nøgle", + SubTitle: "Din egen nøgle fra Tencent", + Placeholder: "Tencent API Key", + }, + SecretKey: { + Title: "Tencent hemmelig nøgle", + SubTitle: "Din egen hemmelige nøgle fra Tencent", + Placeholder: "Tencent Secret Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "Kan ikke ændres, se .env", + }, + }, + ByteDance: { + ApiKey: { + Title: "ByteDance-nøgle", + SubTitle: "Din egen nøgle til ByteDance", + Placeholder: "ByteDance API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + Alibaba: { + ApiKey: { + Title: "Alibaba-nøgle", + SubTitle: "Din egen Alibaba Cloud-nøgle", + Placeholder: "Alibaba Cloud API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + Moonshot: { + ApiKey: { + Title: "Moonshot-nøgle", + SubTitle: "Din egen Moonshot-nøgle", + Placeholder: "Moonshot API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + DeepSeek: { + ApiKey: { + Title: "DeepSeek-nøgle", + SubTitle: "Din egen DeepSeek-nøgle", + Placeholder: "DeepSeek API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + XAI: { + ApiKey: { + Title: "XAI-nøgle", + SubTitle: "Din egen XAI-nøgle", + Placeholder: "XAI API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + ChatGLM: { + ApiKey: { + Title: "ChatGLM-nøgle", + SubTitle: "Din egen ChatGLM-nøgle", + Placeholder: "ChatGLM API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + SiliconFlow: { + ApiKey: { + Title: "SiliconFlow-nøgle", + SubTitle: "Din egen SiliconFlow-nøgle", + Placeholder: "SiliconFlow API Key", + 
}, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + Stability: { + ApiKey: { + Title: "Stability-nøgle", + SubTitle: "Din egen Stability-nøgle", + Placeholder: "Stability API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + Iflytek: { + ApiKey: { + Title: "Iflytek API Key", + SubTitle: "Nøgle fra Iflytek", + Placeholder: "Iflytek API Key", + }, + ApiSecret: { + Title: "Iflytek hemmelig nøgle", + SubTitle: "Hentet fra Iflytek", + Placeholder: "Iflytek API Secret", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, + CustomModel: { + Title: "Egne modelnavne", + SubTitle: "Skriv komma-adskilte navne", + }, + Google: { + ApiKey: { + Title: "Google-nøgle", + SubTitle: "Få din nøgle hos Google AI", + Placeholder: "Google AI API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + ApiVersion: { + Title: "API-version (til gemini-pro)", + SubTitle: "Vælg en bestemt version", + }, + GoogleSafetySettings: { + Title: "Google sikkerhedsindstillinger", + SubTitle: "Vælg et niveau for indholdskontrol", + }, + }, + }, + Model: "Model", + CompressModel: { + Title: "Opsummeringsmodel", + SubTitle: "Bruges til at korte historik ned og lave titel", + }, + Temperature: { + Title: "Temperatur", + SubTitle: "Jo højere tal, jo mere kreativt svar", + }, + TopP: { + Title: "Top P", + SubTitle: "Skal ikke ændres sammen med temperatur", + }, + MaxTokens: { + Title: "Maks. længde", + SubTitle: "Hvor mange tokens (ord/stykker tekst) der kan bruges", + }, + PresencePenalty: { + Title: "Nye emner", + SubTitle: "Jo højere tal, jo mere nyt indhold", + }, + FrequencyPenalty: { + Title: "Gentagelsesstraf", + SubTitle: "Jo højere tal, jo mindre gentagelse", + }, + TTS: { + Enable: { + Title: "Tænd for oplæsning (TTS)", + SubTitle: "Slå tekst-til-tale til", + }, + Autoplay: { + Title: "Automatisk oplæsning", + SubTitle: "Laver lyd automatisk, hvis TTS er slået til", + }, + Model: "Model", + Voice: { + Title: "Stemme", + SubTitle: "Hvilken stemme der bruges til lyd", + }, + Speed: { + Title: "Hastighed", + SubTitle: "Hvor hurtigt der oplæses", + }, + Engine: "TTS-motor", + }, + Realtime: { + Enable: { + Title: "Live-chat", + SubTitle: "Slå live-svar til", + }, + Provider: { + Title: "Modeludbyder", + SubTitle: "Vælg forskellig udbyder", + }, + Model: { + Title: "Model", + SubTitle: "Vælg en model", + }, + ApiKey: { + Title: "API-nøgle", + SubTitle: "Din nøgle", + Placeholder: "API-nøgle", + }, + Azure: { + Endpoint: { + Title: "Adresse", + SubTitle: "Endpoint til Azure", + }, + Deployment: { + Title: "Udrulningsnavn", + SubTitle: "Navn for dit Azure-setup", + }, + }, + Temperature: { + Title: "Temperatur", + SubTitle: "Højere tal = mere varierede svar", + }, + }, + }, + Store: { + DefaultTopic: "Ny samtale", + BotHello: "Hej! Hvordan kan jeg hjælpe dig i dag?", + Error: "Noget gik galt. Prøv igen senere.", + Prompt: { + History: (content: string) => + "Her er et kort resume af, hvad vi har snakket om: " + content, + Topic: + "Find en kort overskrift med 4-5 ord om emnet. Ingen tegnsætning eller anførselstegn.", + Summarize: + "Skriv et kort resumé (under 200 ord) af vores samtale til senere brug.", + }, + }, + Copy: { + Success: "Kopieret", + Failed: "Kunne ikke kopiere. 
Giv adgang til udklipsholder.", + }, + Download: { + Success: "Filen er downloadet.", + Failed: "Download fejlede.", + }, + Context: { + Toast: (x: any) => `Inkluderer ${x} ekstra prompts`, + Edit: "Chatindstillinger", + Add: "Tilføj prompt", + Clear: "Kontekst ryddet", + Revert: "Fortryd", + }, + Discovery: { + Name: "Oplev", + }, + Mcp: { + Name: "MCP", + }, + FineTuned: { + Sysmessage: "Du er en hjælper, der skal...", + }, + SearchChat: { + Name: "Søg", + Page: { + Title: "Søg i tidligere chats", + Search: "Skriv her for at søge", + NoResult: "Ingen resultater", + NoData: "Ingen data", + Loading: "Henter...", + SubTitle: (count: number) => `Fandt ${count} resultater`, + }, + Item: { + View: "Vis", + }, + }, + Plugin: { + Name: "Plugin", + Page: { + Title: "Plugins", + SubTitle: (count: number) => `${count} plugins`, + Search: "Søg plugin", + Create: "Opret nyt", + Find: "Du kan finde flere plugins på GitHub: ", + }, + Item: { + Info: (count: number) => `${count} metode`, + View: "Vis", + Edit: "Rediger", + Delete: "Slet", + DeleteConfirm: "Vil du slette?", + }, + Auth: { + None: "Ingen", + Basic: "Basic", + Bearer: "Bearer", + Custom: "Tilpasset", + CustomHeader: "Parameternavn", + Token: "Token", + Proxy: "Brug Proxy", + ProxyDescription: "Løs CORS-problemer med Proxy", + Location: "Sted", + LocationHeader: "Header", + LocationQuery: "Query", + LocationBody: "Body", + }, + EditModal: { + Title: (readonly: boolean) => + `Rediger Plugin ${readonly ? "(skrivebeskyttet)" : ""}`, + Download: "Download", + Auth: "Godkendelsestype", + Content: "OpenAPI Schema", + Load: "Hent fra URL", + Method: "Metode", + Error: "Fejl i OpenAPI Schema", + }, + }, + Mask: { + Name: "Persona", + Page: { + Title: "Prompts som personaer", + SubTitle: (count: number) => `${count} skabeloner`, + Search: "Søg skabeloner", + Create: "Opret ny", + }, + Item: { + Info: (count: number) => `${count} prompts`, + Chat: "Chat", + View: "Vis", + Edit: "Rediger", + Delete: "Slet", + DeleteConfirm: "Vil du slette?", + }, + EditModal: { + Title: (readonly: boolean) => + `Rediger skabelon ${readonly ? "(skrivebeskyttet)" : ""}`, + Download: "Download", + Clone: "Klon", + }, + Config: { + Avatar: "Chat-avatar", + Name: "Chat-navn", + Sync: { + Title: "Brug globale indstillinger", + SubTitle: "Gældende for denne chat", + Confirm: "Erstat nuværende indstillinger med globale?", + }, + HideContext: { + Title: "Skjul ekstra prompts", + SubTitle: "Vis dem ikke på chat-skærmen", + }, + Artifacts: { + Title: "Brug Artefakter", + SubTitle: "Gør det muligt at vise HTML-sider", + }, + CodeFold: { + Title: "Fold kode sammen", + SubTitle: "Luk/åbn lange kodestykker automatisk", + }, + Share: { + Title: "Del denne persona", + SubTitle: "Få et link til denne skabelon", + Action: "Kopiér link", + }, + }, + }, + NewChat: { + Return: "Tilbage", + Skip: "Start straks", + Title: "Vælg en persona", + SubTitle: "Chat med den persona, du vælger", + More: "Se flere", + NotShow: "Vis ikke igen", + ConfirmNoShow: + "Er du sikker på, at du ikke vil se det igen? Du kan altid slå det til under indstillinger.", + }, + UI: { + Confirm: "OK", + Cancel: "Fortryd", + Close: "Luk", + Create: "Opret", + Edit: "Rediger", + Export: "Eksporter", + Import: "Importér", + Sync: "Synk", + Config: "Konfigurer", + }, + Exporter: { + Description: { + Title: "Kun beskeder efter sidste rydning vises", + }, + Model: "Model", + Messages: "Beskeder", + Topic: "Emne", + Time: "Tid", + }, + URLCommand: { + Code: "Så ud til, at der var en kode i linket. 
Vil du bruge den?", + Settings: "Så ud til, at der var indstillinger i linket. Vil du bruge dem?", + }, + SdPanel: { + Prompt: "Prompt", + NegativePrompt: "Negativ prompt", + PleaseInput: (name: string) => `Indtast: ${name}`, + AspectRatio: "Billedformat", + ImageStyle: "Stil", + OutFormat: "Uddataformat", + AIModel: "AI-model", + ModelVersion: "Version", + Submit: "Send", + ParamIsRequired: (name: string) => `${name} er krævet`, + Styles: { + D3Model: "3d-model", + AnalogFilm: "analog-film", + Anime: "anime", + Cinematic: "cinematisk", + ComicBook: "tegneserie", + DigitalArt: "digital-art", + Enhance: "enhance", + FantasyArt: "fantasy-art", + Isometric: "isometric", + LineArt: "line-art", + LowPoly: "low-poly", + ModelingCompound: "modeling-compound", + NeonPunk: "neon-punk", + Origami: "origami", + Photographic: "fotografisk", + PixelArt: "pixel-art", + TileTexture: "tile-texture", + }, + }, + Sd: { + SubTitle: (count: number) => `${count} billeder`, + Actions: { + Params: "Se indstillinger", + Copy: "Kopiér prompt", + Delete: "Slet", + Retry: "Prøv igen", + ReturnHome: "Til forsiden", + History: "Historik", + }, + EmptyRecord: "Ingen billeder endnu", + Status: { + Name: "Status", + Success: "Ok", + Error: "Fejl", + Wait: "Venter", + Running: "I gang", + }, + Danger: { + Delete: "Vil du slette?", + }, + GenerateParams: "Genereringsvalg", + Detail: "Detaljer", + }, +}; + +export default da; diff --git a/app/locales/index.ts b/app/locales/index.ts index c8eb64df6..43b17ae81 100644 --- a/app/locales/index.ts +++ b/app/locales/index.ts @@ -2,6 +2,7 @@ import cn from "./cn"; import en from "./en"; import pt from "./pt"; import tw from "./tw"; +import da from "./da"; import id from "./id"; import fr from "./fr"; import es from "./es"; @@ -30,6 +31,7 @@ const ALL_LANGS = { en, tw, pt, + da, jp, ko, id, @@ -56,6 +58,7 @@ export const ALL_LANG_OPTIONS: Record = { en: "English", pt: "Português", tw: "繁體中文", + da: "Dansk", jp: "日本語", ko: "한국어", id: "Indonesia", @@ -141,6 +144,7 @@ export const STT_LANG_MAP: Record = { en: "en-US", pt: "pt-BR", tw: "zh-TW", + da: "da-DK", jp: "ja-JP", ko: "ko-KR", id: "id-ID", From 90827fc593f2e756264c0d309e638491105b669b Mon Sep 17 00:00:00 2001 From: Rasmus Erik Voel Jensen Date: Sat, 15 Feb 2025 13:08:58 +0100 Subject: [PATCH 26/37] danish rewording / improved button label --- app/locales/da.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/locales/da.ts b/app/locales/da.ts index e4d74eab8..7090b062b 100644 --- a/app/locales/da.ts +++ b/app/locales/da.ts @@ -626,7 +626,7 @@ const da: PartialLocaleType = { Revert: "Fortryd", }, Discovery: { - Name: "Oplev", + Name: "Søgning og plugins", }, Mcp: { Name: "MCP", From 8bd0d6a1a7abccc736769b9f2b2b9c9ee75b81a8 Mon Sep 17 00:00:00 2001 From: river Date: Sun, 16 Feb 2025 10:48:54 +0800 Subject: [PATCH 27/37] chore: Update NextChatAI domain from nextchat.dev to nextchat.club --- README.md | 6 +++--- README_CN.md | 2 +- README_JA.md | 2 +- app/constant.ts | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 3c23f4993..63d7c35c2 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@
- + icon @@ -22,10 +22,10 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with Claude, GPT [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) +[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) -[saas-url]: https://nextchat.dev/chat?utm_source=readme +[saas-url]: https://nextchat.club?utm_source=readme [saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge [web-url]: https://app.nextchat.dev/ [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases diff --git a/README_CN.md b/README_CN.md index 9348176e5..2d2b28e82 100644 --- a/README_CN.md +++ b/README_CN.md @@ -8,7 +8,7 @@ 一键免费部署你的私人 ChatGPT 网页应用,支持 Claude, GPT4 & Gemini Pro 模型。 -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.club?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) [Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) diff --git a/README_JA.md b/README_JA.md index 29eb0d275..f1c2da457 100644 --- a/README_JA.md +++ b/README_JA.md @@ -5,7 +5,7 @@ ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。 -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.club?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) [Zeaburでデプロイ](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Zeaburでデプロイ](https://zeabur.com/templates/ZBUEFA) [Gitpodで開く](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) diff --git a/app/constant.ts b/app/constant.ts index 14c8c78e5..70131e879 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -751,5 +751,5 @@ export const internalAllowedWebDavEndpoints = [ export const DEFAULT_GA_ID = "G-89WN60ZK2E"; -export const SAAS_CHAT_URL = "https://nextchat.dev/chat"; -export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github"; +export const SAAS_CHAT_URL = "https://nextchat.club"; +export const 
SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github"; From 2b5f6003086f65f5361ccfc5dc83242f2ca813b8 Mon Sep 17 00:00:00 2001 From: RiverRay Date: Fri, 21 Feb 2025 08:55:40 +0800 Subject: [PATCH 28/37] Update README.md --- README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/README.md b/README.md index 15c16eb68..9451a5624 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,24 @@ English / [简体中文](./README_CN.md)
+## 👋 Hey, NextChat is going to develop a native app! + +> This week we are going to start working on iOS and Android APP, and we want to find some reliable friends to do it together! + + +✨ Several key points: + +- Starting from 0, you are a veteran +- Completely open source, not hidden +- Native development, pursuing the ultimate experience + +Will you come and do something together? 😎 + +https://github.com/ChatGPTNextWeb/NextChat/issues/6269 + +#Seeking for talents is thirsty #lack of people" + + ## 🥳 Cheer for DeepSeek, China's AI star! > Purpose-Built UI for DeepSeek Reasoner Model From f5f3ce94f63bceadff24ca1beff3ae85d142f92e Mon Sep 17 00:00:00 2001 From: RiverRay Date: Fri, 21 Feb 2025 08:56:43 +0800 Subject: [PATCH 29/37] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9451a5624..fbc087697 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Will you come and do something together? 😎 https://github.com/ChatGPTNextWeb/NextChat/issues/6269 -#Seeking for talents is thirsty #lack of people" +#Seeking for talents is thirsty #lack of people ## 🥳 Cheer for DeepSeek, China's AI star! From b709ee3983ee410981302c2f35e02a89f34ce959 Mon Sep 17 00:00:00 2001 From: EvanWu <850123119@qq.com> Date: Mon, 24 Feb 2025 20:18:07 +0800 Subject: [PATCH 30/37] feat(alibaba): Added alibaba vision model and omni model support --- app/client/api.ts | 5 +++++ app/client/platforms/alibaba.ts | 38 ++++++++++++++++++++++----------- app/constant.ts | 10 ++++++++- app/utils/chat.ts | 22 +++++++++++++++++++ 4 files changed, 62 insertions(+), 13 deletions(-) diff --git a/app/client/api.ts b/app/client/api.ts index 64ac82b2a..f5288593d 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -40,6 +40,11 @@ export interface MultimodalContent { }; } +export interface MultimodalContentForAlibaba { + text?: string; + image?: string; +} + export interface RequestMessage { role: MessageRole; content: string | MultimodalContent[]; diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index 88511768c..4875e5c02 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -7,7 +7,10 @@ import { ChatMessageTool, usePluginStore, } from "@/app/store"; -import { streamWithThink } from "@/app/utils/chat"; +import { + preProcessImageContentForAlibabaDashScope, + streamWithThink, +} from "@/app/utils/chat"; import { ChatOptions, getHeaders, @@ -15,12 +18,14 @@ import { LLMModel, SpeechOptions, MultimodalContent, + MultimodalContentForAlibaba, } from "../api"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, getMessageTextContentWithoutThinking, getTimeoutMSByModel, + isVisionModel, } from "@/app/utils"; import { fetch } from "@/app/utils/stream"; @@ -89,14 +94,6 @@ export class QwenApi implements LLMApi { } async chat(options: ChatOptions) { - const messages = options.messages.map((v) => ({ - role: v.role, - content: - v.role === "assistant" - ? getMessageTextContentWithoutThinking(v) - : getMessageTextContent(v), - })); - const modelConfig = { ...useAppConfig.getState().modelConfig, ...useChatStore.getState().currentSession().mask.modelConfig, @@ -105,6 +102,21 @@ export class QwenApi implements LLMApi { }, }; + const visionModel = isVisionModel(options.config.model); + + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = ( + visionModel + ? 
await preProcessImageContentForAlibabaDashScope(v.content) + : v.role === "assistant" + ? getMessageTextContentWithoutThinking(v) + : getMessageTextContent(v) + ) as any; + + messages.push({ role: v.role, content }); + } + const shouldStream = !!options.config.stream; const requestPayload: RequestPayload = { model: modelConfig.model, @@ -129,7 +141,7 @@ export class QwenApi implements LLMApi { "X-DashScope-SSE": shouldStream ? "enable" : "disable", }; - const chatPath = this.path(Alibaba.ChatPath); + const chatPath = this.path(Alibaba.ChatPath(modelConfig.model)); const chatPayload = { method: "POST", body: JSON.stringify(requestPayload), @@ -162,7 +174,7 @@ export class QwenApi implements LLMApi { const json = JSON.parse(text); const choices = json.output.choices as Array<{ message: { - content: string | null; + content: string | null | MultimodalContentForAlibaba[]; tool_calls: ChatMessageTool[]; reasoning_content: string | null; }; @@ -212,7 +224,9 @@ export class QwenApi implements LLMApi { } else if (content && content.length > 0) { return { isThinking: false, - content: content, + content: Array.isArray(content) + ? content.map((item) => item.text).join(",") + : content, }; } diff --git a/app/constant.ts b/app/constant.ts index 50aaf7921..358467c63 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -221,7 +221,12 @@ export const ByteDance = { export const Alibaba = { ExampleEndpoint: ALIBABA_BASE_URL, - ChatPath: "v1/services/aigc/text-generation/generation", + ChatPath: (modelName: string) => { + if (modelName.includes("vl") || modelName.includes("omni")) { + return "v1/services/aigc/multimodal-generation/generation"; + } + return `v1/services/aigc/text-generation/generation`; + }, }; export const Tencent = { @@ -568,6 +573,9 @@ const alibabaModes = [ "qwen-max-0403", "qwen-max-0107", "qwen-max-longcontext", + "qwen-omni-turbo", + "qwen-vl-plus", + "qwen-vl-max", ]; const tencentModels = [ diff --git a/app/utils/chat.ts b/app/utils/chat.ts index efc496f2c..ecb2fa468 100644 --- a/app/utils/chat.ts +++ b/app/utils/chat.ts @@ -92,6 +92,28 @@ export async function preProcessImageContent( return result; } +export async function preProcessImageContentForAlibabaDashScope( + content: RequestMessage["content"], +) { + if (typeof content === "string") { + return content; + } + const result = []; + for (const part of content) { + if (part?.type == "image_url" && part?.image_url?.url) { + try { + const url = await cacheImageToBase64Image(part?.image_url?.url); + result.push({ image: url }); + } catch (error) { + console.error("Error processing image URL:", error); + } + } else { + result.push({ ...part }); + } + } + return result; +} + const imageCaches: Record = {}; export function cacheImageToBase64Image(imageUrl: string) { if (imageUrl.includes(CACHE_URL_PREFIX)) { From f3154b20a559e0d5b3d0025b13827adb66e1fae0 Mon Sep 17 00:00:00 2001 From: hyiip Date: Tue, 25 Feb 2025 03:55:24 +0800 Subject: [PATCH 31/37] claude 3.7 support --- app/constant.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/app/constant.ts b/app/constant.ts index 50aaf7921..02ba8dc81 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -535,6 +535,8 @@ const anthropicModels = [ "claude-3-5-sonnet-20240620", "claude-3-5-sonnet-20241022", "claude-3-5-sonnet-latest", + "claude-3-7-sonnet-20250219", + "claude-3-7-sonnet-latest", ]; const baiduModels = [ From 0a25a1a8cbfde5ba8536afda5624195ab1708cbc Mon Sep 17 00:00:00 2001 From: EvanWu <850123119@qq.com> Date: Tue, 25 Feb 2025 09:22:47 +0800 Subject: [PATCH 32/37] 
refactor(app/utils/chat.ts): optimize function
 preProcessImageContentBase

---
 app/utils/chat.ts | 34 +++++++++++++++-------------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/app/utils/chat.ts b/app/utils/chat.ts
index ecb2fa468..879d3d198 100644
--- a/app/utils/chat.ts
+++ b/app/utils/chat.ts
@@ -70,8 +70,9 @@ export function compressImage(file: Blob, maxSize: number): Promise {
   });
 }
 
-export async function preProcessImageContent(
+export async function preProcessImageContentBase(
   content: RequestMessage["content"],
+  transformImageUrl: (url: string) => Promise<{ [key: string]: any }>,
 ) {
   if (typeof content === "string") {
     return content;
@@ -81,7 +82,7 @@ export async function preProcessImageContent(
     if (part?.type == "image_url" && part?.image_url?.url) {
       try {
         const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ type: part.type, image_url: { url } });
+        result.push(await transformImageUrl(url));
       } catch (error) {
         console.error("Error processing image URL:", error);
       }
@@ -92,26 +93,21 @@ export async function preProcessImageContent(
   return result;
 }
 
+export async function preProcessImageContent(
+  content: RequestMessage["content"],
+) {
+  return preProcessImageContentBase(content, async (url) => ({
+    type: "image_url",
+    image_url: { url },
+  }));
+}
+
 export async function preProcessImageContentForAlibabaDashScope(
   content: RequestMessage["content"],
 ) {
-  if (typeof content === "string") {
-    return content;
-  }
-  const result = [];
-  for (const part of content) {
-    if (part?.type == "image_url" && part?.image_url?.url) {
-      try {
-        const url = await cacheImageToBase64Image(part?.image_url?.url);
-        result.push({ image: url });
-      } catch (error) {
-        console.error("Error processing image URL:", error);
-      }
-    } else {
-      result.push({ ...part });
-    }
-  }
-  return result;
+  return preProcessImageContentBase(content, async (url) => ({
+    image: url,
+  }));
 }
 
 const imageCaches: Record = {};

From ebcb4db245d3b7b4d34f807c5c7aaa5975ac5330 Mon Sep 17 00:00:00 2001
From: Rex Ng
Date: Tue, 25 Feb 2025 14:30:18 +0800
Subject: [PATCH 33/37] Fix: Improve Mistral icon detection and remove
 redundant code.

- Added "codestral" to the list of acceptable names for the Mistral icon, ensuring proper detection.
- Removed duplicate `toLowerCase()` calls.
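For illustration, a standalone sketch of the matching rule this change relies on (not the component code itself — `pickLlmIconKey` and the string keys it returns are made-up stand-ins for the icon components used in `app/components/emoji.tsx`):

```typescript
// Standalone sketch: lower-case the model name once, then match prefixes/substrings.
// Both "mixtral" and "codestral" families are treated as Mistral models.
function pickLlmIconKey(model: string): string {
  const modelName = model.toLowerCase();
  if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
    return "mistral";
  }
  if (modelName.includes("deepseek")) {
    return "deepseek";
  }
  if (modelName.includes("llama")) {
    return "meta";
  }
  return "default";
}

console.log(pickLlmIconKey("codestral-latest")); // "mistral"
console.log(pickLlmIconKey("DeepSeek-R1"));      // "deepseek"
console.log(pickLlmIconKey("gpt-4o"));           // "default"
```

Lower-casing the model name once up front is what makes the per-branch `toLowerCase()` calls below redundant.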
--- app/components/emoji.tsx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index 1bf39ac1d..31d7f0ac6 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) { LlmIcon = BotIconGemma; } else if (modelName.startsWith("claude")) { LlmIcon = BotIconClaude; - } else if (modelName.toLowerCase().includes("llama")) { + } else if (modelName.includes("llama")) { LlmIcon = BotIconMeta; - } else if (modelName.startsWith("mixtral")) { + } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) { LlmIcon = BotIconMistral; - } else if (modelName.toLowerCase().includes("deepseek")) { + } else if (modelName.includes("deepseek")) { LlmIcon = BotIconDeepseek; } else if (modelName.startsWith("moonshot")) { LlmIcon = BotIconMoonshot; @@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) { } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) { LlmIcon = BotIconDoubao; } else if ( - modelName.toLowerCase().includes("glm") || + modelName.includes("glm") || modelName.startsWith("cogview-") || modelName.startsWith("cogvideox-") ) { From a2c4e468a08cfe7108d30ac0e63fe43c63fb4bef Mon Sep 17 00:00:00 2001 From: EvanWu <850123119@qq.com> Date: Wed, 26 Feb 2025 19:58:32 +0800 Subject: [PATCH 34/37] fix(app/utils/chat.ts): fix type error --- app/utils/chat.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/utils/chat.ts b/app/utils/chat.ts index 879d3d198..cae775512 100644 --- a/app/utils/chat.ts +++ b/app/utils/chat.ts @@ -3,7 +3,7 @@ import { UPLOAD_URL, REQUEST_TIMEOUT_MS, } from "@/app/constant"; -import { RequestMessage } from "@/app/client/api"; +import { MultimodalContent, RequestMessage } from "@/app/client/api"; import Locale from "@/app/locales"; import { EventStreamContentType, @@ -99,7 +99,7 @@ export async function preProcessImageContent( return preProcessImageContentBase(content, async (url) => ({ type: "image_url", image_url: { url }, - })); + })) as Promise; } export async function preProcessImageContentForAlibabaDashScope( From ad6666eeafb38c1faa00ced357187138d7e09bcb Mon Sep 17 00:00:00 2001 From: "Mr. 
AGI" <102142660+agi-dude@users.noreply.github.com> Date: Fri, 28 Feb 2025 10:47:52 +0500 Subject: [PATCH 35/37] Update README.md --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index fbc087697..93a5289bd 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,6 @@ English / [简体中文](./README_CN.md) [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) [NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) From 9f0182b55efac275094a36fc6a8487f2f619be91 Mon Sep 17 00:00:00 2001 From: Kadxy <2230318258@qq.com> Date: Fri, 28 Feb 2025 13:52:26 +0800 Subject: [PATCH 36/37] fix: enforce that the first message (excluding system messages) is a user message in the Deepseek API --- app/client/platforms/deepseek.ts | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts index b21d24cef..db67a92f0 100644 --- a/app/client/platforms/deepseek.ts +++ b/app/client/platforms/deepseek.ts @@ -75,6 +75,25 @@ export class DeepSeekApi implements LLMApi { } } + // 检测并修复消息顺序,确保除system外的第一个消息是user + const filteredMessages: ChatOptions["messages"] = []; + let hasFoundFirstUser = false; + + for (const msg of messages) { + if (msg.role === "system") { + // Keep all system messages + filteredMessages.push(msg); + } else if (msg.role === "user") { + // User message directly added + filteredMessages.push(msg); + hasFoundFirstUser = true; + } else if (hasFoundFirstUser) { + // After finding the first user message, all subsequent non-system messages are retained. + filteredMessages.push(msg); + } + // If hasFoundFirstUser is false and it is not a system message, it will be skipped. + } + const modelConfig = { ...useAppConfig.getState().modelConfig, ...useChatStore.getState().currentSession().mask.modelConfig, From 2d4180f5be9639fc7a7e050834b1706ac2ee47ee Mon Sep 17 00:00:00 2001 From: Kadxy <2230318258@qq.com> Date: Fri, 28 Feb 2025 13:59:30 +0800 Subject: [PATCH 37/37] fix: update request payload to use filtered messages in Deepseek API --- app/client/platforms/deepseek.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts index db67a92f0..1b38b40cc 100644 --- a/app/client/platforms/deepseek.ts +++ b/app/client/platforms/deepseek.ts @@ -104,7 +104,7 @@ export class DeepSeekApi implements LLMApi { }; const requestPayload: RequestPayload = { - messages, + messages: filteredMessages, stream: options.config.stream, model: modelConfig.model, temperature: modelConfig.temperature,