Compare commits

...

29 Commits

Author SHA1 Message Date
Kadxy
2d4180f5be fix: update request payload to use filtered messages in Deepseek API 2025-02-28 13:59:30 +08:00
Kadxy
9f0182b55e fix: enforce that the first message (excluding system messages) is a user message in the Deepseek API 2025-02-28 13:54:58 +08:00
RiverRay
2167076652 Merge pull request #6293 from hyiip/main
claude 3.7 support
2025-02-26 18:41:28 +08:00
RiverRay
e123076250 Merge pull request #6295 from rexkyng/patch-1
Fix: Improve Mistral icon detection and remove redundant code.
2025-02-26 18:39:59 +08:00
Rex Ng
ebcb4db245 Fix: Improve Mistral icon detection and remove redundant code.
- Added "codestral" to the list of acceptable names for the Mistral icon, ensuring proper detection.
- Removed duplicate `toLowerCase()` calls.
2025-02-25 14:30:18 +08:00
hyiip
f3154b20a5 claude 3.7 support 2025-02-25 03:55:24 +08:00
RiverRay
f5f3ce94f6 Update README.md 2025-02-21 08:56:43 +08:00
RiverRay
2b5f600308 Update README.md 2025-02-21 08:55:40 +08:00
RiverRay
b966107117 Merge pull request #6235 from DBCDK/danish-locale
Translation to danish
2025-02-17 22:58:01 +08:00
river
377480b448 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-16 10:50:07 +08:00
river
8bd0d6a1a7 chore: Update NextChatAI domain from nextchat.dev to nextchat.club 2025-02-16 10:48:54 +08:00
Rasmus Erik Voel Jensen
90827fc593 danish rewording / improved button label 2025-02-15 13:08:58 +01:00
Rasmus Erik Voel Jensen
008e339b6d danish locale 2025-02-15 12:52:44 +01:00
RiverRay
12863f5213 Merge pull request #6204 from bestsanmao/ali_bytedance_reasoning_content
add 3 type of reasoning_content support (+deepseek-r1@OpenAI @Alibaba @ByteDance), parse <think></think> from SSE
2025-02-13 14:53:47 +08:00
suruiqiang
cf140d4228 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into ali_bytedance_reasoning_content 2025-02-12 17:54:50 +08:00
suruiqiang
476d946f96 fix bug (trim eats space or \n mistakenly), optimize timeout by model 2025-02-12 17:49:54 +08:00
suruiqiang
9714258322 support deepseek-r1@OpenAI's reasoning_content, parse <think></think> from stream 2025-02-11 18:57:16 +08:00
RiverRay
48cd4b11b5 Merge pull request #6190 from siliconflow/refine-emoji-siliconflow
Fix model icon on SiliconFlow
2025-02-11 18:37:47 +08:00
RiverRay
77c78b230a Merge pull request #6193 from siliconflow/get-models-siliconflow
Model listing of SiliconFlow
2025-02-11 18:37:22 +08:00
RiverRay
b44686b887 Merge pull request #6189 from bestsanmao/bug_fix
fix avatar for export message preview and saved image
2025-02-11 18:36:50 +08:00
RiverRay
34bdd4b945 Merge pull request #6194 from siliconflow/vl-support-on-sf
Support VLM on SiliconFlow
2025-02-11 18:35:02 +08:00
suruiqiang
b0758cccde optimization 2025-02-11 16:08:30 +08:00
suruiqiang
98a11e56d2 support alibaba and bytedance's reasoning_content 2025-02-11 12:46:46 +08:00
Shenghang Tsai
86f86962fb Support VLM on SiliconFlow 2025-02-10 13:39:06 +08:00
Shenghang Tsai
2137aa65bf Model listing of SiliconFlow 2025-02-10 11:03:49 +08:00
Shenghang Tsai
18fa2cc30d fix model icon on siliconflow 2025-02-09 18:49:26 +08:00
Shenghang Tsai
0bfc648085 fix model icon on siliconflow 2025-02-09 18:47:57 +08:00
suruiqiang
9f91c2d05c fix avatar for export message preview and saved image 2025-02-09 16:52:46 +08:00
RiverRay
a029b4330b Merge pull request #6188 from ChatGPTNextWeb/Leizhenpeng-patch-4
Update LICENSE
2025-02-09 11:05:43 +08:00
21 changed files with 1273 additions and 332 deletions

View File

@@ -1,6 +1,6 @@
 <div align="center">
-<a href='https://nextchat.dev/chat'>
+<a href='https://nextchat.club'>
 <img src="https://github.com/user-attachments/assets/83bdcc07-ae5e-4954-a53a-ac151ba6ccf3" width="1000" alt="icon"/>
 </a>
@@ -23,9 +23,10 @@ English / [简体中文](./README_CN.md)
 [![Linux][Linux-image]][download-url]
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
-[saas-url]: https://nextchat.dev/chat?utm_source=readme
+[saas-url]: https://nextchat.club?utm_source=readme
 [saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
 [web-url]: https://app.nextchat.dev/
 [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
@@ -40,6 +41,24 @@ English / [简体中文](./README_CN.md)
 </div>
+
+## 👋 Hey, NextChat is going to develop a native app!
+
+> This week we are going to start working on iOS and Android APP, and we want to find some reliable friends to do it together!
+
+✨ Several key points:
+
+- Starting from 0, you are a veteran
+- Completely open source, not hidden
+- Native development, pursuing the ultimate experience
+
+Will you come and do something together? 😎
+
+https://github.com/ChatGPTNextWeb/NextChat/issues/6269
+
+#Seeking for talents is thirsty #lack of people
+
 ## 🥳 Cheer for DeepSeek, China's AI star!
 > Purpose-Built UI for DeepSeek Reasoner Model

View File

@@ -8,7 +8,7 @@
 一键免费部署你的私人 ChatGPT 网页应用,支持 Claude, GPT4 & Gemini Pro 模型。
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
 [<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

View File

@@ -5,7 +5,7 @@
 ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
 [<img src="https://vercel.com/button" alt="Zeaburでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

View File

@@ -14,8 +14,12 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
   if (config.disableGPT4) {
     remoteModelRes.data = remoteModelRes.data.filter(
       (m) =>
-        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1") || m.id.startsWith("o3")) ||
-        m.id.startsWith("gpt-4o-mini"),
+        !(
+          m.id.startsWith("gpt-4") ||
+          m.id.startsWith("chatgpt-4o") ||
+          m.id.startsWith("o1") ||
+          m.id.startsWith("o3")
+        ) || m.id.startsWith("gpt-4o-mini"),
     );
   }

View File

@@ -1,12 +1,13 @@
 "use client";
-import {
-  ApiPath,
-  Alibaba,
-  ALIBABA_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
-import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
+import {
+  useAccessStore,
+  useAppConfig,
+  useChatStore,
+  ChatMessageTool,
+  usePluginStore,
+} from "@/app/store";
+import { streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -15,14 +16,12 @@ import {
   SpeechOptions,
   MultimodalContent,
 } from "../api";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -92,7 +91,10 @@ export class QwenApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: getMessageTextContent(v),
+      content:
+        v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : getMessageTextContent(v),
     }));

     const modelConfig = {
@@ -122,134 +124,118 @@ export class QwenApi implements LLMApi {
     options.onController?.(controller);

     try {
+      const headers = {
+        ...getHeaders(),
+        "X-DashScope-SSE": shouldStream ? "enable" : "disable",
+      };
+
       const chatPath = this.path(Alibaba.ChatPath);
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
         signal: controller.signal,
-        headers: {
-          ...getHeaders(),
-          "X-DashScope-SSE": shouldStream ? "enable" : "disable",
-        },
+        headers: headers,
       };

       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
       );

       if (shouldStream) {
-        let responseText = "";
-        let remainText = "";
-        let finished = false;
-        let responseRes: Response;
-
-        // animate response to make it looks smooth
-        function animateResponseText() {
-          if (finished || controller.signal.aborted) {
-            responseText += remainText;
-            console.log("[Response Animation] finished");
-            if (responseText?.length === 0) {
-              options.onError?.(new Error("empty response from server"));
-            }
-            return;
-          }
-
-          if (remainText.length > 0) {
-            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
-            const fetchText = remainText.slice(0, fetchCount);
-            responseText += fetchText;
-            remainText = remainText.slice(fetchCount);
-            options.onUpdate?.(responseText, fetchText);
-          }
-
-          requestAnimationFrame(animateResponseText);
-        }
-
-        // start animaion
-        animateResponseText();
-
-        const finish = () => {
-          if (!finished) {
-            finished = true;
-            options.onFinish(responseText + remainText, responseRes);
-          }
-        };
-
-        controller.signal.onabort = finish;
-
-        fetchEventSource(chatPath, {
-          fetch: fetch as any,
-          ...chatPayload,
-          async onopen(res) {
-            clearTimeout(requestTimeoutId);
-            const contentType = res.headers.get("content-type");
-            console.log(
-              "[Alibaba] request response content type: ",
-              contentType,
-            );
-            responseRes = res;
-            if (contentType?.startsWith("text/plain")) {
-              responseText = await res.clone().text();
-              return finish();
-            }
-
-            if (
-              !res.ok ||
-              !res.headers
-                .get("content-type")
-                ?.startsWith(EventStreamContentType) ||
-              res.status !== 200
-            ) {
-              const responseTexts = [responseText];
-              let extraInfo = await res.clone().text();
-              try {
-                const resJson = await res.clone().json();
-                extraInfo = prettyObject(resJson);
-              } catch {}
-
-              if (res.status === 401) {
-                responseTexts.push(Locale.Error.Unauthorized);
-              }
-
-              if (extraInfo) {
-                responseTexts.push(extraInfo);
-              }
-
-              responseText = responseTexts.join("\n\n");
-
-              return finish();
-            }
-          },
-          onmessage(msg) {
-            if (msg.data === "[DONE]" || finished) {
-              return finish();
-            }
-            const text = msg.data;
-            try {
-              const json = JSON.parse(text);
-              const choices = json.output.choices as Array<{
-                message: { content: string };
-              }>;
-              const delta = choices[0]?.message?.content;
-              if (delta) {
-                remainText += delta;
-              }
-            } catch (e) {
-              console.error("[Request] parse error", text, msg);
-            }
-          },
-          onclose() {
-            finish();
-          },
-          onerror(e) {
-            options.onError?.(e);
-            throw e;
-          },
-          openWhenHidden: true,
-        });
+        const [tools, funcs] = usePluginStore
+          .getState()
+          .getAsTools(
+            useChatStore.getState().currentSession().mask?.plugin || [],
+          );
+        return streamWithThink(
+          chatPath,
+          requestPayload,
+          headers,
+          tools as any,
+          funcs,
+          controller,
+          // parseSSE
+          (text: string, runTools: ChatMessageTool[]) => {
+            // console.log("parseSSE", text, runTools);
+            const json = JSON.parse(text);
+            const choices = json.output.choices as Array<{
+              message: {
+                content: string | null;
+                tool_calls: ChatMessageTool[];
+                reasoning_content: string | null;
+              };
+            }>;
+
+            if (!choices?.length) return { isThinking: false, content: "" };
+
+            const tool_calls = choices[0]?.message?.tool_calls;
+            if (tool_calls?.length > 0) {
+              const index = tool_calls[0]?.index;
+              const id = tool_calls[0]?.id;
+              const args = tool_calls[0]?.function?.arguments;
+              if (id) {
+                runTools.push({
+                  id,
+                  type: tool_calls[0]?.type,
+                  function: {
+                    name: tool_calls[0]?.function?.name as string,
+                    arguments: args,
+                  },
+                });
+              } else {
+                // @ts-ignore
+                runTools[index]["function"]["arguments"] += args;
+              }
+            }
+
+            const reasoning = choices[0]?.message?.reasoning_content;
+            const content = choices[0]?.message?.content;
+
+            // Skip if both content and reasoning_content are empty or null
+            if (
+              (!reasoning || reasoning.length === 0) &&
+              (!content || content.length === 0)
+            ) {
+              return {
+                isThinking: false,
+                content: "",
+              };
+            }
+
+            if (reasoning && reasoning.length > 0) {
+              return {
+                isThinking: true,
+                content: reasoning,
+              };
+            } else if (content && content.length > 0) {
+              return {
+                isThinking: false,
+                content: content,
+              };
+            }
+
+            return {
+              isThinking: false,
+              content: "",
+            };
+          },
+          // processToolMessage, include tool_calls message and tool call results
+          (
+            requestPayload: RequestPayload,
+            toolCallMessage: any,
+            toolCallResult: any[],
+          ) => {
+            requestPayload?.input?.messages?.splice(
+              requestPayload?.input?.messages?.length,
+              0,
+              toolCallMessage,
+              ...toolCallResult,
+            );
+          },
+          options,
+        );
       } else {
         const res = await fetch(chatPath, chatPayload);
         clearTimeout(requestTimeoutId);
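
Note: getTimeoutMSByModel is called in every provider file in this compare, but its definition in app/utils.ts is not part of the diff. A plausible sketch, inferred from the call sites above and from the REQUEST_TIMEOUT_MS / REQUEST_TIMEOUT_MS_FOR_THINKING constants it replaces; the exact model-name checks are assumptions:

// Sketch only: the real helper lives in app/utils.ts and is not shown in
// this compare. The model-name checks below are assumptions.
import {
  REQUEST_TIMEOUT_MS,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";

export function getTimeoutMSByModel(model: string) {
  model = model.toLowerCase();
  // Image generation and reasoning/"thinking" models stream slowly, so they
  // get the extended budget; everything else keeps the default timeout.
  if (
    model.startsWith("dall-e") ||
    model.startsWith("o1") ||
    model.startsWith("o3") ||
    model.includes("deepseek-r") ||
    model.includes("-thinking")
  )
    return REQUEST_TIMEOUT_MS_FOR_THINKING;
  return REQUEST_TIMEOUT_MS;
}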

View File

@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  Baidu,
-  BAIDU_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getAccessToken } from "@/app/utils/baidu";
@@ -23,7 +18,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -155,7 +150,7 @@ export class ErnieApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
       );

       if (shouldStream) {

View File

@@ -1,11 +1,12 @@
 "use client";
-import {
-  ApiPath,
-  ByteDance,
-  BYTEDANCE_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
-import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant";
+import {
+  useAccessStore,
+  useAppConfig,
+  useChatStore,
+  ChatMessageTool,
+  usePluginStore,
+} from "@/app/store";

 import {
   ChatOptions,
@@ -15,14 +16,14 @@ import {
   MultimodalContent,
   SpeechOptions,
 } from "../api";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
+
+import { streamWithThink } from "@/app/utils/chat";
 import { getClientConfig } from "@/app/config/client";
 import { preProcessImageContent } from "@/app/utils/chat";
+import {
+  getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -34,7 +35,7 @@ export interface OpenAIListModelResponse {
   }>;
 }

-interface RequestPayload {
+interface RequestPayloadForByteDance {
   messages: {
     role: "system" | "user" | "assistant";
     content: string | MultimodalContent[];
@@ -86,7 +87,10 @@ export class DoubaoApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = await preProcessImageContent(v.content);
+      const content =
+        v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
@@ -99,7 +103,7 @@ export class DoubaoApi implements LLMApi {
     };

     const shouldStream = !!options.config.stream;
-    const requestPayload: RequestPayload = {
+    const requestPayload: RequestPayloadForByteDance = {
       messages,
       stream: shouldStream,
       model: modelConfig.model,
@@ -124,119 +128,101 @@ export class DoubaoApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
       );

       if (shouldStream) {
-        let responseText = "";
-        let remainText = "";
-        let finished = false;
-        let responseRes: Response;
-
-        // animate response to make it looks smooth
-        function animateResponseText() {
-          if (finished || controller.signal.aborted) {
-            responseText += remainText;
-            console.log("[Response Animation] finished");
-            if (responseText?.length === 0) {
-              options.onError?.(new Error("empty response from server"));
-            }
-            return;
-          }
-
-          if (remainText.length > 0) {
-            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
-            const fetchText = remainText.slice(0, fetchCount);
-            responseText += fetchText;
-            remainText = remainText.slice(fetchCount);
-            options.onUpdate?.(responseText, fetchText);
-          }
-
-          requestAnimationFrame(animateResponseText);
-        }
-
-        // start animaion
-        animateResponseText();
-
-        const finish = () => {
-          if (!finished) {
-            finished = true;
-            options.onFinish(responseText + remainText, responseRes);
-          }
-        };
-
-        controller.signal.onabort = finish;
-
-        fetchEventSource(chatPath, {
-          fetch: fetch as any,
-          ...chatPayload,
-          async onopen(res) {
-            clearTimeout(requestTimeoutId);
-            const contentType = res.headers.get("content-type");
-            console.log(
-              "[ByteDance] request response content type: ",
-              contentType,
-            );
-            responseRes = res;
-            if (contentType?.startsWith("text/plain")) {
-              responseText = await res.clone().text();
-              return finish();
-            }
-
-            if (
-              !res.ok ||
-              !res.headers
-                .get("content-type")
-                ?.startsWith(EventStreamContentType) ||
-              res.status !== 200
-            ) {
-              const responseTexts = [responseText];
-              let extraInfo = await res.clone().text();
-              try {
-                const resJson = await res.clone().json();
-                extraInfo = prettyObject(resJson);
-              } catch {}
-
-              if (res.status === 401) {
-                responseTexts.push(Locale.Error.Unauthorized);
-              }
-
-              if (extraInfo) {
-                responseTexts.push(extraInfo);
-              }
-
-              responseText = responseTexts.join("\n\n");
-
-              return finish();
-            }
-          },
-          onmessage(msg) {
-            if (msg.data === "[DONE]" || finished) {
-              return finish();
-            }
-            const text = msg.data;
-            try {
-              const json = JSON.parse(text);
-              const choices = json.choices as Array<{
-                delta: { content: string };
-              }>;
-              const delta = choices[0]?.delta?.content;
-              if (delta) {
-                remainText += delta;
-              }
-            } catch (e) {
-              console.error("[Request] parse error", text, msg);
-            }
-          },
-          onclose() {
-            finish();
-          },
-          onerror(e) {
-            options.onError?.(e);
-            throw e;
-          },
-          openWhenHidden: true,
-        });
+        const [tools, funcs] = usePluginStore
+          .getState()
+          .getAsTools(
+            useChatStore.getState().currentSession().mask?.plugin || [],
+          );
+        return streamWithThink(
+          chatPath,
+          requestPayload,
+          getHeaders(),
+          tools as any,
+          funcs,
+          controller,
+          // parseSSE
+          (text: string, runTools: ChatMessageTool[]) => {
+            // console.log("parseSSE", text, runTools);
+            const json = JSON.parse(text);
+            const choices = json.choices as Array<{
+              delta: {
+                content: string | null;
+                tool_calls: ChatMessageTool[];
+                reasoning_content: string | null;
+              };
+            }>;

+            if (!choices?.length) return { isThinking: false, content: "" };
+
+            const tool_calls = choices[0]?.delta?.tool_calls;
+            if (tool_calls?.length > 0) {
+              const index = tool_calls[0]?.index;
+              const id = tool_calls[0]?.id;
+              const args = tool_calls[0]?.function?.arguments;
+              if (id) {
+                runTools.push({
+                  id,
+                  type: tool_calls[0]?.type,
+                  function: {
+                    name: tool_calls[0]?.function?.name as string,
+                    arguments: args,
+                  },
+                });
+              } else {
+                // @ts-ignore
+                runTools[index]["function"]["arguments"] += args;
+              }
+            }
+
+            const reasoning = choices[0]?.delta?.reasoning_content;
+            const content = choices[0]?.delta?.content;
+
+            // Skip if both content and reasoning_content are empty or null
+            if (
+              (!reasoning || reasoning.length === 0) &&
+              (!content || content.length === 0)
+            ) {
+              return {
+                isThinking: false,
+                content: "",
+              };
+            }
+
+            if (reasoning && reasoning.length > 0) {
+              return {
+                isThinking: true,
+                content: reasoning,
+              };
+            } else if (content && content.length > 0) {
+              return {
+                isThinking: false,
+                content: content,
+              };
+            }
+
+            return {
+              isThinking: false,
+              content: "",
+            };
+          },
+          // processToolMessage, include tool_calls message and tool call results
+          (
+            requestPayload: RequestPayloadForByteDance,
+            toolCallMessage: any,
+            toolCallResult: any[],
+          ) => {
+            requestPayload?.messages?.splice(
+              requestPayload?.messages?.length,
+              0,
+              toolCallMessage,
+              ...toolCallResult,
+            );
+          },
+          options,
+        );
       } else {
         const res = await fetch(chatPath, chatPayload);
         clearTimeout(requestTimeoutId);

View File

@@ -1,12 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import {
-  ApiPath,
-  DEEPSEEK_BASE_URL,
-  DeepSeek,
-  REQUEST_TIMEOUT_MS,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -26,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -80,6 +75,25 @@ export class DeepSeekApi implements LLMApi {
       }
     }

+    // Detect and fix the message order: ensure the first non-system message is a user message
+    const filteredMessages: ChatOptions["messages"] = [];
+    let hasFoundFirstUser = false;
+
+    for (const msg of messages) {
+      if (msg.role === "system") {
+        // Keep all system messages
+        filteredMessages.push(msg);
+      } else if (msg.role === "user") {
+        // User message directly added
+        filteredMessages.push(msg);
+        hasFoundFirstUser = true;
+      } else if (hasFoundFirstUser) {
+        // After finding the first user message, all subsequent non-system messages are retained.
+        filteredMessages.push(msg);
+      }
+      // If hasFoundFirstUser is false and it is not a system message, it will be skipped.
+    }
+
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
       ...useChatStore.getState().currentSession().mask.modelConfig,
@@ -90,7 +104,7 @@ export class DeepSeekApi implements LLMApi {
     };

     const requestPayload: RequestPayload = {
-      messages,
+      messages: filteredMessages,
       stream: options.config.stream,
       model: modelConfig.model,
       temperature: modelConfig.temperature,
@@ -116,16 +130,10 @@ export class DeepSeekApi implements LLMApi {
       headers: getHeaders(),
     };

-    // console.log(chatPayload);
-
-    const isR1 =
-      options.config.model.endsWith("-reasoner") ||
-      options.config.model.endsWith("-r1");
-
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
@@ -176,8 +184,8 @@ export class DeepSeekApi implements LLMApi {
           // Skip if both content and reasoning_content are empty or null
           if (
-            (!reasoning || reasoning.trim().length === 0) &&
-            (!content || content.trim().length === 0)
+            (!reasoning || reasoning.length === 0) &&
+            (!content || content.length === 0)
           ) {
             return {
               isThinking: false,
@@ -185,12 +193,12 @@ export class DeepSeekApi implements LLMApi {
             };
           }

-          if (reasoning && reasoning.trim().length > 0) {
+          if (reasoning && reasoning.length > 0) {
             return {
               isThinking: true,
               content: reasoning,
             };
-          } else if (content && content.trim().length > 0) {
+          } else if (content && content.length > 0) {
             return {
               isThinking: false,
               content: content,
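
The reordering guard added above can be read as a standalone function. A minimal illustration (not repository code; the Msg type and sample values are hypothetical):

// Standalone illustration of the DeepSeek message-order guard above.
type Msg = { role: "system" | "user" | "assistant"; content: string };

function dropLeadingAssistant(messages: Msg[]): Msg[] {
  const out: Msg[] = [];
  let seenUser = false;
  for (const m of messages) {
    if (m.role === "system" || m.role === "user" || seenUser) {
      out.push(m);
      if (m.role === "user") seenUser = true;
    }
    // assistant messages before the first user turn are skipped
  }
  return out;
}

// [system, assistant, user, assistant] -> [system, user, assistant]:
// the leading assistant turn is dropped, so the first non-system message
// DeepSeek receives is always from the user.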

View File

@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  CHATGLM_BASE_URL,
-  ChatGLM,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -21,7 +16,11 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 import { preProcessImageContent } from "@/app/utils/chat";
@@ -191,7 +190,7 @@ export class ChatGLMApi implements LLMApi {
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (modelType === "image" || modelType === "video") {

View File

@@ -1,9 +1,4 @@
-import {
-  ApiPath,
-  Google,
-  REQUEST_TIMEOUT_MS,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, Google } from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -27,6 +22,7 @@ import {
   getMessageTextContent,
   getMessageImages,
   isVisionModel,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { nanoid } from "nanoid";
@@ -206,7 +202,7 @@ export class GeminiProApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {

View File

@@ -8,7 +8,6 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -22,7 +21,7 @@ import {
   preProcessImageContent,
   uploadImage,
   base64Image2Blob,
-  stream,
+  streamWithThink,
 } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
@@ -42,6 +41,7 @@ import {
   getMessageTextContent,
   isVisionModel,
   isDalle3 as _isDalle3,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
@@ -294,7 +294,7 @@ export class ChatGPTApi implements LLMApi {
         useChatStore.getState().currentSession().mask?.plugin || [],
       );
       // console.log("getAsTools", tools, funcs);
-      stream(
+      streamWithThink(
         chatPath,
         requestPayload,
         getHeaders(),
@@ -309,8 +309,12 @@
             delta: {
               content: string;
               tool_calls: ChatMessageTool[];
+              reasoning_content: string | null;
             };
           }>;
+
+          if (!choices?.length) return { isThinking: false, content: "" };
+
           const tool_calls = choices[0]?.delta?.tool_calls;
           if (tool_calls?.length > 0) {
             const id = tool_calls[0]?.id;
@@ -330,7 +334,37 @@
               runTools[index]["function"]["arguments"] += args;
             }
           }
-          return choices[0]?.delta?.content;
+
+          const reasoning = choices[0]?.delta?.reasoning_content;
+          const content = choices[0]?.delta?.content;
+
+          // Skip if both content and reasoning_content are empty or null
+          if (
+            (!reasoning || reasoning.length === 0) &&
+            (!content || content.length === 0)
+          ) {
+            return {
+              isThinking: false,
+              content: "",
+            };
+          }
+
+          if (reasoning && reasoning.length > 0) {
+            return {
+              isThinking: true,
+              content: reasoning,
+            };
+          } else if (content && content.length > 0) {
+            return {
+              isThinking: false,
+              content: content,
+            };
+          }
+
+          return {
+            isThinking: false,
+            content: "",
+          };
         },
         // processToolMessage, include tool_calls message and tool call results
         (
@@ -362,9 +396,7 @@
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3
-          ? REQUEST_TIMEOUT_MS_FOR_THINKING
-          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        getTimeoutMSByModel(options.config.model),
       );

       const res = await fetch(chatPath, chatPayload);
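
The switch from stream() to streamWithThink() changes what the parseSSE callback returns: instead of a plain text delta, each chunk is classified as reasoning or answer. A sketch of the contract, inferred from the call sites in this compare (the type names below are illustrative, not the actual declarations in app/utils/chat.ts):

import { ChatMessageTool } from "@/app/store";

// Inferred from the call sites above; illustrative names only.
interface ThinkChunk {
  isThinking: boolean; // true while the model is emitting reasoning_content
  content: string; // the delta to append to the current section
}

// Under stream(), parseSSE returned the text delta directly; under
// streamWithThink() it returns a ThinkChunk, which lets the UI route
// reasoning into a collapsible "thinking" block before the final answer.
type ParseSSE = (text: string, runTools: ChatMessageTool[]) => ThinkChunk;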

View File

@@ -4,7 +4,7 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
+  DEFAULT_MODELS,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -13,7 +13,7 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -25,12 +25,23 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  isVisionModel,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";

+export interface SiliconFlowListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
 export class SiliconflowApi implements LLMApi {
-  private disableListModels = true;
+  private disableListModels = false;

   path(path: string): string {
     const accessStore = useAccessStore.getState();
@@ -71,13 +82,16 @@ export class SiliconflowApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
       if (v.role === "assistant") {
         const content = getMessageTextContentWithoutThinking(v);
         messages.push({ role: v.role, content });
       } else {
-        const content = getMessageTextContent(v);
+        const content = visionModel
+          ? await preProcessImageContent(v.content)
+          : getMessageTextContent(v);
         messages.push({ role: v.role, content });
       }
     }
@@ -123,7 +137,7 @@ export class SiliconflowApi implements LLMApi {
     // Use extended timeout for thinking models as they typically require more processing time
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS_FOR_THINKING,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
@@ -238,6 +252,36 @@
   }

   async models(): Promise<LLMModel[]> {
-    return [];
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(SiliconFlow.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as SiliconFlowListModelResponse;
+    const chatModels = resJson.data;
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    let seq = 1000; // keep the sort order consistent with constant.ts
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+      sorted: seq++,
+      provider: {
+        id: "siliconflow",
+        providerName: "SiliconFlow",
+        providerType: "siliconflow",
+        sorted: 14,
+      },
+    }));
   }
 }
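
The new models() expects the SiliconFlow list endpoint to return the SiliconFlowListModelResponse shape declared earlier in the file. A hypothetical response and the mapping it produces (the model ids are examples, not an actual API capture):

// Hypothetical response for the mapping in models() above.
const resJson: SiliconFlowListModelResponse = {
  object: "list",
  data: [
    { id: "deepseek-ai/DeepSeek-R1", object: "model", root: "" },
    { id: "Qwen/Qwen2.5-7B-Instruct", object: "model", root: "" },
  ],
};
// Each entry becomes an LLMModel named after its id, marked available,
// tagged with the "siliconflow" provider, and given a sorted value
// counting up from 1000 so listed models sort after the built-ins.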

View File

@@ -1,5 +1,5 @@
 "use client";
-import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, TENCENT_BASE_URL } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

 import {
@@ -17,7 +17,11 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import mapKeys from "lodash-es/mapKeys";
 import mapValues from "lodash-es/mapValues";
 import isArray from "lodash-es/isArray";
@@ -135,7 +139,7 @@ export class HunyuanApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {

View File

@@ -1,6 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -17,6 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
+import { getTimeoutMSByModel } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -103,7 +104,7 @@ export class XAIApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {

View File

@@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     LlmIcon = BotIconGemma;
   } else if (modelName.startsWith("claude")) {
     LlmIcon = BotIconClaude;
-  } else if (modelName.startsWith("llama")) {
+  } else if (modelName.includes("llama")) {
     LlmIcon = BotIconMeta;
-  } else if (modelName.startsWith("mixtral")) {
+  } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
     LlmIcon = BotIconMistral;
-  } else if (modelName.startsWith("deepseek")) {
+  } else if (modelName.includes("deepseek")) {
     LlmIcon = BotIconDeepseek;
   } else if (modelName.startsWith("moonshot")) {
     LlmIcon = BotIconMoonshot;
@@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
   } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
     LlmIcon = BotIconDoubao;
   } else if (
-    modelName.startsWith("glm") ||
+    modelName.includes("glm") ||
     modelName.startsWith("cogview-") ||
     modelName.startsWith("cogvideox-")
   ) {

View File

@@ -23,7 +23,6 @@ import CopyIcon from "../icons/copy.svg";
 import LoadingIcon from "../icons/three-dots.svg";
 import ChatGptIcon from "../icons/chatgpt.png";
 import ShareIcon from "../icons/share.svg";
-import BotIcon from "../icons/bot.png";
 import DownloadIcon from "../icons/download.svg";

 import { useEffect, useMemo, useRef, useState } from "react";
@@ -33,13 +32,13 @@ import dynamic from "next/dynamic";
 import NextImage from "next/image";

 import { toBlob, toPng } from "html-to-image";
-import { DEFAULT_MASK_AVATAR } from "../store/mask";

 import { prettyObject } from "../utils/format";
 import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
 import { getClientConfig } from "../config/client";
 import { type ClientApi, getClientApi } from "../client/api";
 import { getMessageTextContent } from "../utils";
+import { MaskAvatar } from "./mask";
 import clsx from "clsx";

 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
@@ -407,22 +406,6 @@ export function PreviewActions(props: {
   );
 }

-function ExportAvatar(props: { avatar: string }) {
-  if (props.avatar === DEFAULT_MASK_AVATAR) {
-    return (
-      <img
-        src={BotIcon.src}
-        width={30}
-        height={30}
-        alt="bot"
-        className="user-avatar"
-      />
-    );
-  }
-
-  return <Avatar avatar={props.avatar} />;
-}
-
 export function ImagePreviewer(props: {
   messages: ChatMessage[];
   topic: string;
@@ -546,9 +529,12 @@ export function ImagePreviewer(props: {
             github.com/ChatGPTNextWeb/ChatGPT-Next-Web
           </div>
           <div className={styles["icons"]}>
-            <ExportAvatar avatar={config.avatar} />
+            <MaskAvatar avatar={config.avatar} />
             <span className={styles["icon-space"]}>&</span>
-            <ExportAvatar avatar={mask.avatar} />
+            <MaskAvatar
+              avatar={mask.avatar}
+              model={session.mask.modelConfig.model}
+            />
           </div>
         </div>
         <div>
@@ -576,9 +562,14 @@ export function ImagePreviewer(props: {
               key={i}
             >
               <div className={styles["avatar"]}>
-                <ExportAvatar
-                  avatar={m.role === "user" ? config.avatar : mask.avatar}
-                />
+                {m.role === "user" ? (
+                  <Avatar avatar={config.avatar}></Avatar>
+                ) : (
+                  <MaskAvatar
+                    avatar={session.mask.avatar}
+                    model={m.model || session.mask.modelConfig.model}
+                  />
+                )}
               </div>
               <div className={styles["body"]}>

View File

@@ -258,6 +258,7 @@ export const ChatGLM = {
 export const SiliconFlow = {
   ExampleEndpoint: SILICONFLOW_BASE_URL,
   ChatPath: "v1/chat/completions",
+  ListModelPath: "v1/models?&sub_type=chat",
 };

 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
@@ -462,6 +463,7 @@ export const VISION_MODEL_REGEXES = [
   /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
+  /vl/i,
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -533,6 +535,8 @@ const anthropicModels = [
   "claude-3-5-sonnet-20240620",
   "claude-3-5-sonnet-20241022",
   "claude-3-5-sonnet-latest",
+  "claude-3-7-sonnet-20250219",
+  "claude-3-7-sonnet-latest",
 ];

 const baiduModels = [
@@ -814,5 +818,5 @@ export const internalAllowedWebDavEndpoints = [
 export const DEFAULT_GA_ID = "G-89WN60ZK2E";

-export const SAAS_CHAT_URL = "https://nextchat.dev/chat";
-export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github";
+export const SAAS_CHAT_URL = "https://nextchat.club";
+export const SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github";
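
The new /vl/i entry broadens vision detection to any model id containing "vl", which is what the SiliconFlow VLM support above relies on. A quick illustration (model names are examples, not a definitive list):

// Example of the broadened check; names are illustrative.
const regexes = [/vl/i];
["qwen-vl-plus", "Qwen/Qwen2-VL-72B-Instruct", "gpt-3.5-turbo"].map((m) =>
  regexes.some((r) => r.test(m)),
); // -> [true, true, false]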

app/locales/da.ts (new file, 832 lines)
View File

@@ -0,0 +1,832 @@
import { getClientConfig } from "../config/client";
import { SubmitKey } from "../store/config";
import { SAAS_CHAT_UTM_URL } from "@/app/constant";
import { PartialLocaleType } from "./index";
const isApp = !!getClientConfig()?.isApp;
const da: PartialLocaleType = {
WIP: "Der kommer snart mere...",
Error: {
Unauthorized: isApp
? `Hov, der skete en fejl. Sådan kan du komme videre:
\\ 1⃣ Er du ny her? [Tryk for at starte nu 🚀](${SAAS_CHAT_UTM_URL})
\\ 2⃣ Vil du bruge dine egne OpenAI-nøgler? [Tryk her](/#/settings) for at ændre indstillinger ⚙️`
: `Hov, der skete en fejl. Lad os løse det:
\\ 1⃣ Er du ny her? [Tryk for at starte nu 🚀](${SAAS_CHAT_UTM_URL})
\\ 2⃣ Bruger du en privat opsætning? [Tryk her](/#/auth) for at taste din nøgle 🔑
\\ 3⃣ Vil du bruge dine egne OpenAI-nøgler? [Tryk her](/#/settings) for at ændre indstillinger ⚙️
`,
},
Auth: {
Return: "Tilbage",
Title: "Adgangskode",
Tips: "Skriv venligst koden herunder",
SubTips: "Eller brug din egen OpenAI- eller Google-nøgle",
Input: "Adgangskode",
Confirm: "OK",
Later: "Senere",
SaasTips: "Hvis det er for svært, kan du starte nu",
},
ChatItem: {
ChatItemCount: (count: number) => `${count} beskeder`,
},
Chat: {
SubTitle: (count: number) => `${count} beskeder`,
EditMessage: {
Title: "Rediger beskeder",
Topic: {
Title: "Emne",
SubTitle: "Skift emne for denne chat",
},
},
Actions: {
ChatList: "Gå til chatliste",
CompressedHistory: "Komprimeret historie",
Export: "Eksporter alle beskeder som Markdown",
Copy: "Kopiér",
Stop: "Stop",
Retry: "Prøv igen",
Pin: "Fastgør",
PinToastContent: "1 besked er nu fastgjort",
PinToastAction: "Se",
Delete: "Slet",
Edit: "Rediger",
FullScreen: "Fuld skærm",
RefreshTitle: "Opdatér titel",
RefreshToast: "Anmodning om ny titel sendt",
Speech: "Afspil",
StopSpeech: "Stop",
},
Commands: {
new: "Ny chat",
newm: "Ny chat med persona",
next: "Næste chat",
prev: "Forrige chat",
clear: "Ryd alt før",
fork: "Kopiér chat",
del: "Slet chat",
},
InputActions: {
Stop: "Stop",
ToBottom: "Ned til nyeste",
Theme: {
auto: "Automatisk",
light: "Lyst tema",
dark: "Mørkt tema",
},
Prompt: "Prompts",
Masks: "Personaer",
Clear: "Ryd kontekst",
Settings: "Indstillinger",
UploadImage: "Upload billeder",
},
Rename: "Omdøb chat",
Typing: "Skriver…",
Input: (submitKey: string) => {
let inputHints = `${submitKey} for at sende`;
if (submitKey === String(SubmitKey.Enter)) {
inputHints += ", Shift + Enter for ny linje";
}
return (
inputHints + ", / for at søge i prompts, : for at bruge kommandoer"
);
},
Send: "Send",
StartSpeak: "Start oplæsning",
StopSpeak: "Stop oplæsning",
Config: {
Reset: "Nulstil til standard",
SaveAs: "Gem som persona",
},
IsContext: "Ekstra prompt til baggrund",
ShortcutKey: {
Title: "Hurtigtaster",
newChat: "Åbn ny chat",
focusInput: "Fokus på tekstfeltet",
copyLastMessage: "Kopiér sidste svar",
copyLastCode: "Kopiér sidste kodeblok",
showShortcutKey: "Vis hurtigtaster",
clearContext: "Ryd kontekst",
},
},
Export: {
Title: "Eksportér beskeder",
Copy: "Kopiér alt",
Download: "Download",
MessageFromYou: "Fra dig",
MessageFromChatGPT: "Fra ChatGPT",
Share: "Del til ShareGPT",
Format: {
Title: "Filformat",
SubTitle: "Vælg enten Markdown eller PNG-billede",
},
IncludeContext: {
Title: "Tag baggrund med",
SubTitle: "Skal ekstra baggrund (persona) med i eksporten?",
},
Steps: {
Select: "Vælg",
Preview: "Forhåndsvis",
},
Image: {
Toast: "Laver billede...",
Modal: "Tryk længe eller højreklik for at gemme",
},
Artifacts: {
Title: "Del side",
Error: "Fejl ved deling",
},
},
Select: {
Search: "Søg",
All: "Vælg alle",
Latest: "Vælg nyeste",
Clear: "Ryd alt",
},
Memory: {
Title: "Huskesætning",
EmptyContent: "Ingenting lige nu.",
Send: "Send huskesætning",
Copy: "Kopiér huskesætning",
Reset: "Nulstil chat",
ResetConfirm:
"Dette sletter nuværende samtale og hukommelse. Er du sikker?",
},
Home: {
NewChat: "Ny Chat",
DeleteChat: "Vil du slette den valgte chat?",
DeleteToast: "Chat slettet",
Revert: "Fortryd",
},
Settings: {
Title: "Indstillinger",
SubTitle: "Alle indstillinger",
ShowPassword: "Vis kodeord",
Danger: {
Reset: {
Title: "Nulstil alle indstillinger",
SubTitle: "Gendan alt til standard",
Action: "Nulstil",
Confirm: "Vil du virkelig nulstille alt?",
},
Clear: {
Title: "Slet alle data",
SubTitle: "Sletter alt om beskeder og indstillinger",
Action: "Slet",
Confirm: "Er du sikker på, at du vil slette alt?",
},
},
Lang: {
Name: "Language",
All: "Alle sprog",
},
Avatar: "Avatar",
FontSize: {
Title: "Skriftstørrelse",
SubTitle: "Vælg, hvor stor teksten skal være",
},
FontFamily: {
Title: "Skrifttype",
SubTitle: "Hvis tom, bruger den standard skrifttype",
Placeholder: "Skrifttype-navn",
},
InjectSystemPrompts: {
Title: "Tilføj system-prompt",
SubTitle: "Læg altid en ekstra prompt først i anmodninger",
},
InputTemplate: {
Title: "Tekstskabelon",
SubTitle: "Den seneste besked placeres i denne skabelon",
},
Update: {
Version: (x: string) => `Version: ${x}`,
IsLatest: "Du har nyeste version",
CheckUpdate: "Tjek efter opdatering",
IsChecking: "Tjekker...",
FoundUpdate: (x: string) => `Ny version fundet: ${x}`,
GoToUpdate: "Opdatér",
Success: "Opdatering lykkedes.",
Failed: "Opdatering mislykkedes.",
},
SendKey: "Tast for send",
Theme: "Tema",
TightBorder: "Stram kant",
SendPreviewBubble: {
Title: "Forhåndsvisnings-boble",
SubTitle: "Vis tekst, før den sendes",
},
AutoGenerateTitle: {
Title: "Lav titel automatisk",
SubTitle: "Foreslå en titel ud fra chatten",
},
Sync: {
CloudState: "Seneste opdatering",
NotSyncYet: "Endnu ikke synkroniseret",
Success: "Synkronisering lykkedes",
Fail: "Synkronisering mislykkedes",
Config: {
Modal: {
Title: "Indstil synk",
Check: "Tjek forbindelse",
},
SyncType: {
Title: "Synk-type",
SubTitle: "Vælg en synk-tjeneste",
},
Proxy: {
Title: "Aktivér proxy",
SubTitle: "Brug proxy for at undgå netværksproblemer",
},
ProxyUrl: {
Title: "Proxy-adresse",
SubTitle: "Bruges kun til projektets egen proxy",
},
WebDav: {
Endpoint: "WebDAV-adresse",
UserName: "Brugernavn",
Password: "Kodeord",
},
UpStash: {
Endpoint: "UpStash Redis REST URL",
UserName: "Backup-navn",
Password: "UpStash Redis REST Token",
},
},
LocalState: "Lokale data",
Overview: (overview: any) =>
`${overview.chat} chats, ${overview.message} beskeder, ${overview.prompt} prompts, ${overview.mask} personaer`,
ImportFailed: "Import mislykkedes",
},
Mask: {
Splash: {
Title: "Persona-forside",
SubTitle: "Vis denne side, når du opretter ny chat",
},
Builtin: {
Title: "Skjul indbyggede personaer",
SubTitle: "Vis ikke de indbyggede personaer i listen",
},
},
Prompt: {
Disable: {
Title: "Slå auto-forslag fra",
SubTitle: "Tast / for at få forslag",
},
List: "Prompt-liste",
ListCount: (builtin: number, custom: number) =>
`${builtin} indbygget, ${custom} brugerdefineret`,
Edit: "Rediger",
Modal: {
Title: "Prompt-liste",
Add: "Tilføj",
Search: "Søg prompts",
},
EditModal: {
Title: "Rediger prompt",
},
},
HistoryCount: {
Title: "Antal beskeder, der følger med",
SubTitle: "Hvor mange af de tidligere beskeder, der sendes hver gang",
},
CompressThreshold: {
Title: "Komprimeringsgrænse",
SubTitle:
"Hvis chatten bliver for lang, vil den komprimeres efter dette antal tegn",
},
Usage: {
Title: "Brug og saldo",
SubTitle(used: any, total: any) {
return `Du har brugt $${used} i denne måned, og din grænse er $${total}.`;
},
IsChecking: "Tjekker...",
Check: "Tjek igen",
NoAccess: "Indtast API-nøgle for at se forbrug",
},
Access: {
AccessCode: {
Title: "Adgangskode",
SubTitle: "Adgangskontrol er slået til",
Placeholder: "Skriv kode her",
},
CustomEndpoint: {
Title: "Brugerdefineret adresse",
SubTitle: "Brug Azure eller OpenAI fra egen server",
},
Provider: {
Title: "Model-udbyder",
SubTitle: "Vælg Azure eller OpenAI",
},
OpenAI: {
ApiKey: {
Title: "OpenAI API-nøgle",
SubTitle: "Brug din egen nøgle",
Placeholder: "sk-xxx",
},
Endpoint: {
Title: "OpenAI Endpoint",
SubTitle: "Skal starte med http(s):// eller /api/openai som standard",
},
},
Azure: {
ApiKey: {
Title: "Azure Api Key",
SubTitle: "Hent din nøgle fra Azure-portalen",
Placeholder: "Azure Api Key",
},
Endpoint: {
Title: "Azure Endpoint",
SubTitle: "F.eks.: ",
},
ApiVerion: {
Title: "Azure Api Version",
SubTitle: "Hentet fra Azure-portalen",
},
},
Anthropic: {
ApiKey: {
Title: "Anthropic API-nøgle",
SubTitle: "Brug din egen Anthropic-nøgle",
Placeholder: "Anthropic API Key",
},
Endpoint: {
Title: "Endpoint-adresse",
SubTitle: "F.eks.: ",
},
ApiVerion: {
Title: "API-version (Claude)",
SubTitle: "Vælg den ønskede version",
},
},
Baidu: {
ApiKey: {
Title: "Baidu-nøgle",
SubTitle: "Din egen Baidu-nøgle",
Placeholder: "Baidu API Key",
},
SecretKey: {
Title: "Baidu hemmelig nøgle",
SubTitle: "Din egen hemmelige nøgle fra Baidu",
Placeholder: "Baidu Secret Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "Kan ikke ændres, se .env",
},
},
Tencent: {
ApiKey: {
Title: "Tencent-nøgle",
SubTitle: "Din egen nøgle fra Tencent",
Placeholder: "Tencent API Key",
},
SecretKey: {
Title: "Tencent hemmelig nøgle",
SubTitle: "Din egen hemmelige nøgle fra Tencent",
Placeholder: "Tencent Secret Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "Kan ikke ændres, se .env",
},
},
ByteDance: {
ApiKey: {
Title: "ByteDance-nøgle",
SubTitle: "Din egen nøgle til ByteDance",
Placeholder: "ByteDance API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
Alibaba: {
ApiKey: {
Title: "Alibaba-nøgle",
SubTitle: "Din egen Alibaba Cloud-nøgle",
Placeholder: "Alibaba Cloud API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
Moonshot: {
ApiKey: {
Title: "Moonshot-nøgle",
SubTitle: "Din egen Moonshot-nøgle",
Placeholder: "Moonshot API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
DeepSeek: {
ApiKey: {
Title: "DeepSeek-nøgle",
SubTitle: "Din egen DeepSeek-nøgle",
Placeholder: "DeepSeek API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
XAI: {
ApiKey: {
Title: "XAI-nøgle",
SubTitle: "Din egen XAI-nøgle",
Placeholder: "XAI API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
ChatGLM: {
ApiKey: {
Title: "ChatGLM-nøgle",
SubTitle: "Din egen ChatGLM-nøgle",
Placeholder: "ChatGLM API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
SiliconFlow: {
ApiKey: {
Title: "SiliconFlow-nøgle",
SubTitle: "Din egen SiliconFlow-nøgle",
Placeholder: "SiliconFlow API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
Stability: {
ApiKey: {
Title: "Stability-nøgle",
SubTitle: "Din egen Stability-nøgle",
Placeholder: "Stability API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
Iflytek: {
ApiKey: {
Title: "Iflytek API Key",
SubTitle: "Nøgle fra Iflytek",
Placeholder: "Iflytek API Key",
},
ApiSecret: {
Title: "Iflytek hemmelig nøgle",
SubTitle: "Hentet fra Iflytek",
Placeholder: "Iflytek API Secret",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
},
CustomModel: {
Title: "Egne modelnavne",
SubTitle: "Skriv komma-adskilte navne",
},
Google: {
ApiKey: {
Title: "Google-nøgle",
SubTitle: "Få din nøgle hos Google AI",
Placeholder: "Google AI API Key",
},
Endpoint: {
Title: "Adresse",
SubTitle: "F.eks.: ",
},
ApiVersion: {
Title: "API-version (til gemini-pro)",
SubTitle: "Vælg en bestemt version",
},
GoogleSafetySettings: {
Title: "Google sikkerhedsindstillinger",
SubTitle: "Vælg et niveau for indholdskontrol",
},
},
},
Model: "Model",
CompressModel: {
Title: "Opsummeringsmodel",
SubTitle: "Bruges til at korte historik ned og lave titel",
},
Temperature: {
Title: "Temperatur",
SubTitle: "Jo højere tal, jo mere kreativt svar",
},
TopP: {
Title: "Top P",
SubTitle: "Skal ikke ændres sammen med temperatur",
},
MaxTokens: {
Title: "Maks. længde",
SubTitle: "Hvor mange tokens (ord/stykker tekst) der kan bruges",
},
PresencePenalty: {
Title: "Nye emner",
SubTitle: "Jo højere tal, jo mere nyt indhold",
},
FrequencyPenalty: {
Title: "Gentagelsesstraf",
SubTitle: "Jo højere tal, jo mindre gentagelse",
},
TTS: {
Enable: {
Title: "Tænd for oplæsning (TTS)",
SubTitle: "Slå tekst-til-tale til",
},
Autoplay: {
Title: "Automatisk oplæsning",
SubTitle: "Laver lyd automatisk, hvis TTS er slået til",
},
Model: "Model",
Voice: {
Title: "Stemme",
SubTitle: "Hvilken stemme der bruges til lyd",
},
Speed: {
Title: "Hastighed",
SubTitle: "Hvor hurtigt der oplæses",
},
Engine: "TTS-motor",
},
Realtime: {
Enable: {
Title: "Live-chat",
SubTitle: "Slå live-svar til",
},
Provider: {
Title: "Modeludbyder",
SubTitle: "Vælg forskellig udbyder",
},
Model: {
Title: "Model",
SubTitle: "Vælg en model",
},
ApiKey: {
Title: "API-nøgle",
SubTitle: "Din nøgle",
Placeholder: "API-nøgle",
},
Azure: {
Endpoint: {
Title: "Adresse",
SubTitle: "Endpoint til Azure",
},
Deployment: {
Title: "Udrulningsnavn",
SubTitle: "Navn for dit Azure-setup",
},
},
Temperature: {
Title: "Temperatur",
SubTitle: "Højere tal = mere varierede svar",
},
},
},
Store: {
DefaultTopic: "Ny samtale",
BotHello: "Hej! Hvordan kan jeg hjælpe dig i dag?",
Error: "Noget gik galt. Prøv igen senere.",
Prompt: {
History: (content: string) =>
"Her er et kort resume af, hvad vi har snakket om: " + content,
Topic:
"Find en kort overskrift med 4-5 ord om emnet. Ingen tegnsætning eller anførselstegn.",
Summarize:
"Skriv et kort resumé (under 200 ord) af vores samtale til senere brug.",
},
},
Copy: {
Success: "Kopieret",
Failed: "Kunne ikke kopiere. Giv adgang til udklipsholder.",
},
Download: {
Success: "Filen er downloadet.",
Failed: "Download fejlede.",
},
Context: {
Toast: (x: any) => `Inkluderer ${x} ekstra prompts`,
Edit: "Chatindstillinger",
Add: "Tilføj prompt",
Clear: "Kontekst ryddet",
Revert: "Fortryd",
},
Discovery: {
Name: "Søgning og plugins",
},
Mcp: {
Name: "MCP",
},
FineTuned: {
Sysmessage: "Du er en hjælper, der skal...",
},
SearchChat: {
Name: "Søg",
Page: {
Title: "Søg i tidligere chats",
Search: "Skriv her for at søge",
NoResult: "Ingen resultater",
NoData: "Ingen data",
Loading: "Henter...",
SubTitle: (count: number) => `Fandt ${count} resultater`,
},
Item: {
View: "Vis",
},
},
Plugin: {
Name: "Plugin",
Page: {
Title: "Plugins",
SubTitle: (count: number) => `${count} plugins`,
Search: "Søg plugin",
Create: "Opret nyt",
Find: "Du kan finde flere plugins på GitHub: ",
},
Item: {
Info: (count: number) => `${count} metode`,
View: "Vis",
Edit: "Rediger",
Delete: "Slet",
DeleteConfirm: "Vil du slette?",
},
Auth: {
None: "Ingen",
Basic: "Basic",
Bearer: "Bearer",
Custom: "Tilpasset",
CustomHeader: "Parameternavn",
Token: "Token",
Proxy: "Brug Proxy",
ProxyDescription: "Løs CORS-problemer med Proxy",
Location: "Sted",
LocationHeader: "Header",
LocationQuery: "Query",
LocationBody: "Body",
},
EditModal: {
Title: (readonly: boolean) =>
`Rediger Plugin ${readonly ? "(skrivebeskyttet)" : ""}`,
Download: "Download",
Auth: "Godkendelsestype",
Content: "OpenAPI Schema",
Load: "Hent fra URL",
Method: "Metode",
Error: "Fejl i OpenAPI Schema",
},
},
Mask: {
Name: "Persona",
Page: {
Title: "Prompts som personaer",
SubTitle: (count: number) => `${count} skabeloner`,
Search: "Søg skabeloner",
Create: "Opret ny",
},
Item: {
Info: (count: number) => `${count} prompts`,
Chat: "Chat",
View: "Vis",
Edit: "Rediger",
Delete: "Slet",
DeleteConfirm: "Vil du slette?",
},
EditModal: {
Title: (readonly: boolean) =>
`Rediger skabelon ${readonly ? "(skrivebeskyttet)" : ""}`,
Download: "Download",
Clone: "Klon",
},
Config: {
Avatar: "Chat-avatar",
Name: "Chat-navn",
Sync: {
Title: "Brug globale indstillinger",
SubTitle: "Gældende for denne chat",
Confirm: "Erstat nuværende indstillinger med globale?",
},
HideContext: {
Title: "Skjul ekstra prompts",
SubTitle: "Vis dem ikke på chat-skærmen",
},
Artifacts: {
Title: "Brug Artefakter",
SubTitle: "Gør det muligt at vise HTML-sider",
},
CodeFold: {
Title: "Fold kode sammen",
SubTitle: "Luk/åbn lange kodestykker automatisk",
},
Share: {
Title: "Del denne persona",
SubTitle: "Få et link til denne skabelon",
Action: "Kopiér link",
},
},
},
NewChat: {
Return: "Tilbage",
Skip: "Start straks",
Title: "Vælg en persona",
SubTitle: "Chat med den persona, du vælger",
More: "Se flere",
NotShow: "Vis ikke igen",
ConfirmNoShow:
"Er du sikker på, at du ikke vil se det igen? Du kan altid slå det til under indstillinger.",
},
UI: {
Confirm: "OK",
Cancel: "Fortryd",
Close: "Luk",
Create: "Opret",
Edit: "Rediger",
Export: "Eksporter",
Import: "Importér",
Sync: "Synk",
Config: "Konfigurer",
},
Exporter: {
Description: {
Title: "Kun beskeder efter sidste rydning vises",
},
Model: "Model",
Messages: "Beskeder",
Topic: "Emne",
Time: "Tid",
},
URLCommand: {
Code: "Så ud til, at der var en kode i linket. Vil du bruge den?",
Settings: "Så ud til, at der var indstillinger i linket. Vil du bruge dem?",
},
SdPanel: {
Prompt: "Prompt",
NegativePrompt: "Negativ prompt",
PleaseInput: (name: string) => `Indtast: ${name}`,
AspectRatio: "Billedformat",
ImageStyle: "Stil",
OutFormat: "Uddataformat",
AIModel: "AI-model",
ModelVersion: "Version",
Submit: "Send",
ParamIsRequired: (name: string) => `${name} er påkrævet`,
Styles: {
D3Model: "3d-model",
AnalogFilm: "analog-film",
Anime: "anime",
Cinematic: "cinematisk",
ComicBook: "tegneserie",
DigitalArt: "digital-art",
Enhance: "enhance",
FantasyArt: "fantasy-art",
Isometric: "isometric",
LineArt: "line-art",
LowPoly: "low-poly",
ModelingCompound: "modeling-compound",
NeonPunk: "neon-punk",
Origami: "origami",
Photographic: "fotografisk",
PixelArt: "pixel-art",
TileTexture: "tile-texture",
},
},
Sd: {
SubTitle: (count: number) => `${count} billeder`,
Actions: {
Params: "Se indstillinger",
Copy: "Kopiér prompt",
Delete: "Slet",
Retry: "Prøv igen",
ReturnHome: "Til forsiden",
History: "Historik",
},
EmptyRecord: "Ingen billeder endnu",
Status: {
Name: "Status",
Success: "Ok",
Error: "Fejl",
Wait: "Venter",
Running: "I gang",
},
Danger: {
Delete: "Vil du slette?",
},
GenerateParams: "Genereringsvalg",
Detail: "Detaljer",
},
};
export default da;

View File

@@ -2,6 +2,7 @@ import cn from "./cn";
import en from "./en"; import en from "./en";
import pt from "./pt"; import pt from "./pt";
import tw from "./tw"; import tw from "./tw";
import da from "./da";
import id from "./id"; import id from "./id";
import fr from "./fr"; import fr from "./fr";
import es from "./es"; import es from "./es";
@@ -30,6 +31,7 @@ const ALL_LANGS = {
en, en,
tw, tw,
pt, pt,
da,
jp, jp,
ko, ko,
id, id,
@@ -56,6 +58,7 @@ export const ALL_LANG_OPTIONS: Record<Lang, string> = {
en: "English", en: "English",
pt: "Português", pt: "Português",
tw: "繁體中文", tw: "繁體中文",
da: "Dansk",
jp: "日本語", jp: "日本語",
ko: "한국어", ko: "한국어",
id: "Indonesia", id: "Indonesia",
@@ -141,6 +144,7 @@ export const STT_LANG_MAP: Record<Lang, string> = {
en: "en-US", en: "en-US",
pt: "pt-BR", pt: "pt-BR",
tw: "zh-TW", tw: "zh-TW",
da: "da-DK",
jp: "ja-JP", jp: "ja-JP",
ko: "ko-KR", ko: "ko-KR",
id: "id-ID", id: "id-ID",

View File

@@ -2,7 +2,11 @@ import { useEffect, useState } from "react";
import { showToast } from "./components/ui-lib"; import { showToast } from "./components/ui-lib";
import Locale from "./locales"; import Locale from "./locales";
import { RequestMessage } from "./client/api"; import { RequestMessage } from "./client/api";
import { ServiceProvider } from "./constant"; import {
REQUEST_TIMEOUT_MS,
REQUEST_TIMEOUT_MS_FOR_THINKING,
ServiceProvider,
} from "./constant";
// import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http"; // import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
import { fetch as tauriStreamFetch } from "./utils/stream"; import { fetch as tauriStreamFetch } from "./utils/stream";
import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant"; import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
@@ -292,6 +296,20 @@ export function isDalle3(model: string) {
return "dall-e-3" === model; return "dall-e-3" === model;
} }
export function getTimeoutMSByModel(model: string) {
model = model.toLowerCase();
if (
model.startsWith("dall-e") ||
model.startsWith("dalle") ||
model.startsWith("o1") ||
model.startsWith("o3") ||
model.includes("deepseek-r") ||
model.includes("-thinking")
)
return REQUEST_TIMEOUT_MS_FOR_THINKING;
return REQUEST_TIMEOUT_MS;
}
export function getModelSizes(model: string): ModelSize[] { export function getModelSizes(model: string): ModelSize[] {
if (isDalle3(model)) { if (isDalle3(model)) {
return ["1024x1024", "1792x1024", "1024x1792"]; return ["1024x1024", "1792x1024", "1024x1792"];

View File

@@ -400,6 +400,7 @@ export function streamWithThink(
   let responseRes: Response;
   let isInThinkingMode = false;
   let lastIsThinking = false;
+  let lastIsThinkingTagged = false; // between <think> and </think> tags
 
   // animate response to make it looks smooth
   function animateResponseText() {
@@ -579,6 +580,23 @@
         if (!chunk?.content || chunk.content.length === 0) {
           return;
         }
+
+        // deal with <think> and </think> tags start
+        if (!chunk.isThinking) {
+          if (chunk.content.startsWith("<think>")) {
+            chunk.isThinking = true;
+            chunk.content = chunk.content.slice(7).trim();
+            lastIsThinkingTagged = true;
+          } else if (chunk.content.endsWith("</think>")) {
+            chunk.isThinking = false;
+            chunk.content = chunk.content.slice(0, -8).trim();
+            lastIsThinkingTagged = false;
+          } else if (lastIsThinkingTagged) {
+            chunk.isThinking = true;
+          }
+        }
+        // deal with <think> and </think> tags end
+
         // Check if thinking mode changed
         const isThinkingChanged = lastIsThinking !== chunk.isThinking;
         lastIsThinking = chunk.isThinking;
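
To make the tag handling concrete, here is a standalone re-implementation of just these rules applied to a toy chunk sequence (a sketch for illustration, not the repo's streaming plumbing): a chunk starting with `<think>` flips into thinking mode, one ending with `</think>` flips out, and untagged chunks in between inherit the flag.

```ts
// Sketch: classify each streamed chunk as reasoning or answer text.
type Chunk = { content: string; isThinking: boolean };

function tagThinking(chunks: Chunk[]): Chunk[] {
  let lastIsThinkingTagged = false; // true while between <think> and </think>
  return chunks.map((chunk) => {
    if (!chunk.isThinking) {
      if (chunk.content.startsWith("<think>")) {
        // opening tag: strip it and enter thinking mode
        chunk = { content: chunk.content.slice(7).trim(), isThinking: true };
        lastIsThinkingTagged = true;
      } else if (chunk.content.endsWith("</think>")) {
        // closing tag: strip it and leave thinking mode
        chunk = { content: chunk.content.slice(0, -8).trim(), isThinking: false };
        lastIsThinkingTagged = false;
      } else if (lastIsThinkingTagged) {
        chunk = { ...chunk, isThinking: true };
      }
    }
    return chunk;
  });
}

console.log(
  tagThinking([
    { content: "<think>Check the sums", isThinking: false },
    { content: " once more.", isThinking: false },
    { content: "</think>", isThinking: false },
    { content: "The answer is 42.", isThinking: false },
  ]),
);
// -> [
//   { content: "Check the sums", isThinking: true },
//   { content: " once more.", isThinking: true },
//   { content: "", isThinking: false },      // tag-only chunk, content emptied
//   { content: "The answer is 42.", isThinking: false },
// ]
```

Note the stateful `lastIsThinkingTagged` flag is what lets untagged middle chunks be classified correctly even though each chunk is inspected in isolation.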