Compare commits


18 Commits

Author SHA1 Message Date
river
377480b448 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-16 10:50:07 +08:00
river
8bd0d6a1a7 chore: Update NextChatAI domain from nextchat.dev to nextchat.club 2025-02-16 10:48:54 +08:00
RiverRay
12863f5213 Merge pull request #6204 from bestsanmao/ali_bytedance_reasoning_content
add 3 type of reasoning_content support (+deepseek-r1@OpenAI @Alibaba @ByteDance), parse <think></think> from SSE
2025-02-13 14:53:47 +08:00
suruiqiang
cf140d4228 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into ali_bytedance_reasoning_content 2025-02-12 17:54:50 +08:00
suruiqiang
476d946f96 fix bug (trim eats space or \n mistakenly), optimize timeout by model 2025-02-12 17:49:54 +08:00
suruiqiang
9714258322 support deepseek-r1@OpenAI's reasoning_content, parse <think></think> from stream 2025-02-11 18:57:16 +08:00
RiverRay
48cd4b11b5 Merge pull request #6190 from siliconflow/refine-emoji-siliconflow
Fix model icon on SiliconFlow
2025-02-11 18:37:47 +08:00
RiverRay
77c78b230a Merge pull request #6193 from siliconflow/get-models-siliconflow
Model listing of SiliconFlow
2025-02-11 18:37:22 +08:00
RiverRay
b44686b887 Merge pull request #6189 from bestsanmao/bug_fix
fix avatar for export message preview and saved image
2025-02-11 18:36:50 +08:00
RiverRay
34bdd4b945 Merge pull request #6194 from siliconflow/vl-support-on-sf
Support VLM on SiliconFlow
2025-02-11 18:35:02 +08:00
suruiqiang
b0758cccde optimization 2025-02-11 16:08:30 +08:00
suruiqiang
98a11e56d2 support alibaba and bytedance's reasoning_content 2025-02-11 12:46:46 +08:00
Shenghang Tsai
86f86962fb Support VLM on SiliconFlow 2025-02-10 13:39:06 +08:00
Shenghang Tsai
2137aa65bf Model listing of SiliconFlow 2025-02-10 11:03:49 +08:00
Shenghang Tsai
18fa2cc30d fix model icon on siliconflow 2025-02-09 18:49:26 +08:00
Shenghang Tsai
0bfc648085 fix model icon on siliconflow 2025-02-09 18:47:57 +08:00
suruiqiang
9f91c2d05c fix avatar for export message preview and saved image 2025-02-09 16:52:46 +08:00
RiverRay
a029b4330b Merge pull request #6188 from ChatGPTNextWeb/Leizhenpeng-patch-4
Update LICENSE
2025-02-09 11:05:43 +08:00
19 changed files with 396 additions and 330 deletions

View File

@@ -1,6 +1,6 @@
 <div align="center">
-<a href='https://nextchat.dev/chat'>
+<a href='https://nextchat.club'>
 <img src="https://github.com/user-attachments/assets/83bdcc07-ae5e-4954-a53a-ac151ba6ccf3" width="1000" alt="icon"/>
 </a>
@@ -23,9 +23,10 @@ English / [简体中文](./README_CN.md)
 [![Linux][Linux-image]][download-url]
 [NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
-[saas-url]: https://nextchat.dev/chat?utm_source=readme
+[saas-url]: https://nextchat.club?utm_source=readme
 [saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
 [web-url]: https://app.nextchat.dev/
 [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases

View File

@@ -8,7 +8,7 @@
 一键免费部署你的私人 ChatGPT 网页应用,支持 Claude, GPT4 & Gemini Pro 模型。
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
 [<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

View File

@@ -5,7 +5,7 @@
 ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
+[NextChatAI](https://nextchat.club?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
 [<img src="https://vercel.com/button" alt="Zeaburでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)

View File

@@ -14,8 +14,12 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
   if (config.disableGPT4) {
     remoteModelRes.data = remoteModelRes.data.filter(
       (m) =>
-        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1") || m.id.startsWith("o3")) ||
-        m.id.startsWith("gpt-4o-mini"),
+        !(
+          m.id.startsWith("gpt-4") ||
+          m.id.startsWith("chatgpt-4o") ||
+          m.id.startsWith("o1") ||
+          m.id.startsWith("o3")
+        ) || m.id.startsWith("gpt-4o-mini"),
     );
   }
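The regrouped predicate above keeps the original behaviour: when disableGPT4 is on, GPT-4-class ids are filtered out unless they are gpt-4o-mini. A small illustrative sketch of the predicate (the ids are examples, not an exhaustive list):

// Illustrative TypeScript sketch of the filter predicate shown above.
const keep = (id: string) =>
  !(
    id.startsWith("gpt-4") ||
    id.startsWith("chatgpt-4o") ||
    id.startsWith("o1") ||
    id.startsWith("o3")
  ) || id.startsWith("gpt-4o-mini");

keep("gpt-4-turbo");   // false: hidden when disableGPT4 is enabled
keep("o3-mini");       // false: also treated as a GPT-4-class model
keep("gpt-4o-mini");   // true: explicitly allowed back in
keep("gpt-3.5-turbo"); // true: unaffected by the filter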

View File

@@ -1,12 +1,13 @@
"use client"; "use client";
import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
import { import {
ApiPath, useAccessStore,
Alibaba, useAppConfig,
ALIBABA_BASE_URL, useChatStore,
REQUEST_TIMEOUT_MS, ChatMessageTool,
} from "@/app/constant"; usePluginStore,
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; } from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import { import {
ChatOptions, ChatOptions,
getHeaders, getHeaders,
@@ -15,14 +16,12 @@ import {
   SpeechOptions,
   MultimodalContent,
 } from "../api";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -92,7 +91,10 @@ export class QwenApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: getMessageTextContent(v),
+      content:
+        v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : getMessageTextContent(v),
     }));

     const modelConfig = {
@@ -122,134 +124,118 @@ export class QwenApi implements LLMApi {
options.onController?.(controller); options.onController?.(controller);
try { try {
const headers = {
...getHeaders(),
"X-DashScope-SSE": shouldStream ? "enable" : "disable",
};
const chatPath = this.path(Alibaba.ChatPath); const chatPath = this.path(Alibaba.ChatPath);
const chatPayload = { const chatPayload = {
method: "POST", method: "POST",
body: JSON.stringify(requestPayload), body: JSON.stringify(requestPayload),
signal: controller.signal, signal: controller.signal,
headers: { headers: headers,
...getHeaders(),
"X-DashScope-SSE": shouldStream ? "enable" : "disable",
},
}; };
// make a fetch request // make a fetch request
const requestTimeoutId = setTimeout( const requestTimeoutId = setTimeout(
() => controller.abort(), () => controller.abort(),
REQUEST_TIMEOUT_MS, getTimeoutMSByModel(options.config.model),
); );
if (shouldStream) { if (shouldStream) {
let responseText = ""; const [tools, funcs] = usePluginStore
let remainText = ""; .getState()
let finished = false; .getAsTools(
let responseRes: Response; useChatStore.getState().currentSession().mask?.plugin || [],
);
return streamWithThink(
chatPath,
requestPayload,
headers,
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.output.choices as Array<{
message: {
content: string | null;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
// animate response to make it looks smooth if (!choices?.length) return { isThinking: false, content: "" };
function animateResponseText() {
if (finished || controller.signal.aborted) {
responseText += remainText;
console.log("[Response Animation] finished");
if (responseText?.length === 0) {
options.onError?.(new Error("empty response from server"));
}
return;
}
if (remainText.length > 0) { const tool_calls = choices[0]?.message?.tool_calls;
const fetchCount = Math.max(1, Math.round(remainText.length / 60)); if (tool_calls?.length > 0) {
const fetchText = remainText.slice(0, fetchCount); const index = tool_calls[0]?.index;
responseText += fetchText; const id = tool_calls[0]?.id;
remainText = remainText.slice(fetchCount); const args = tool_calls[0]?.function?.arguments;
options.onUpdate?.(responseText, fetchText); if (id) {
} runTools.push({
id,
requestAnimationFrame(animateResponseText); type: tool_calls[0]?.type,
} function: {
name: tool_calls[0]?.function?.name as string,
// start animaion arguments: args,
animateResponseText(); },
});
const finish = () => { } else {
if (!finished) { // @ts-ignore
finished = true; runTools[index]["function"]["arguments"] += args;
options.onFinish(responseText + remainText, responseRes); }
}
};
controller.signal.onabort = finish;
fetchEventSource(chatPath, {
fetch: fetch as any,
...chatPayload,
async onopen(res) {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log(
"[Alibaba] request response content type: ",
contentType,
);
responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
} }
const reasoning = choices[0]?.message?.reasoning_content;
const content = choices[0]?.message?.content;
// Skip if both content and reasoning_content are empty or null
if ( if (
!res.ok || (!reasoning || reasoning.length === 0) &&
!res.headers (!content || content.length === 0)
.get("content-type")
?.startsWith(EventStreamContentType) ||
res.status !== 200
) { ) {
const responseTexts = [responseText]; return {
let extraInfo = await res.clone().text(); isThinking: false,
try { content: "",
const resJson = await res.clone().json(); };
extraInfo = prettyObject(resJson);
} catch {}
if (res.status === 401) {
responseTexts.push(Locale.Error.Unauthorized);
}
if (extraInfo) {
responseTexts.push(extraInfo);
}
responseText = responseTexts.join("\n\n");
return finish();
} }
},
onmessage(msg) { if (reasoning && reasoning.length > 0) {
if (msg.data === "[DONE]" || finished) { return {
return finish(); isThinking: true,
} content: reasoning,
const text = msg.data; };
try { } else if (content && content.length > 0) {
const json = JSON.parse(text); return {
const choices = json.output.choices as Array<{ isThinking: false,
message: { content: string }; content: content,
}>; };
const delta = choices[0]?.message?.content;
if (delta) {
remainText += delta;
}
} catch (e) {
console.error("[Request] parse error", text, msg);
} }
return {
isThinking: false,
content: "",
};
}, },
onclose() { // processToolMessage, include tool_calls message and tool call results
finish(); (
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
requestPayload?.input?.messages?.splice(
requestPayload?.input?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
}, },
onerror(e) { options,
options.onError?.(e); );
throw e;
},
openWhenHidden: true,
});
} else { } else {
const res = await fetch(chatPath, chatPayload); const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId); clearTimeout(requestTimeoutId);

View File

@@ -1,10 +1,5 @@
"use client"; "use client";
import { import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
ApiPath,
Baidu,
BAIDU_BASE_URL,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getAccessToken } from "@/app/utils/baidu"; import { getAccessToken } from "@/app/utils/baidu";
@@ -23,7 +18,7 @@ import {
} from "@fortaine/fetch-event-source"; } from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format"; import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client"; import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils"; import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
import { fetch } from "@/app/utils/stream"; import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse { export interface OpenAIListModelResponse {
@@ -155,7 +150,7 @@ export class ErnieApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {

View File

@@ -1,11 +1,12 @@
"use client"; "use client";
import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant";
import { import {
ApiPath, useAccessStore,
ByteDance, useAppConfig,
BYTEDANCE_BASE_URL, useChatStore,
REQUEST_TIMEOUT_MS, ChatMessageTool,
} from "@/app/constant"; usePluginStore,
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; } from "@/app/store";
import { import {
ChatOptions, ChatOptions,
@@ -15,14 +16,14 @@ import {
   MultimodalContent,
   SpeechOptions,
 } from "../api";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
+import { streamWithThink } from "@/app/utils/chat";
 import { getClientConfig } from "@/app/config/client";
 import { preProcessImageContent } from "@/app/utils/chat";
+import {
+  getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -34,7 +35,7 @@ export interface OpenAIListModelResponse {
   }>;
 }

-interface RequestPayload {
+interface RequestPayloadForByteDance {
   messages: {
     role: "system" | "user" | "assistant";
     content: string | MultimodalContent[];
@@ -86,7 +87,10 @@ export class DoubaoApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = await preProcessImageContent(v.content);
+      const content =
+        v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
@@ -99,7 +103,7 @@ export class DoubaoApi implements LLMApi {
     };

     const shouldStream = !!options.config.stream;
-    const requestPayload: RequestPayload = {
+    const requestPayload: RequestPayloadForByteDance = {
       messages,
       stream: shouldStream,
       model: modelConfig.model,
@@ -124,119 +128,101 @@ export class DoubaoApi implements LLMApi {
// make a fetch request // make a fetch request
const requestTimeoutId = setTimeout( const requestTimeoutId = setTimeout(
() => controller.abort(), () => controller.abort(),
REQUEST_TIMEOUT_MS, getTimeoutMSByModel(options.config.model),
); );
if (shouldStream) { if (shouldStream) {
let responseText = ""; const [tools, funcs] = usePluginStore
let remainText = ""; .getState()
let finished = false; .getAsTools(
let responseRes: Response; useChatStore.getState().currentSession().mask?.plugin || [],
);
return streamWithThink(
chatPath,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string | null;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
// animate response to make it looks smooth if (!choices?.length) return { isThinking: false, content: "" };
function animateResponseText() {
if (finished || controller.signal.aborted) { const tool_calls = choices[0]?.delta?.tool_calls;
responseText += remainText; if (tool_calls?.length > 0) {
console.log("[Response Animation] finished"); const index = tool_calls[0]?.index;
if (responseText?.length === 0) { const id = tool_calls[0]?.id;
options.onError?.(new Error("empty response from server")); const args = tool_calls[0]?.function?.arguments;
} if (id) {
return; runTools.push({
} id,
type: tool_calls[0]?.type,
if (remainText.length > 0) { function: {
const fetchCount = Math.max(1, Math.round(remainText.length / 60)); name: tool_calls[0]?.function?.name as string,
const fetchText = remainText.slice(0, fetchCount); arguments: args,
responseText += fetchText; },
remainText = remainText.slice(fetchCount); });
options.onUpdate?.(responseText, fetchText); } else {
} // @ts-ignore
runTools[index]["function"]["arguments"] += args;
requestAnimationFrame(animateResponseText); }
}
// start animaion
animateResponseText();
const finish = () => {
if (!finished) {
finished = true;
options.onFinish(responseText + remainText, responseRes);
}
};
controller.signal.onabort = finish;
fetchEventSource(chatPath, {
fetch: fetch as any,
...chatPayload,
async onopen(res) {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log(
"[ByteDance] request response content type: ",
contentType,
);
responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
} }
const reasoning = choices[0]?.delta?.reasoning_content;
const content = choices[0]?.delta?.content;
// Skip if both content and reasoning_content are empty or null
if ( if (
!res.ok || (!reasoning || reasoning.length === 0) &&
!res.headers (!content || content.length === 0)
.get("content-type")
?.startsWith(EventStreamContentType) ||
res.status !== 200
) { ) {
const responseTexts = [responseText]; return {
let extraInfo = await res.clone().text(); isThinking: false,
try { content: "",
const resJson = await res.clone().json(); };
extraInfo = prettyObject(resJson);
} catch {}
if (res.status === 401) {
responseTexts.push(Locale.Error.Unauthorized);
}
if (extraInfo) {
responseTexts.push(extraInfo);
}
responseText = responseTexts.join("\n\n");
return finish();
} }
},
onmessage(msg) { if (reasoning && reasoning.length > 0) {
if (msg.data === "[DONE]" || finished) { return {
return finish(); isThinking: true,
} content: reasoning,
const text = msg.data; };
try { } else if (content && content.length > 0) {
const json = JSON.parse(text); return {
const choices = json.choices as Array<{ isThinking: false,
delta: { content: string }; content: content,
}>; };
const delta = choices[0]?.delta?.content;
if (delta) {
remainText += delta;
}
} catch (e) {
console.error("[Request] parse error", text, msg);
} }
return {
isThinking: false,
content: "",
};
}, },
onclose() { // processToolMessage, include tool_calls message and tool call results
finish(); (
requestPayload: RequestPayloadForByteDance,
toolCallMessage: any,
toolCallResult: any[],
) => {
requestPayload?.messages?.splice(
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
}, },
onerror(e) { options,
options.onError?.(e); );
throw e;
},
openWhenHidden: true,
});
} else { } else {
const res = await fetch(chatPath, chatPayload); const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId); clearTimeout(requestTimeoutId);

View File

@@ -1,12 +1,6 @@
"use client"; "use client";
// azure and openai, using same models. so using same LLMApi. // azure and openai, using same models. so using same LLMApi.
import { import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
ApiPath,
DEEPSEEK_BASE_URL,
DeepSeek,
REQUEST_TIMEOUT_MS,
REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import { import {
useAccessStore, useAccessStore,
useAppConfig, useAppConfig,
@@ -26,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -116,16 +111,10 @@ export class DeepSeekApi implements LLMApi {
       headers: getHeaders(),
     };

-    // console.log(chatPayload);
-    const isR1 =
-      options.config.model.endsWith("-reasoner") ||
-      options.config.model.endsWith("-r1");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
@@ -176,8 +165,8 @@ export class DeepSeekApi implements LLMApi {
         // Skip if both content and reasoning_content are empty or null
         if (
-          (!reasoning || reasoning.trim().length === 0) &&
-          (!content || content.trim().length === 0)
+          (!reasoning || reasoning.length === 0) &&
+          (!content || content.length === 0)
         ) {
           return {
             isThinking: false,
@@ -185,12 +174,12 @@ export class DeepSeekApi implements LLMApi {
           };
         }

-        if (reasoning && reasoning.trim().length > 0) {
+        if (reasoning && reasoning.length > 0) {
           return {
             isThinking: true,
             content: reasoning,
           };
-        } else if (content && content.trim().length > 0) {
+        } else if (content && content.length > 0) {
           return {
             isThinking: false,
             content: content,
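Every parseSSE callback in this change returns the same { isThinking, content } shape that streamWithThink consumes. A condensed standalone sketch of that mapping, with a hypothetical delta type standing in for the provider-specific choices[0].delta / choices[0].message objects (this helper is not part of the repository):

// Hypothetical helper; field names mirror the diffs above.
interface StreamDelta {
  content?: string | null;
  reasoning_content?: string | null;
}

function toThinkingChunk(delta: StreamDelta): { isThinking: boolean; content: string } {
  const reasoning = delta.reasoning_content ?? "";
  const content = delta.content ?? "";
  if (reasoning.length > 0) {
    // reasoning tokens are rendered as the collapsible "thinking" block
    return { isThinking: true, content: reasoning };
  }
  if (content.length > 0) {
    // ordinary answer tokens
    return { isThinking: false, content };
  }
  // both empty: emit nothing for this chunk
  return { isThinking: false, content: "" };
}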

View File

@@ -1,10 +1,5 @@
"use client"; "use client";
import { import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
ApiPath,
CHATGLM_BASE_URL,
ChatGLM,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { import {
useAccessStore, useAccessStore,
useAppConfig, useAppConfig,
@@ -21,7 +16,11 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 import { preProcessImageContent } from "@/app/utils/chat";
@@ -191,7 +190,7 @@ export class ChatGLMApi implements LLMApi {
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (modelType === "image" || modelType === "video") {

View File

@@ -1,9 +1,4 @@
-import {
-  ApiPath,
-  Google,
-  REQUEST_TIMEOUT_MS,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, Google } from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -27,6 +22,7 @@ import {
   getMessageTextContent,
   getMessageImages,
   isVisionModel,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { nanoid } from "nanoid";
@@ -206,7 +202,7 @@ export class GeminiProApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {

View File

@@ -8,7 +8,6 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -22,7 +21,7 @@ import {
   preProcessImageContent,
   uploadImage,
   base64Image2Blob,
-  stream,
+  streamWithThink,
 } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
@@ -42,6 +41,7 @@ import {
   getMessageTextContent,
   isVisionModel,
   isDalle3 as _isDalle3,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
@@ -294,7 +294,7 @@ export class ChatGPTApi implements LLMApi {
         useChatStore.getState().currentSession().mask?.plugin || [],
       );
       // console.log("getAsTools", tools, funcs);
-      stream(
+      streamWithThink(
         chatPath,
         requestPayload,
         getHeaders(),
@@ -309,8 +309,12 @@ export class ChatGPTApi implements LLMApi {
             delta: {
               content: string;
               tool_calls: ChatMessageTool[];
+              reasoning_content: string | null;
             };
           }>;
+
+          if (!choices?.length) return { isThinking: false, content: "" };
+
           const tool_calls = choices[0]?.delta?.tool_calls;
           if (tool_calls?.length > 0) {
             const id = tool_calls[0]?.id;
@@ -330,7 +334,37 @@ export class ChatGPTApi implements LLMApi {
               runTools[index]["function"]["arguments"] += args;
             }
           }
-          return choices[0]?.delta?.content;
+
+          const reasoning = choices[0]?.delta?.reasoning_content;
+          const content = choices[0]?.delta?.content;
+
+          // Skip if both content and reasoning_content are empty or null
+          if (
+            (!reasoning || reasoning.length === 0) &&
+            (!content || content.length === 0)
+          ) {
+            return {
+              isThinking: false,
+              content: "",
+            };
+          }
+
+          if (reasoning && reasoning.length > 0) {
+            return {
+              isThinking: true,
+              content: reasoning,
+            };
+          } else if (content && content.length > 0) {
+            return {
+              isThinking: false,
+              content: content,
+            };
+          }
+
+          return {
+            isThinking: false,
+            content: "",
+          };
         },
         // processToolMessage, include tool_calls message and tool call results
         (
@@ -362,9 +396,7 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3
-          ? REQUEST_TIMEOUT_MS_FOR_THINKING
-          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        getTimeoutMSByModel(options.config.model),
       );

       const res = await fetch(chatPath, chatPayload);

View File

@@ -4,7 +4,7 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
+  DEFAULT_MODELS,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -13,7 +13,7 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -25,12 +25,23 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  isVisionModel,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";

+export interface SiliconFlowListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
 export class SiliconflowApi implements LLMApi {
-  private disableListModels = true;
+  private disableListModels = false;

   path(path: string): string {
     const accessStore = useAccessStore.getState();
@@ -71,13 +82,16 @@ export class SiliconflowApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
       if (v.role === "assistant") {
         const content = getMessageTextContentWithoutThinking(v);
         messages.push({ role: v.role, content });
       } else {
-        const content = getMessageTextContent(v);
+        const content = visionModel
+          ? await preProcessImageContent(v.content)
+          : getMessageTextContent(v);
         messages.push({ role: v.role, content });
       }
     }
@@ -123,7 +137,7 @@ export class SiliconflowApi implements LLMApi {
     // Use extended timeout for thinking models as they typically require more processing time
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS_FOR_THINKING,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
@@ -238,6 +252,36 @@ export class SiliconflowApi implements LLMApi {
   }

   async models(): Promise<LLMModel[]> {
-    return [];
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(SiliconFlow.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as SiliconFlowListModelResponse;
+    const chatModels = resJson.data;
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    let seq = 1000; //同 Constant.ts 中的排序保持一致
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+      sorted: seq++,
+      provider: {
+        id: "siliconflow",
+        providerName: "SiliconFlow",
+        providerType: "siliconflow",
+        sorted: 14,
+      },
+    }));
   }
 }

View File

@@ -1,5 +1,5 @@
"use client"; "use client";
import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant"; import { ApiPath, TENCENT_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { import {
@@ -17,7 +17,11 @@ import {
} from "@fortaine/fetch-event-source"; } from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format"; import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client"; import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, isVisionModel } from "@/app/utils"; import {
getMessageTextContent,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import mapKeys from "lodash-es/mapKeys"; import mapKeys from "lodash-es/mapKeys";
import mapValues from "lodash-es/mapValues"; import mapValues from "lodash-es/mapValues";
import isArray from "lodash-es/isArray"; import isArray from "lodash-es/isArray";
@@ -135,7 +139,7 @@ export class HunyuanApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {

View File

@@ -1,6 +1,6 @@
"use client"; "use client";
// azure and openai, using same models. so using same LLMApi. // azure and openai, using same models. so using same LLMApi.
import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant"; import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
import { import {
useAccessStore, useAccessStore,
useAppConfig, useAppConfig,
@@ -17,6 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
+import { getTimeoutMSByModel } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -103,7 +104,7 @@ export class XAIApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {

View File

@@ -66,11 +66,11 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     LlmIcon = BotIconGemma;
   } else if (modelName.startsWith("claude")) {
     LlmIcon = BotIconClaude;
-  } else if (modelName.startsWith("llama")) {
+  } else if (modelName.toLowerCase().includes("llama")) {
     LlmIcon = BotIconMeta;
   } else if (modelName.startsWith("mixtral")) {
     LlmIcon = BotIconMistral;
-  } else if (modelName.startsWith("deepseek")) {
+  } else if (modelName.toLowerCase().includes("deepseek")) {
     LlmIcon = BotIconDeepseek;
   } else if (modelName.startsWith("moonshot")) {
     LlmIcon = BotIconMoonshot;
@@ -85,7 +85,7 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
} else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) { } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
LlmIcon = BotIconDoubao; LlmIcon = BotIconDoubao;
} else if ( } else if (
modelName.startsWith("glm") || modelName.toLowerCase().includes("glm") ||
modelName.startsWith("cogview-") || modelName.startsWith("cogview-") ||
modelName.startsWith("cogvideox-") modelName.startsWith("cogvideox-")
) { ) {
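Switching from startsWith to toLowerCase().includes means namespaced or capitalised model ids resolve to the right icon. For example (the ids are illustrative):

// Old check: only a lowercase prefix matched.
"deepseek-chat".startsWith("deepseek");                              // true
"deepseek-ai/DeepSeek-R1".startsWith("deepseek");                    // false: icon fell through

// New check: substring match on the lowercased id.
"deepseek-ai/DeepSeek-R1".toLowerCase().includes("deepseek");        // true: DeepSeek icon
"meta-llama/Llama-3.3-70B-Instruct".toLowerCase().includes("llama"); // true: Meta icon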

View File

@@ -23,7 +23,6 @@ import CopyIcon from "../icons/copy.svg";
 import LoadingIcon from "../icons/three-dots.svg";
 import ChatGptIcon from "../icons/chatgpt.png";
 import ShareIcon from "../icons/share.svg";
-import BotIcon from "../icons/bot.png";
 import DownloadIcon from "../icons/download.svg";

 import { useEffect, useMemo, useRef, useState } from "react";
@@ -33,13 +32,13 @@ import dynamic from "next/dynamic";
 import NextImage from "next/image";
 import { toBlob, toPng } from "html-to-image";
-import { DEFAULT_MASK_AVATAR } from "../store/mask";
 import { prettyObject } from "../utils/format";
 import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
 import { getClientConfig } from "../config/client";
 import { type ClientApi, getClientApi } from "../client/api";
 import { getMessageTextContent } from "../utils";
+import { MaskAvatar } from "./mask";
 import clsx from "clsx";

 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
@@ -407,22 +406,6 @@ export function PreviewActions(props: {
   );
 }

-function ExportAvatar(props: { avatar: string }) {
-  if (props.avatar === DEFAULT_MASK_AVATAR) {
-    return (
-      <img
-        src={BotIcon.src}
-        width={30}
-        height={30}
-        alt="bot"
-        className="user-avatar"
-      />
-    );
-  }
-  return <Avatar avatar={props.avatar} />;
-}
-
 export function ImagePreviewer(props: {
   messages: ChatMessage[];
   topic: string;
@@ -546,9 +529,12 @@ export function ImagePreviewer(props: {
             github.com/ChatGPTNextWeb/ChatGPT-Next-Web
           </div>
           <div className={styles["icons"]}>
-            <ExportAvatar avatar={config.avatar} />
+            <MaskAvatar avatar={config.avatar} />
             <span className={styles["icon-space"]}>&</span>
-            <ExportAvatar avatar={mask.avatar} />
+            <MaskAvatar
+              avatar={mask.avatar}
+              model={session.mask.modelConfig.model}
+            />
           </div>
         </div>
         <div>
@@ -576,9 +562,14 @@ export function ImagePreviewer(props: {
             key={i}
           >
             <div className={styles["avatar"]}>
-              <ExportAvatar
-                avatar={m.role === "user" ? config.avatar : mask.avatar}
-              />
+              {m.role === "user" ? (
+                <Avatar avatar={config.avatar}></Avatar>
+              ) : (
+                <MaskAvatar
+                  avatar={session.mask.avatar}
+                  model={m.model || session.mask.modelConfig.model}
+                />
+              )}
             </div>

             <div className={styles["body"]}>

View File

@@ -258,6 +258,7 @@ export const ChatGLM = {
 export const SiliconFlow = {
   ExampleEndpoint: SILICONFLOW_BASE_URL,
   ChatPath: "v1/chat/completions",
+  ListModelPath: "v1/models?&sub_type=chat",
 };

 export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
@@ -462,6 +463,7 @@ export const VISION_MODEL_REGEXES = [
   /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
   /^dall-e-3$/, // Matches exactly "dall-e-3"
   /glm-4v/,
+  /vl/i,
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -814,5 +816,5 @@ export const internalAllowedWebDavEndpoints = [
 export const DEFAULT_GA_ID = "G-89WN60ZK2E";

-export const SAAS_CHAT_URL = "https://nextchat.dev/chat";
-export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github";
+export const SAAS_CHAT_URL = "https://nextchat.club";
+export const SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github";
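The new /vl/i entry lets vision-language model ids (for example SiliconFlow's Qwen-VL variants) pass the vision check, unless they are listed in EXCLUDE_VISION_MODEL_REGEXES. A reduced sketch of how the check behaves; the regex list here is a subset of the real constant and the ids are examples:

// Reduced subset of VISION_MODEL_REGEXES, for illustration only.
const regexes = [/vision/, /glm-4v/, /vl/i];
const looksLikeVisionModel = (m: string) => regexes.some((r) => r.test(m));

looksLikeVisionModel("Qwen/Qwen2-VL-72B-Instruct"); // true: matches /vl/i
looksLikeVisionModel("glm-4v-plus");                // true: matches /glm-4v/
looksLikeVisionModel("deepseek-chat");              // false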

View File

@@ -2,7 +2,11 @@ import { useEffect, useState } from "react";
 import { showToast } from "./components/ui-lib";
 import Locale from "./locales";
 import { RequestMessage } from "./client/api";
-import { ServiceProvider } from "./constant";
+import {
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+  ServiceProvider,
+} from "./constant";
 // import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
 import { fetch as tauriStreamFetch } from "./utils/stream";
 import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
@@ -292,6 +296,20 @@ export function isDalle3(model: string) {
return "dall-e-3" === model; return "dall-e-3" === model;
} }
export function getTimeoutMSByModel(model: string) {
model = model.toLowerCase();
if (
model.startsWith("dall-e") ||
model.startsWith("dalle") ||
model.startsWith("o1") ||
model.startsWith("o3") ||
model.includes("deepseek-r") ||
model.includes("-thinking")
)
return REQUEST_TIMEOUT_MS_FOR_THINKING;
return REQUEST_TIMEOUT_MS;
}
export function getModelSizes(model: string): ModelSize[] { export function getModelSizes(model: string): ModelSize[] {
if (isDalle3(model)) { if (isDalle3(model)) {
return ["1024x1024", "1792x1024", "1024x1792"]; return ["1024x1024", "1792x1024", "1024x1792"];

View File

@@ -400,6 +400,7 @@ export function streamWithThink(
   let responseRes: Response;
   let isInThinkingMode = false;
   let lastIsThinking = false;
+  let lastIsThinkingTagged = false; //between <think> and </think> tags

   // animate response to make it looks smooth
   function animateResponseText() {
@@ -579,6 +580,23 @@ export function streamWithThink(
         if (!chunk?.content || chunk.content.length === 0) {
           return;
         }
+
+        // deal with <think> and </think> tags start
+        if (!chunk.isThinking) {
+          if (chunk.content.startsWith("<think>")) {
+            chunk.isThinking = true;
+            chunk.content = chunk.content.slice(7).trim();
+            lastIsThinkingTagged = true;
+          } else if (chunk.content.endsWith("</think>")) {
+            chunk.isThinking = false;
+            chunk.content = chunk.content.slice(0, -8).trim();
+            lastIsThinkingTagged = false;
+          } else if (lastIsThinkingTagged) {
+            chunk.isThinking = true;
+          }
+        }
+        // deal with <think> and </think> tags start
+
         // Check if thinking mode changed
         const isThinkingChanged = lastIsThinking !== chunk.isThinking;
         lastIsThinking = chunk.isThinking;
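A condensed standalone restatement of the tag handling added above: a chunk that starts with <think> flips the stream into thinking mode, a chunk that ends with </think> flips it back, and chunks in between inherit the tagged state. The helper name and chunk shape here are illustrative, not exports of the repository:

// Mirrors lastIsThinkingTagged in streamWithThink.
let taggedThinking = false;

function applyThinkTags(chunk: { isThinking: boolean; content: string }) {
  if (!chunk.isThinking) {
    if (chunk.content.startsWith("<think>")) {
      chunk.isThinking = true;
      chunk.content = chunk.content.slice("<think>".length).trim();
      taggedThinking = true;
    } else if (chunk.content.endsWith("</think>")) {
      chunk.isThinking = false;
      chunk.content = chunk.content.slice(0, -"</think>".length).trim();
      taggedThinking = false;
    } else if (taggedThinking) {
      chunk.isThinking = true;
    }
  }
  return chunk;
}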