merge main

Author: lloydzhou
Date: 2024-10-09 18:27:23 +08:00
Commit: 4ae34ea3ee
31 changed files with 417 additions and 154 deletions

View File

@@ -63,7 +63,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
企业版咨询 (enterprise inquiries): **business@nextchat.dev**

-<img width="300" src="https://github.com/user-attachments/assets/3daeb7b6-ab63-4542-9141-2e4a12c80601">
+<img width="300" src="https://github.com/user-attachments/assets/3d4305ac-6e95-489e-884b-51d51db5f692">

## Features

@@ -100,6 +100,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
## What's New

+- 🚀 v2.15.4 The app now supports calling LLM APIs directly through Tauri, which is more secure! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
- 🚀 v2.14.0 Now supports Artifacts & SD
- 🚀 v2.10.1 Now supports the Google Gemini Pro model.

@@ -137,6 +138,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
## 最新动态 (What's New)

+- 🚀 v2.15.4 The desktop client now supports calling LLM APIs directly through Tauri, which is more secure! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 Plugins are now supported! Learn more: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
- 🚀 v2.14.0 Artifacts & SD are now supported.
- 🚀 v2.10.1 The Gemini Pro model is now supported.

@@ -332,9 +334,9 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model
Use `-all` to disable all default models, and `+all` to enable all default models.

-For Azure: use `modelName@azure=deploymentName` to customize the model name and deployment name.
+For Azure: use `modelName@Azure=deploymentName` to customize the model name and deployment name.
-> Example: `+gpt-3.5-turbo@azure=gpt35` will show the option `gpt35(Azure)` in the model list.
-> If you can only use Azure models, `-all,+gpt-3.5-turbo@azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
+> Example: `+gpt-3.5-turbo@Azure=gpt35` will show the option `gpt35(Azure)` in the model list.
+> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.

For ByteDance: use `modelName@bytedance=deploymentName` to customize the model name and deployment name.
> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show the option `Doubao-lite-4k(ByteDance)` in the model list.
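
These rules compose into a single comma-separated list. A minimal sketch, assuming the `CUSTOM_MODELS` environment variable that this README section documents (the deployment names are placeholders):

```
# Hide all default models, then expose one Azure deployment and one ByteDance endpoint.
CUSTOM_MODELS="-all,+gpt-3.5-turbo@Azure=gpt35,+Doubao-lite-4k@bytedance=ep-xxxxx-xxx"
```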

View File

@@ -216,9 +216,9 @@ ByteDance Api Url.
Controls the model list: use `+` to add a model, `-` to hide one, and `模型名=展示名` (`modelName=displayName`) to customize a model's display name, separated by commas.

-In Azure mode, use `modelName@azure=deploymentName` to configure the model name and deployment name (deploy-name).
+In Azure mode, use `modelName@Azure=deploymentName` to configure the model name and deployment name (deploy-name).
-> Example: `+gpt-3.5-turbo@azure=gpt35` shows a `gpt35(Azure)` option in the model list.
-> If you can only use Azure mode, setting `-all,+gpt-3.5-turbo@azure=gpt35` makes `gpt35(Azure)` the default for conversations.
+> Example: `+gpt-3.5-turbo@Azure=gpt35` shows a `gpt35(Azure)` option in the model list.
+> If you can only use Azure mode, setting `-all,+gpt-3.5-turbo@Azure=gpt35` makes `gpt35(Azure)` the default for conversations.

In ByteDance mode, use `modelName@bytedance=deploymentName` to configure the model name and deployment name (deploy-name).
> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` shows a `Doubao-lite-4k(ByteDance)` option in the model list.

View File

@@ -207,8 +207,8 @@ ByteDance API の URL。
Manages the model list: `+` adds a model, `-` hides one, and `モデル名=表示名` (`modelName=displayName`) customizes a model's display name, comma-separated.

-In Azure mode, the `modelName@azure=deploymentName` format configures the model name and deployment name (deploy-name).
+In Azure mode, the `modelName@Azure=deploymentName` format configures the model name and deployment name (deploy-name).
-> Example: `+gpt-3.5-turbo@azure=gpt35` shows a `gpt35(Azure)` option in the model list.
+> Example: `+gpt-3.5-turbo@Azure=gpt35` shows a `gpt35(Azure)` option in the model list.

In ByteDance mode, the `modelName@bytedance=deploymentName` format configures the model name and deployment name (deploy-name).
> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` shows a `Doubao-lite-4k(ByteDance)` option in the model list.

View File

@@ -6,7 +6,7 @@ import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { requestOpenai } from "./common";

-const ALLOWD_PATH = new Set(Object.values(OpenaiPath));
+const ALLOWED_PATH = new Set(Object.values(OpenaiPath));

function getModels(remoteModelRes: OpenAIListModelResponse) {
  const config = getServerSideConfig();

@@ -34,7 +34,7 @@ export async function handle(
  const subpath = params.path.join("/");

-  if (!ALLOWD_PATH.has(subpath)) {
+  if (!ALLOWED_PATH.has(subpath)) {
    console.log("[OpenAI Route] forbidden path ", subpath);
    return NextResponse.json(
      {

View File

@@ -231,7 +231,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
  function getConfig() {
    const modelConfig = chatStore.currentSession().mask.modelConfig;
-    const isGoogle = modelConfig.providerName == ServiceProvider.Google;
+    const isGoogle = modelConfig.providerName === ServiceProvider.Google;
    const isAzure = modelConfig.providerName === ServiceProvider.Azure;
    const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
    const isBaidu = modelConfig.providerName == ServiceProvider.Baidu;

View File

@@ -23,6 +23,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;

@@ -178,6 +179,7 @@ export class QwenApi implements LLMApi {
      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
+        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
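
The same one-line change repeats across the provider clients below (Baidu, ByteDance, iFlytek, Tencent, and the shared stream helper): `fetchEventSource` from `@fortaine/fetch-event-source` accepts a `fetch` option, so passing the Tauri-aware `fetch` from `app/utils/stream.ts` swaps the transport while leaving the SSE parsing unchanged. A minimal sketch of the pattern, with a placeholder endpoint and payload:

```ts
import { fetchEventSource } from "@fortaine/fetch-event-source";
import { fetch } from "@/app/utils/stream"; // falls back to window.fetch outside Tauri

// Hypothetical endpoint; the real clients build chatPath per provider.
fetchEventSource("https://example.com/v1/chat/completions", {
  fetch: fetch as any, // route the request through the custom transport
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ messages: [], stream: true }),
  onmessage(msg) {
    console.log("SSE event:", msg.data);
  },
});
```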

View File

@@ -8,7 +8,7 @@ import {
  ChatMessageTool,
} from "@/app/store";
import { getClientConfig } from "@/app/config/client";
-import { DEFAULT_API_HOST } from "@/app/constant";
+import { ANTHROPIC_BASE_URL } from "@/app/constant";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { preProcessImageContent, stream } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";

@@ -388,9 +388,7 @@ export class ClaudeApi implements LLMApi {
    if (baseUrl.trim().length === 0) {
      const isApp = !!getClientConfig()?.isApp;
-      baseUrl = isApp
-        ? DEFAULT_API_HOST + "/api/proxy/anthropic"
-        : ApiPath.Anthropic;
+      baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
    }

    if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {

View File

@@ -24,6 +24,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;

@@ -197,6 +198,7 @@ export class ErnieApi implements LLMApi {
      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
+        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);

View File

@@ -23,6 +23,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;

@@ -165,6 +166,7 @@ export class DoubaoApi implements LLMApi {
      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
+        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);

View File

@@ -16,7 +16,7 @@ import {
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
-import { DEFAULT_API_HOST } from "@/app/constant";
+import { GEMINI_BASE_URL } from "@/app/constant";
import {
  getMessageTextContent,

@@ -26,6 +26,7 @@ import {
import { preProcessImageContent } from "@/app/utils/chat";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
+import { fetch } from "@/app/utils/stream";

export class GeminiProApi implements LLMApi {
  path(path: string): string {

@@ -38,7 +39,7 @@ export class GeminiProApi implements LLMApi {
    const isApp = !!getClientConfig()?.isApp;
    if (baseUrl.length === 0) {
-      baseUrl = isApp ? DEFAULT_API_HOST + `/api/proxy/google` : ApiPath.Google;
+      baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);

View File

@@ -1,7 +1,7 @@
"use client";
import {
  ApiPath,
-  DEFAULT_API_HOST,
+  IFLYTEK_BASE_URL,
  Iflytek,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";

@@ -22,6 +22,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

import { RequestPayload } from "./openai";

@@ -40,7 +41,7 @@ export class SparkApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Iflytek;
-      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
+      baseUrl = isApp ? IFLYTEK_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {

@@ -149,6 +150,7 @@ export class SparkApi implements LLMApi {
      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
+        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);

View File

@@ -2,7 +2,7 @@
// azure and openai, using same models. so using same LLMApi.
import {
  ApiPath,
-  DEFAULT_API_HOST,
+  MOONSHOT_BASE_URL,
  Moonshot,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";

@@ -40,7 +40,7 @@ export class MoonshotApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Moonshot;
-      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
+      baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {

View File

@@ -2,7 +2,7 @@
// azure and openai, using same models. so using same LLMApi.
import {
  ApiPath,
-  DEFAULT_API_HOST,
+  OPENAI_BASE_URL,
  DEFAULT_MODELS,
  OpenaiPath,
  Azure,

@@ -98,7 +98,7 @@ export class ChatGPTApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
-      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
+      baseUrl = isApp ? OPENAI_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {

View File

@@ -1,5 +1,5 @@
"use client";
-import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import {

@@ -22,6 +22,7 @@ import mapKeys from "lodash-es/mapKeys";
import mapValues from "lodash-es/mapValues";
import isArray from "lodash-es/isArray";
import isObject from "lodash-es/isObject";
+import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;

@@ -70,9 +71,7 @@ export class HunyuanApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
-      baseUrl = isApp
-        ? DEFAULT_API_HOST + "/api/proxy/tencent"
-        : ApiPath.Tencent;
+      baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
    }

    if (baseUrl.endsWith("/")) {

@@ -179,6 +178,7 @@ export class HunyuanApi implements LLMApi {
      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
+        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);

View File

@@ -1815,6 +1815,7 @@ function _Chat() {
  {message?.tools?.map((tool) => (
    <div
      key={tool.id}
+      title={tool?.errorMsg}
      className={styles["chat-message-tool"]}
    >
      {tool.isError === false ? (

View File

@@ -207,23 +207,6 @@ function CustomCode(props: { children: any; className?: string }) {
  );
}

-function escapeDollarNumber(text: string) {
-  let escapedText = "";
-
-  for (let i = 0; i < text.length; i += 1) {
-    let char = text[i];
-    const nextChar = text[i + 1] || " ";
-
-    if (char === "$" && nextChar >= "0" && nextChar <= "9") {
-      char = "\\$";
-    }
-
-    escapedText += char;
-  }
-
-  return escapedText;
-}
-
function escapeBrackets(text: string) {
  const pattern =
    /(```[\s\S]*?```|`.*?`)|\\\[([\s\S]*?[^\\])\\\]|\\\((.*?)\\\)/g;

@@ -261,7 +244,7 @@ function tryWrapHtmlCode(text: string) {
function _MarkDownContent(props: { content: string }) {
  const escapedContent = useMemo(() => {
-    return tryWrapHtmlCode(escapeBrackets(escapeDollarNumber(props.content)));
+    return tryWrapHtmlCode(escapeBrackets(props.content));
  }, [props.content]);

  return (

View File

@@ -11,7 +11,6 @@ export const RUNTIME_CONFIG_DOM = "danger-runtime-config";
export const STABILITY_BASE_URL = "https://api.stability.ai";

-export const DEFAULT_API_HOST = "https://api.nextchat.dev";
export const OPENAI_BASE_URL = "https://api.openai.com";
export const ANTHROPIC_BASE_URL = "https://api.anthropic.com";

View File

@ -1,9 +1,18 @@
import { import {
ApiPath,
DEFAULT_API_HOST,
GoogleSafetySettingsThreshold, GoogleSafetySettingsThreshold,
ServiceProvider, ServiceProvider,
StoreKey, StoreKey,
ApiPath,
OPENAI_BASE_URL,
ANTHROPIC_BASE_URL,
GEMINI_BASE_URL,
BAIDU_BASE_URL,
BYTEDANCE_BASE_URL,
ALIBABA_BASE_URL,
TENCENT_BASE_URL,
MOONSHOT_BASE_URL,
STABILITY_BASE_URL,
IFLYTEK_BASE_URL,
} from "../constant"; } from "../constant";
import { getHeaders } from "../client/api"; import { getHeaders } from "../client/api";
import { getClientConfig } from "../config/client"; import { getClientConfig } from "../config/client";
@ -15,45 +24,25 @@ let fetchState = 0; // 0 not fetch, 1 fetching, 2 done
const isApp = getClientConfig()?.buildMode === "export"; const isApp = getClientConfig()?.buildMode === "export";
const DEFAULT_OPENAI_URL = isApp const DEFAULT_OPENAI_URL = isApp ? OPENAI_BASE_URL : ApiPath.OpenAI;
? DEFAULT_API_HOST + "/api/proxy/openai"
: ApiPath.OpenAI;
const DEFAULT_GOOGLE_URL = isApp const DEFAULT_GOOGLE_URL = isApp ? GEMINI_BASE_URL : ApiPath.Google;
? DEFAULT_API_HOST + "/api/proxy/google"
: ApiPath.Google;
const DEFAULT_ANTHROPIC_URL = isApp const DEFAULT_ANTHROPIC_URL = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
? DEFAULT_API_HOST + "/api/proxy/anthropic"
: ApiPath.Anthropic;
const DEFAULT_BAIDU_URL = isApp const DEFAULT_BAIDU_URL = isApp ? BAIDU_BASE_URL : ApiPath.Baidu;
? DEFAULT_API_HOST + "/api/proxy/baidu"
: ApiPath.Baidu;
const DEFAULT_BYTEDANCE_URL = isApp const DEFAULT_BYTEDANCE_URL = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance;
? DEFAULT_API_HOST + "/api/proxy/bytedance"
: ApiPath.ByteDance;
const DEFAULT_ALIBABA_URL = isApp const DEFAULT_ALIBABA_URL = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba;
? DEFAULT_API_HOST + "/api/proxy/alibaba"
: ApiPath.Alibaba;
const DEFAULT_TENCENT_URL = isApp const DEFAULT_TENCENT_URL = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
? DEFAULT_API_HOST + "/api/proxy/tencent"
: ApiPath.Tencent;
const DEFAULT_MOONSHOT_URL = isApp const DEFAULT_MOONSHOT_URL = isApp ? MOONSHOT_BASE_URL : ApiPath.Moonshot;
? DEFAULT_API_HOST + "/api/proxy/moonshot"
: ApiPath.Moonshot;
const DEFAULT_STABILITY_URL = isApp const DEFAULT_STABILITY_URL = isApp ? STABILITY_BASE_URL : ApiPath.Stability;
? DEFAULT_API_HOST + "/api/proxy/stability"
: ApiPath.Stability;
const DEFAULT_IFLYTEK_URL = isApp const DEFAULT_IFLYTEK_URL = isApp ? IFLYTEK_BASE_URL : ApiPath.Iflytek;
? DEFAULT_API_HOST + "/api/proxy/iflytek"
: ApiPath.Iflytek;
const DEFAULT_ACCESS_STATE = { const DEFAULT_ACCESS_STATE = {
accessCode: "", accessCode: "",

View File

@@ -16,6 +16,9 @@ import {
  DEFAULT_SYSTEM_TEMPLATE,
  KnowledgeCutOffDate,
  StoreKey,
+  SUMMARIZE_MODEL,
+  GEMINI_SUMMARIZE_MODEL,
+  ServiceProvider,
} from "../constant";
import Locale, { getLang } from "../locales";
import { isDalle3, safeLocalStorage } from "../utils";

@@ -23,6 +26,8 @@ import { prettyObject } from "../utils/format";
import { createPersistStore } from "../utils/store";
import { estimateTokenLength } from "../utils/token";
import { ModelConfig, ModelType, useAppConfig } from "./config";
+import { useAccessStore } from "./access";
+import { collectModelsWithDefaultModel } from "../utils/model";
import { createEmptyMask, Mask } from "./mask";

const localStorage = safeLocalStorage();

@@ -37,6 +42,7 @@ export type ChatMessageTool = {
  };
  content?: string;
  isError?: boolean;
+  errorMsg?: string;
};

export type ChatMessage = RequestMessage & {

@@ -102,6 +108,35 @@ function createEmptySession(): ChatSession {
  };
}

+function getSummarizeModel(
+  currentModel: string,
+  providerName: string,
+): string[] {
+  // if a gpt-* model is in use, force gpt-4o-mini for summarizing
+  if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
+    const configStore = useAppConfig.getState();
+    const accessStore = useAccessStore.getState();
+    const allModel = collectModelsWithDefaultModel(
+      configStore.models,
+      [configStore.customModels, accessStore.customModels].join(","),
+      accessStore.defaultModel,
+    );
+    const summarizeModel = allModel.find(
+      (m) => m.name === SUMMARIZE_MODEL && m.available,
+    );
+    if (summarizeModel) {
+      return [
+        summarizeModel.name,
+        summarizeModel.provider?.providerName as string,
+      ];
+    }
+  }
+  if (currentModel.startsWith("gemini")) {
+    return [GEMINI_SUMMARIZE_MODEL, ServiceProvider.Google];
+  }
+  return [currentModel, providerName];
+}
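
For a quick sense of the fallback logic, some hypothetical return values, assuming `SUMMARIZE_MODEL` resolves to an available `gpt-4o-mini` entry (per the comment above):

```ts
getSummarizeModel("gpt-4o", "OpenAI"); // ["gpt-4o-mini", "OpenAI"]
getSummarizeModel("gemini-pro", "Google"); // [GEMINI_SUMMARIZE_MODEL, ServiceProvider.Google]
getSummarizeModel("qwen-turbo", "Alibaba"); // ["qwen-turbo", "Alibaba"], no override applies
```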
function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce(
    (pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),

@@ -578,8 +613,14 @@ export const useChatStore = createPersistStore(
        return;
      }

-      const providerName = modelConfig.compressProviderName;
-      const api: ClientApi = getClientApi(providerName);
+      // if compressModel is not configured, fall back to getSummarizeModel
+      const [model, providerName] = modelConfig.compressModel
+        ? [modelConfig.compressModel, modelConfig.compressProviderName]
+        : getSummarizeModel(
+            session.mask.modelConfig.model,
+            session.mask.modelConfig.providerName,
+          );
+      const api: ClientApi = getClientApi(providerName as ServiceProvider);

      // remove error messages if any
      const messages = session.messages;

@@ -610,7 +651,7 @@ export const useChatStore = createPersistStore(
      api.llm.chat({
        messages: topicMessages,
        config: {
-          model: modelConfig.compressModel,
+          model,
          stream: false,
          providerName,
        },

@@ -674,7 +715,8 @@ export const useChatStore = createPersistStore(
        config: {
          ...modelcfg,
          stream: true,
-          model: modelConfig.compressModel,
+          model,
+          providerName,
        },
        onUpdate(message) {
          session.memoryPrompt = message;

@@ -727,7 +769,7 @@ export const useChatStore = createPersistStore(
    },
    {
      name: StoreKey.Chat,
-      version: 3.2,
+      version: 3.3,
      migrate(persistedState, version) {
        const state = persistedState as any;
        const newState = JSON.parse(

@@ -783,6 +825,14 @@ export const useChatStore = createPersistStore(
            config.modelConfig.compressProviderName;
        });
      }

+      // revert the default summarize model for every session
+      if (version < 3.3) {
+        newState.sessions.forEach((s) => {
+          const config = useAppConfig.getState();
+          s.mask.modelConfig.compressModel = "";
+          s.mask.modelConfig.compressProviderName = "";
+        });
+      }

      return newState as any;
    },

View File

@@ -71,8 +71,8 @@ export const DEFAULT_CONFIG = {
  sendMemory: true,
  historyMessageCount: 4,
  compressMessageLengthThreshold: 1000,
-  compressModel: "gpt-4o-mini" as ModelType,
-  compressProviderName: "OpenAI" as ServiceProvider,
+  compressModel: "",
+  compressProviderName: "",
  enableInjectSystemPrompts: true,
  template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
  size: "1024x1024" as DalleSize,

@@ -178,7 +178,7 @@ export const useAppConfig = createPersistStore(
  }),
  {
    name: StoreKey.Config,
-    version: 4,
+    version: 4.1,
    merge(persistedState, currentState) {
      const state = persistedState as ChatConfig | undefined;

@@ -231,7 +231,7 @@ export const useAppConfig = createPersistStore(
        : config?.template ?? DEFAULT_INPUT_TEMPLATE;
    }

-    if (version < 4) {
+    if (version < 4.1) {
      state.modelConfig.compressModel =
        DEFAULT_CONFIG.modelConfig.compressModel;
      state.modelConfig.compressProviderName =

View File

@@ -7,7 +7,7 @@ import yaml from "js-yaml";
import { adapter, getOperationId } from "../utils";
import { useAccessStore } from "./access";

-const isApp = getClientConfig()?.isApp;
+const isApp = getClientConfig()?.isApp !== false;

export type Plugin = {
  id: string;

View File

@@ -12,7 +12,6 @@ import { downloadAs, readFromFile } from "../utils";
import { showToast } from "../components/ui-lib";
import Locale from "../locales";
import { createSyncClient, ProviderType } from "../utils/cloud";
-import { corsPath } from "../utils/cors";

export interface WebDavConfig {
  server: string;

@@ -26,7 +25,7 @@ export type SyncStore = GetStoreState<typeof useSyncStore>;
const DEFAULT_SYNC_STATE = {
  provider: ProviderType.WebDAV,
  useProxy: true,
-  proxyUrl: corsPath(ApiPath.Cors),
+  proxyUrl: ApiPath.Cors as string,

  webdav: {
    endpoint: "",

View File

@@ -2,8 +2,9 @@ import { useEffect, useState } from "react";
import { showToast } from "./components/ui-lib";
import Locale from "./locales";
import { RequestMessage } from "./client/api";
-import { ServiceProvider, REQUEST_TIMEOUT_MS } from "./constant";
-import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
+import { ServiceProvider } from "./constant";
+// import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
+import { fetch as tauriStreamFetch } from "./utils/stream";

export function trimTopic(topic: string) {
  // Fix an issue where double quotes still show in the Indonesian language

@@ -295,30 +296,23 @@ export function fetch(
  options?: Record<string, unknown>,
): Promise<any> {
  if (window.__TAURI__) {
-    const payload = options?.body || options?.data;
-    return tauriFetch(url, {
-      ...options,
-      body:
-        payload &&
-        ({
-          type: "Text",
-          payload,
-        } as any),
-      timeout: ((options?.timeout as number) || REQUEST_TIMEOUT_MS) / 1000,
-      responseType:
-        options?.responseType == "text" ? ResponseType.Text : ResponseType.JSON,
-    } as any);
+    return tauriStreamFetch(url, options);
  }
  return window.fetch(url, options);
}

export function adapter(config: Record<string, unknown>) {
-  const { baseURL, url, params, ...rest } = config;
+  const { baseURL, url, params, data: body, ...rest } = config;
  const path = baseURL ? `${baseURL}${url}` : url;
  const fetchUrl = params
    ? `${path}?${new URLSearchParams(params as any).toString()}`
    : path;
-  return fetch(fetchUrl as string, { ...rest, responseType: "text" });
+  return fetch(fetchUrl as string, { ...rest, body }).then((res) => {
+    const { status, headers, statusText } = res;
+    return res
+      .text()
+      .then((data: string) => ({ status, statusText, headers, data }));
+  });
}

export function safeLocalStorage(): {
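
The reworked `adapter` flattens an axios-style request config into a single `fetch` call and resolves with an axios-like `{ status, statusText, headers, data }` shape, where `data` is the raw response text. A hypothetical call, not taken from the codebase:

```ts
import { adapter } from "@/app/utils";

// Illustrative config; `params` is serialized into the query string.
adapter({
  baseURL: "https://api.example.com", // placeholder host
  url: "/search",
  method: "GET",
  params: { q: "nextchat" },
}).then(({ status, data }) => {
  console.log(status, data); // data is the response body as text
});
```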

View File

@@ -10,6 +10,7 @@ import {
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "./format";
+import { fetch as tauriFetch } from "./stream";

export function compressImage(file: Blob, maxSize: number): Promise<string> {
  return new Promise((resolve, reject) => {

@@ -221,7 +222,7 @@ export function stream(
            ),
          )
          .then((res) => {
-            const content = JSON.stringify(res.data);
+            let content = res.data || res?.statusText;
            if (res.status >= 300) {
              return Promise.reject(content);
            }

@@ -236,7 +237,11 @@ export function stream(
            return content;
          })
          .catch((e) => {
-            options?.onAfterTool?.({ ...tool, isError: true });
+            options?.onAfterTool?.({
+              ...tool,
+              isError: true,
+              errorMsg: e.toString(),
+            });
            return e.toString();
          })
          .then((content) => ({

@@ -288,6 +293,7 @@ export function stream(
      REQUEST_TIMEOUT_MS,
    );
    fetchEventSource(chatPath, {
+      fetch: tauriFetch as any,
      ...chatPayload,
      async onopen(res) {
        clearTimeout(requestTimeoutId);

View File

@@ -1,19 +0,0 @@
-import { getClientConfig } from "../config/client";
-import { DEFAULT_API_HOST } from "../constant";
-
-export function corsPath(path: string) {
-  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_API_HOST}` : "";
-  if (baseUrl === "" && path === "") {
-    return "";
-  }
-  if (!path.startsWith("/")) {
-    path = "/" + path;
-  }
-  if (!path.endsWith("/")) {
-    path += "/";
-  }
-  return `${baseUrl}${path}`;
-}

app/utils/stream.ts (new file, 107 lines)
View File

@@ -0,0 +1,107 @@
+// Uses a Tauri command to send the request; see src-tauri/src/stream.rs and src-tauri/src/main.rs.
+// 1. invoke('stream_fetch', { url, method, headers, body }) returns the status and headers.
+// 2. Listen to the `stream-response` event repeatedly to receive the body chunks.
+
+type ResponseEvent = {
+  id: number;
+  payload: {
+    request_id: number;
+    status?: number;
+    chunk?: number[];
+  };
+};
+
+type StreamResponse = {
+  request_id: number;
+  status: number;
+  status_text: string;
+  headers: Record<string, string>;
+};
+
+export function fetch(url: string, options?: RequestInit): Promise<any> {
+  if (window.__TAURI__) {
+    const {
+      signal,
+      method = "GET",
+      headers: _headers = {},
+      body = [],
+    } = options || {};
+    let unlisten: Function | undefined;
+    let setRequestId: Function | undefined;
+    const requestIdPromise = new Promise((resolve) => (setRequestId = resolve));
+    const ts = new TransformStream();
+    const writer = ts.writable.getWriter();
+
+    let closed = false;
+    const close = () => {
+      if (closed) return;
+      closed = true;
+      unlisten && unlisten();
+      writer.ready.then(() => {
+        writer.close().catch((e) => console.error(e));
+      });
+    };
+
+    if (signal) {
+      signal.addEventListener("abort", () => close());
+    }
+    // @ts-ignore 2. listen for the response event repeatedly and write chunks into Response.body
+    window.__TAURI__.event
+      .listen("stream-response", (e: ResponseEvent) =>
+        requestIdPromise.then((request_id) => {
+          const { request_id: rid, chunk, status } = e?.payload || {};
+          if (request_id != rid) {
+            return;
+          }
+          if (chunk) {
+            writer.ready.then(() => {
+              writer.write(new Uint8Array(chunk));
+            });
+          } else if (status === 0) {
+            // end of body
+            close();
+          }
+        }),
+      )
+      .then((u: Function) => (unlisten = u));
+
+    const headers: Record<string, string> = {
+      Accept: "application/json, text/plain, */*",
+      "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
+      "User-Agent": navigator.userAgent,
+    };
+    for (const item of new Headers(_headers || {})) {
+      headers[item[0]] = item[1];
+    }
+    return window.__TAURI__
+      .invoke("stream_fetch", {
+        method: method.toUpperCase(),
+        url,
+        headers,
+        // TODO FormData
+        body:
+          typeof body === "string"
+            ? Array.from(new TextEncoder().encode(body))
+            : [],
+      })
+      .then((res: StreamResponse) => {
+        const { request_id, status, status_text: statusText, headers } = res;
+        setRequestId?.(request_id);
+        const response = new Response(ts.readable, {
+          status,
+          statusText,
+          headers,
+        });
+        if (status >= 300) {
+          setTimeout(close, 100);
+        }
+        return response;
+      })
+      .catch((e) => {
+        console.error("stream error", e);
+        throw e;
+      });
+  }
+  return window.fetch(url, options);
+}
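
Since the returned `Response` wraps a standard `ReadableStream`, callers can consume it with the usual reader loop. A sketch under that assumption (the URL is a placeholder):

```ts
import { fetch } from "@/app/utils/stream";

async function readAll(url: string): Promise<string> {
  const res = await fetch(url); // native streaming inside Tauri, window.fetch otherwise
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let text = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    text += decoder.decode(value, { stream: true });
  }
  return text;
}
```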

src-tauri/Cargo.lock (generated, 52 lines changed)
View File

@@ -348,9 +348,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "bytes"
-version = "1.4.0"
+version = "1.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"
+checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
dependencies = [
  "serde",
]

@@ -942,9 +942,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
-version = "1.1.0"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
dependencies = [
  "percent-encoding",
]

@@ -970,9 +970,9 @@ dependencies = [
[[package]]
name = "futures-core"
-version = "0.3.28"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
+checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"

[[package]]
name = "futures-executor"

@@ -987,9 +987,9 @@ dependencies = [
[[package]]
name = "futures-io"
-version = "0.3.28"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
+checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"

[[package]]
name = "futures-lite"

@@ -1008,9 +1008,9 @@ dependencies = [
[[package]]
name = "futures-macro"
-version = "0.3.28"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
+checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
dependencies = [
  "proc-macro2",
  "quote",

@@ -1019,21 +1019,21 @@ dependencies = [
[[package]]
name = "futures-sink"
-version = "0.3.29"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"
+checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"

[[package]]
name = "futures-task"
-version = "0.3.28"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
+checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"

[[package]]
name = "futures-util"
-version = "0.3.28"
+version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
+checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
  "futures-core",
  "futures-io",

@@ -1555,9 +1555,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idna"
-version = "0.3.0"
+version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
+checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
dependencies = [
  "unicode-bidi",
  "unicode-normalization",

@@ -1986,6 +1986,10 @@ checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
name = "nextchat"
version = "0.1.0"
dependencies = [
+  "bytes",
+  "futures-util",
+  "percent-encoding",
+  "reqwest",
  "serde",
  "serde_json",
  "tauri",

@@ -2281,9 +2285,9 @@ checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd"
[[package]]
name = "percent-encoding"
-version = "2.2.0"
+version = "2.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

[[package]]
name = "phf"

@@ -2545,9 +2549,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"
[[package]]
name = "proc-macro2"
-version = "1.0.58"
+version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
dependencies = [
  "unicode-ident",
]

@@ -3889,9 +3893,9 @@ checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
[[package]]
name = "url"
-version = "2.3.1"
+version = "2.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643"
+checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c"
dependencies = [
  "form_urlencoded",
  "idna",

View File

@@ -37,6 +37,10 @@ tauri = { version = "1.5.4", features = [ "http-all",
  "window-unminimize",
] }
tauri-plugin-window-state = { git = "https://github.com/tauri-apps/plugins-workspace", branch = "v1" }
+percent-encoding = "2.3.1"
+reqwest = "0.11.18"
+futures-util = "0.3.30"
+bytes = "1.7.2"

[features]
# this feature is used for production builds or when `devPath` points to the filesystem and the built-in dev server is disabled.

View File

@@ -1,8 +1,11 @@
// Prevents additional console window on Windows in release, DO NOT REMOVE!!
#![cfg_attr(not(debug_assertions), windows_subsystem = "windows")]

+mod stream;
+
fn main() {
  tauri::Builder::default()
+    .invoke_handler(tauri::generate_handler![stream::stream_fetch])
    .plugin(tauri_plugin_window_state::Builder::default().build())
    .run(tauri::generate_context!())
    .expect("error while running tauri application");
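
With the command registered, the webview calls it by name through Tauri's invoke bridge, which is exactly what `app/utils/stream.ts` does. A trimmed illustration of the invocation shape (the URL is a placeholder; the real caller sends full headers and a byte-array body):

```ts
const res = await window.__TAURI__.invoke("stream_fetch", {
  method: "GET",
  url: "https://example.com",
  headers: {},
  body: [],
});
console.log(res.status, res.request_id); // status/headers arrive first; body chunks follow as events
```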

src-tauri/src/stream.rs (new file, 134 lines)
View File

@@ -0,0 +1,134 @@
+// Tauri command implementing a streaming HTTP fetch: it sends the request with
+// reqwest and emits each response chunk to the webview as a "stream-response" event.
+
+use std::time::Duration;
+use std::error::Error;
+use std::sync::atomic::{AtomicU32, Ordering};
+use std::collections::HashMap;
+use futures_util::{StreamExt};
+use reqwest::Client;
+use reqwest::header::{HeaderName, HeaderMap};
+
+static REQUEST_COUNTER: AtomicU32 = AtomicU32::new(0);
+
+#[derive(Debug, Clone, serde::Serialize)]
+pub struct StreamResponse {
+  request_id: u32,
+  status: u16,
+  status_text: String,
+  headers: HashMap<String, String>
+}
+
+#[derive(Clone, serde::Serialize)]
+pub struct EndPayload {
+  request_id: u32,
+  status: u16,
+}
+
+#[derive(Clone, serde::Serialize)]
+pub struct ChunkPayload {
+  request_id: u32,
+  chunk: bytes::Bytes,
+}
+
+#[tauri::command]
+pub async fn stream_fetch(
+  window: tauri::Window,
+  method: String,
+  url: String,
+  headers: HashMap<String, String>,
+  body: Vec<u8>,
+) -> Result<StreamResponse, String> {
+
+  let event_name = "stream-response";
+  let request_id = REQUEST_COUNTER.fetch_add(1, Ordering::SeqCst);
+
+  let mut _headers = HeaderMap::new();
+  for (key, value) in &headers {
+    _headers.insert(key.parse::<HeaderName>().unwrap(), value.parse().unwrap());
+  }
+
+  // println!("method: {:?}", method);
+  // println!("url: {:?}", url);
+  // println!("headers: {:?}", headers);
+  // println!("headers: {:?}", _headers);
+
+  let method = method.parse::<reqwest::Method>().map_err(|err| format!("failed to parse method: {}", err))?;
+  let client = Client::builder()
+    .default_headers(_headers)
+    .redirect(reqwest::redirect::Policy::limited(3))
+    .connect_timeout(Duration::new(3, 0))
+    .build()
+    .map_err(|err| format!("failed to generate client: {}", err))?;
+
+  let mut request = client.request(
+    method.clone(),
+    url.parse::<reqwest::Url>().map_err(|err| format!("failed to parse url: {}", err))?
+  );
+
+  if method == reqwest::Method::POST || method == reqwest::Method::PUT || method == reqwest::Method::PATCH {
+    let body = bytes::Bytes::from(body);
+    // println!("body: {:?}", body);
+    request = request.body(body);
+  }
+
+  // println!("client: {:?}", client);
+  // println!("request: {:?}", request);
+
+  let response_future = request.send();
+
+  let res = response_future.await;
+  let response = match res {
+    Ok(res) => {
+      // got a response; forward status and headers, then stream the body to the client
+      let mut headers = HashMap::new();
+      for (name, value) in res.headers() {
+        headers.insert(
+          name.as_str().to_string(),
+          std::str::from_utf8(value.as_bytes()).unwrap().to_string()
+        );
+      }
+      let status = res.status().as_u16();
+
+      tauri::async_runtime::spawn(async move {
+        let mut stream = res.bytes_stream();
+
+        while let Some(chunk) = stream.next().await {
+          match chunk {
+            Ok(bytes) => {
+              // println!("chunk: {:?}", bytes);
+              if let Err(e) = window.emit(event_name, ChunkPayload{ request_id, chunk: bytes }) {
+                println!("Failed to emit chunk payload: {:?}", e);
+              }
+            }
+            Err(err) => {
+              println!("Error chunk: {:?}", err);
+            }
+          }
+        }
+        // a zero status signals end-of-body to the listener in app/utils/stream.ts
+        if let Err(e) = window.emit(event_name, EndPayload{ request_id, status: 0 }) {
+          println!("Failed to emit end payload: {:?}", e);
+        }
+      });
+
+      StreamResponse {
+        request_id,
+        status,
+        status_text: "OK".to_string(),
+        headers,
+      }
+    }
+    Err(err) => {
+      println!("Error response: {:?}", err.source().expect("REASON").to_string());
+      StreamResponse {
+        request_id,
+        status: 599,
+        status_text: err.source().expect("REASON").to_string(),
+        headers: HashMap::new(),
+      }
+    }
+  };
+  // println!("Response: {:?}", response);
+  Ok(response)
+}

View File

@@ -9,7 +9,7 @@
  },
  "package": {
    "productName": "NextChat",
-    "version": "2.15.3"
+    "version": "2.15.4"
  },
  "tauri": {
    "allowlist": {