diff --git a/README.md b/README.md
index 0c06b73f0..dda896cbf 100644
--- a/README.md
+++ b/README.md
@@ -1,16 +1,17 @@
-
-
+
+
+
NextChat (ChatGPT Next Web)
English / [简体中文](./README_CN.md)
-One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support.
+One-Click to get a well-designed cross-platform ChatGPT web UI, with Claude, GPT4 & Gemini Pro support.
-一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。
+一键免费部署你的跨平台私人 ChatGPT 应用, 支持 Claude, GPT4 & Gemini Pro 模型。
[![Saas][Saas-image]][saas-url]
[![Web][Web-image]][web-url]
@@ -18,9 +19,9 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
-[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
+[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
-[NextChatAI](https://nextchat.dev/chat) / [网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
+[NextChatAI](https://nextchat.dev/chat) / [自部署网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
[saas-url]: https://nextchat.dev/chat?utm_source=readme
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
@@ -31,7 +32,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
-[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [](https://zeabur.com/templates/ZBUEFA) [](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [](https://www.bt.cn/new/download.html) [](https://computenest.aliyun.com/market/service-f1c9b75e59814dc49d52)
+[](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [](https://zeabur.com/templates/ZBUEFA) [](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [](https://www.bt.cn/new/download.html)
+[](https://monica.im/?utm=nxcrp)
@@ -311,6 +312,14 @@ ChatGLM Api Key.
ChatGLM Api Url.
+### `DEEPSEEK_API_KEY` (optional)
+
+DeepSeek Api Key.
+
+### `DEEPSEEK_URL` (optional)
+
+DeepSeek Api Url.
+
### `HIDE_USER_API_KEY` (optional)
> Default: Empty
@@ -355,6 +364,13 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name
Change default model
+### `VISION_MODELS` (optional)
+
+> Default: Empty
+> Example: `gpt-4-vision,claude-3-opus,my-custom-model` adds vision capabilities to these models on top of the default pattern matches (which detect models whose names contain keywords like "vision", "claude-3", "gemini-1.5", etc.).
+
+Grant vision capabilities to additional models beyond the default pattern matching. Separate multiple models with commas.
+
### `WHITE_WEBDAV_ENDPOINTS` (optional)
You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format:
diff --git a/README_CN.md b/README_CN.md
index d4da8b9da..aa95d6b5c 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -192,6 +192,14 @@ ChatGLM Api Key.
ChatGLM Api Url.
+### `DEEPSEEK_API_KEY` (可选)
+
+DeepSeek Api Key.
+
+### `DEEPSEEK_URL` (可选)
+
+DeepSeek Api Url.
+
### `HIDE_USER_API_KEY` (可选)
@@ -235,6 +243,13 @@ ChatGLM Api Url.
更改默认模型
+### `VISION_MODELS` (可选)
+
+> 默认值:空
+> 示例:`gpt-4-vision,claude-3-opus,my-custom-model` 表示为这些模型添加视觉能力,作为对默认模式匹配的补充(默认会检测包含"vision"、"claude-3"、"gemini-1.5"等关键词的模型)。
+
+在默认模式匹配之外,添加更多具有视觉能力的模型。多个模型用逗号分隔。
+
### `DEFAULT_INPUT_TEMPLATE` (可选)
自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项
diff --git a/README_JA.md b/README_JA.md
index 062c11262..29eb0d275 100644
--- a/README_JA.md
+++ b/README_JA.md
@@ -217,6 +217,13 @@ ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデ
デフォルトのモデルを変更します。
+### `VISION_MODELS` (オプション)
+
+> デフォルト:空
+> 例:`gpt-4-vision,claude-3-opus,my-custom-model` は、これらのモデルにビジョン機能を追加します。これはデフォルトのパターンマッチング("vision"、"claude-3"、"gemini-1.5"などのキーワードを含むモデルを検出)に加えて適用されます。
+
+デフォルトのパターンマッチングに加えて、追加のモデルにビジョン機能を付与します。複数のモデルはカンマで区切ります。
+
### `DEFAULT_INPUT_TEMPLATE` (オプション)
『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。
diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts
index 3017fd371..3b5833d7e 100644
--- a/app/api/[provider]/[...path]/route.ts
+++ b/app/api/[provider]/[...path]/route.ts
@@ -10,6 +10,7 @@ import { handle as alibabaHandler } from "../../alibaba";
import { handle as moonshotHandler } from "../../moonshot";
import { handle as stabilityHandler } from "../../stability";
import { handle as iflytekHandler } from "../../iflytek";
+import { handle as deepseekHandler } from "../../deepseek";
import { handle as xaiHandler } from "../../xai";
import { handle as chatglmHandler } from "../../glm";
import { handle as proxyHandler } from "../../proxy";
@@ -40,6 +41,8 @@ async function handle(
return stabilityHandler(req, { params });
case ApiPath.Iflytek:
return iflytekHandler(req, { params });
+ case ApiPath.DeepSeek:
+ return deepseekHandler(req, { params });
case ApiPath.XAI:
return xaiHandler(req, { params });
case ApiPath.ChatGLM:
diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts
index 894b1ae4c..20f6caefa 100644
--- a/app/api/alibaba.ts
+++ b/app/api/alibaba.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Alibaba as string,
diff --git a/app/api/anthropic.ts b/app/api/anthropic.ts
index 7a4444371..b96637b2c 100644
--- a/app/api/anthropic.ts
+++ b/app/api/anthropic.ts
@@ -9,7 +9,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
@@ -122,7 +122,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Anthropic as string,
diff --git a/app/api/auth.ts b/app/api/auth.ts
index 6703b64bd..1760c249c 100644
--- a/app/api/auth.ts
+++ b/app/api/auth.ts
@@ -92,6 +92,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) {
systemApiKey =
serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret;
break;
+ case ModelProvider.DeepSeek:
+ systemApiKey = serverConfig.deepseekApiKey;
+ break;
case ModelProvider.XAI:
systemApiKey = serverConfig.xaiApiKey;
break;
diff --git a/app/api/baidu.ts b/app/api/baidu.ts
index 0408b43c5..0f4e05ee8 100644
--- a/app/api/baidu.ts
+++ b/app/api/baidu.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
import { getAccessToken } from "@/app/utils/baidu";
const serverConfig = getServerSideConfig();
@@ -104,7 +104,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Baidu as string,
diff --git a/app/api/bytedance.ts b/app/api/bytedance.ts
index cb65b1061..51b39ceb7 100644
--- a/app/api/bytedance.ts
+++ b/app/api/bytedance.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ByteDance as string,
diff --git a/app/api/common.ts b/app/api/common.ts
index 495a12ccd..b7e41fa26 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -2,7 +2,7 @@ import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
-import { getModelProvider, isModelAvailableInServer } from "../utils/model";
+import { getModelProvider, isModelNotavailableInServer } from "../utils/model";
const serverConfig = getServerSideConfig();
@@ -118,15 +118,14 @@ export async function requestOpenai(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
- ServiceProvider.OpenAI as string,
- ) ||
- isModelAvailableInServer(
- serverConfig.customModels,
- jsonBody?.model as string,
- ServiceProvider.Azure as string,
+ [
+ ServiceProvider.OpenAI,
+ ServiceProvider.Azure,
+ jsonBody?.model as string, // support provider-unspecified model
+ ],
)
) {
return NextResponse.json(
diff --git a/app/api/config/route.ts b/app/api/config/route.ts
index b0d9da031..855a5db01 100644
--- a/app/api/config/route.ts
+++ b/app/api/config/route.ts
@@ -14,6 +14,7 @@ const DANGER_CONFIG = {
disableFastLink: serverConfig.disableFastLink,
customModels: serverConfig.customModels,
defaultModel: serverConfig.defaultModel,
+ visionModels: serverConfig.visionModels,
};
declare global {
diff --git a/app/api/deepseek.ts b/app/api/deepseek.ts
new file mode 100644
index 000000000..a9879eced
--- /dev/null
+++ b/app/api/deepseek.ts
@@ -0,0 +1,128 @@
+import { getServerSideConfig } from "@/app/config/server";
+import {
+ DEEPSEEK_BASE_URL,
+ ApiPath,
+ ModelProvider,
+ ServiceProvider,
+} from "@/app/constant";
+import { prettyObject } from "@/app/utils/format";
+import { NextRequest, NextResponse } from "next/server";
+import { auth } from "@/app/api/auth";
+import { isModelNotavailableInServer } from "@/app/utils/model";
+
+const serverConfig = getServerSideConfig();
+
+export async function handle(
+ req: NextRequest,
+ { params }: { params: { path: string[] } },
+) {
+ console.log("[DeepSeek Route] params ", params);
+
+ if (req.method === "OPTIONS") {
+ return NextResponse.json({ body: "OK" }, { status: 200 });
+ }
+
+ const authResult = auth(req, ModelProvider.DeepSeek);
+ if (authResult.error) {
+ return NextResponse.json(authResult, {
+ status: 401,
+ });
+ }
+
+ try {
+ const response = await request(req);
+ return response;
+ } catch (e) {
+ console.error("[DeepSeek] ", e);
+ return NextResponse.json(prettyObject(e));
+ }
+}
+
+async function request(req: NextRequest) {
+ const controller = new AbortController();
+
+  // use the configured base url, or just strip the /api/deepseek prefix
+ let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.DeepSeek, "");
+
+ let baseUrl = serverConfig.deepseekUrl || DEEPSEEK_BASE_URL;
+
+ if (!baseUrl.startsWith("http")) {
+ baseUrl = `https://${baseUrl}`;
+ }
+
+ if (baseUrl.endsWith("/")) {
+ baseUrl = baseUrl.slice(0, -1);
+ }
+
+ console.log("[Proxy] ", path);
+ console.log("[Base Url]", baseUrl);
+
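+  // abort the upstream request if it hasn't completed within 10 minutes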
+ const timeoutId = setTimeout(
+ () => {
+ controller.abort();
+ },
+ 10 * 60 * 1000,
+ );
+
+ const fetchUrl = `${baseUrl}${path}`;
+ const fetchOptions: RequestInit = {
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: req.headers.get("Authorization") ?? "",
+ },
+ method: req.method,
+ body: req.body,
+ redirect: "manual",
+ // @ts-ignore
+ duplex: "half",
+ signal: controller.signal,
+ };
+
+ // #1815 try to refuse some request to some models
+ if (serverConfig.customModels && req.body) {
+ try {
+ const clonedBody = await req.text();
+ fetchOptions.body = clonedBody;
+
+ const jsonBody = JSON.parse(clonedBody) as { model?: string };
+
+ // not undefined and is false
+ if (
+ isModelNotavailableInServer(
+ serverConfig.customModels,
+ jsonBody?.model as string,
+ ServiceProvider.DeepSeek as string,
+ )
+ ) {
+ return NextResponse.json(
+ {
+ error: true,
+ message: `you are not allowed to use ${jsonBody?.model} model`,
+ },
+ {
+ status: 403,
+ },
+ );
+ }
+ } catch (e) {
+ console.error(`[DeepSeek] filter`, e);
+ }
+ }
+ try {
+ const res = await fetch(fetchUrl, fetchOptions);
+
+ // to prevent browser prompt for credentials
+ const newHeaders = new Headers(res.headers);
+ newHeaders.delete("www-authenticate");
+ // to disable nginx buffering
+ newHeaders.set("X-Accel-Buffering", "no");
+
+ return new Response(res.body, {
+ status: res.status,
+ statusText: res.statusText,
+ headers: newHeaders,
+ });
+ } finally {
+ clearTimeout(timeoutId);
+ }
+}
diff --git a/app/api/glm.ts b/app/api/glm.ts
index 3625b9f7b..8431c5db5 100644
--- a/app/api/glm.ts
+++ b/app/api/glm.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ChatGLM as string,
diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts
index 8b8227dce..6624f74e9 100644
--- a/app/api/iflytek.ts
+++ b/app/api/iflytek.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
// iflytek
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Iflytek as string,
diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts
index 5bf4807e3..792d14d33 100644
--- a/app/api/moonshot.ts
+++ b/app/api/moonshot.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Moonshot as string,
diff --git a/app/api/xai.ts b/app/api/xai.ts
index a4ee8b397..4aad5e5fb 100644
--- a/app/api/xai.ts
+++ b/app/api/xai.ts
@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
- isModelAvailableInServer(
+ isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.XAI as string,
diff --git a/app/client/api.ts b/app/client/api.ts
index 1da81e964..8f263763b 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -20,6 +20,7 @@ import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { MoonshotApi } from "./platforms/moonshot";
import { SparkApi } from "./platforms/iflytek";
+import { DeepSeekApi } from "./platforms/deepseek";
import { XAIApi } from "./platforms/xai";
import { ChatGLMApi } from "./platforms/glm";
@@ -154,6 +155,9 @@ export class ClientApi {
case ModelProvider.Iflytek:
this.llm = new SparkApi();
break;
+ case ModelProvider.DeepSeek:
+ this.llm = new DeepSeekApi();
+ break;
case ModelProvider.XAI:
this.llm = new XAIApi();
break;
@@ -247,6 +251,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
+ const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek;
const isXAI = modelConfig.providerName === ServiceProvider.XAI;
const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
const isEnabledAccessControl = accessStore.enabledAccessControl();
@@ -264,6 +269,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
? accessStore.moonshotApiKey
: isXAI
? accessStore.xaiApiKey
+ : isDeepSeek
+ ? accessStore.deepseekApiKey
: isChatGLM
? accessStore.chatglmApiKey
: isIflytek
@@ -280,6 +287,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
isAlibaba,
isMoonshot,
isIflytek,
+ isDeepSeek,
isXAI,
isChatGLM,
apiKey,
@@ -302,6 +310,13 @@ export function getHeaders(ignoreHeaders: boolean = false) {
isAzure,
isAnthropic,
isBaidu,
+ isByteDance,
+ isAlibaba,
+ isMoonshot,
+ isIflytek,
+ isDeepSeek,
+ isXAI,
+ isChatGLM,
apiKey,
isEnabledAccessControl,
} = getConfig();
@@ -344,6 +359,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
return new ClientApi(ModelProvider.Moonshot);
case ServiceProvider.Iflytek:
return new ClientApi(ModelProvider.Iflytek);
+ case ServiceProvider.DeepSeek:
+ return new ClientApi(ModelProvider.DeepSeek);
case ServiceProvider.XAI:
return new ClientApi(ModelProvider.XAI);
case ServiceProvider.ChatGLM:
diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts
new file mode 100644
index 000000000..e2ae645c6
--- /dev/null
+++ b/app/client/platforms/deepseek.ts
@@ -0,0 +1,200 @@
+"use client";
+// DeepSeek's API is OpenAI-compatible, so this client reuses the OpenAI request payload shape.
+import {
+ ApiPath,
+ DEEPSEEK_BASE_URL,
+ DeepSeek,
+ REQUEST_TIMEOUT_MS,
+} from "@/app/constant";
+import {
+ useAccessStore,
+ useAppConfig,
+ useChatStore,
+ ChatMessageTool,
+ usePluginStore,
+} from "@/app/store";
+import { stream } from "@/app/utils/chat";
+import {
+ ChatOptions,
+ getHeaders,
+ LLMApi,
+ LLMModel,
+ SpeechOptions,
+} from "../api";
+import { getClientConfig } from "@/app/config/client";
+import { getMessageTextContent } from "@/app/utils";
+import { RequestPayload } from "./openai";
+import { fetch } from "@/app/utils/stream";
+
+export class DeepSeekApi implements LLMApi {
+ private disableListModels = true;
+
+ path(path: string): string {
+ const accessStore = useAccessStore.getState();
+
+ let baseUrl = "";
+
+ if (accessStore.useCustomConfig) {
+ baseUrl = accessStore.deepseekUrl;
+ }
+
+ if (baseUrl.length === 0) {
+ const isApp = !!getClientConfig()?.isApp;
+ const apiPath = ApiPath.DeepSeek;
+ baseUrl = isApp ? DEEPSEEK_BASE_URL : apiPath;
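+      // the desktop app calls https://api.deepseek.com directly, while the
+      // web build goes through the /api/deepseek proxy route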
+ }
+
+ if (baseUrl.endsWith("/")) {
+ baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+ }
+ if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) {
+ baseUrl = "https://" + baseUrl;
+ }
+
+ console.log("[Proxy Endpoint] ", baseUrl, path);
+
+ return [baseUrl, path].join("/");
+ }
+
+ extractMessage(res: any) {
+ return res.choices?.at(0)?.message?.content ?? "";
+ }
+
+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+ throw new Error("Method not implemented.");
+ }
+
+ async chat(options: ChatOptions) {
+ const messages: ChatOptions["messages"] = [];
+ for (const v of options.messages) {
+ const content = getMessageTextContent(v);
+ messages.push({ role: v.role, content });
+ }
+
+ const modelConfig = {
+ ...useAppConfig.getState().modelConfig,
+ ...useChatStore.getState().currentSession().mask.modelConfig,
+ ...{
+ model: options.config.model,
+ providerName: options.config.providerName,
+ },
+ };
+
+ const requestPayload: RequestPayload = {
+ messages,
+ stream: options.config.stream,
+ model: modelConfig.model,
+ temperature: modelConfig.temperature,
+ presence_penalty: modelConfig.presence_penalty,
+ frequency_penalty: modelConfig.frequency_penalty,
+ top_p: modelConfig.top_p,
+ // max_tokens: Math.max(modelConfig.max_tokens, 1024),
+    // max_tokens is intentionally omitted; it has caused more problems than it solves here.
+ };
+
+ console.log("[Request] openai payload: ", requestPayload);
+
+ const shouldStream = !!options.config.stream;
+ const controller = new AbortController();
+ options.onController?.(controller);
+
+ try {
+ const chatPath = this.path(DeepSeek.ChatPath);
+ const chatPayload = {
+ method: "POST",
+ body: JSON.stringify(requestPayload),
+ signal: controller.signal,
+ headers: getHeaders(),
+ };
+
+ // make a fetch request
+ const requestTimeoutId = setTimeout(
+ () => controller.abort(),
+ REQUEST_TIMEOUT_MS,
+ );
+
+ if (shouldStream) {
+ const [tools, funcs] = usePluginStore
+ .getState()
+ .getAsTools(
+ useChatStore.getState().currentSession().mask?.plugin || [],
+ );
+ return stream(
+ chatPath,
+ requestPayload,
+ getHeaders(),
+ tools as any,
+ funcs,
+ controller,
+ // parseSSE
+ (text: string, runTools: ChatMessageTool[]) => {
+ // console.log("parseSSE", text, runTools);
+ const json = JSON.parse(text);
+ const choices = json.choices as Array<{
+ delta: {
+ content: string;
+ tool_calls: ChatMessageTool[];
+ };
+ }>;
+ const tool_calls = choices[0]?.delta?.tool_calls;
+ if (tool_calls?.length > 0) {
+ const index = tool_calls[0]?.index;
+ const id = tool_calls[0]?.id;
+ const args = tool_calls[0]?.function?.arguments;
+ if (id) {
+ runTools.push({
+ id,
+ type: tool_calls[0]?.type,
+ function: {
+ name: tool_calls[0]?.function?.name as string,
+ arguments: args,
+ },
+ });
+ } else {
+ // @ts-ignore
+ runTools[index]["function"]["arguments"] += args;
+ }
+ }
+ return choices[0]?.delta?.content;
+ },
+ // processToolMessage, include tool_calls message and tool call results
+ (
+ requestPayload: RequestPayload,
+ toolCallMessage: any,
+ toolCallResult: any[],
+ ) => {
+ // @ts-ignore
+ requestPayload?.messages?.splice(
+ // @ts-ignore
+ requestPayload?.messages?.length,
+ 0,
+ toolCallMessage,
+ ...toolCallResult,
+ );
+ },
+ options,
+ );
+ } else {
+ const res = await fetch(chatPath, chatPayload);
+ clearTimeout(requestTimeoutId);
+
+ const resJson = await res.json();
+ const message = this.extractMessage(resJson);
+ options.onFinish(message, res);
+ }
+ } catch (e) {
+ console.log("[Request] failed to make a chat request", e);
+ options.onError?.(e as Error);
+ }
+ }
+ async usage() {
+ return {
+ used: 0,
+ total: 0,
+ };
+ }
+
+  async models(): Promise<LLMModel[]> {
+ return [];
+ }
+}
diff --git a/app/client/platforms/glm.ts b/app/client/platforms/glm.ts
index a7965947f..a8d1869e3 100644
--- a/app/client/platforms/glm.ts
+++ b/app/client/platforms/glm.ts
@@ -21,16 +21,108 @@ import {
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
+import { preProcessImageContent } from "@/app/utils/chat";
+
+interface BasePayload {
+ model: string;
+}
+
+interface ChatPayload extends BasePayload {
+ messages: ChatOptions["messages"];
+ stream?: boolean;
+ temperature?: number;
+ presence_penalty?: number;
+ frequency_penalty?: number;
+ top_p?: number;
+}
+
+interface ImageGenerationPayload extends BasePayload {
+ prompt: string;
+ size?: string;
+ user_id?: string;
+}
+
+interface VideoGenerationPayload extends BasePayload {
+ prompt: string;
+ duration?: number;
+ resolution?: string;
+ user_id?: string;
+}
+
+type ModelType = "chat" | "image" | "video";
export class ChatGLMApi implements LLMApi {
private disableListModels = true;
+ private getModelType(model: string): ModelType {
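+    // e.g. "cogview-3-plus" -> "image", "glm-4-plus" / "glm-4v" -> "chat";
+    // video models are expected to start with "cogvideo-"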
+ if (model.startsWith("cogview-")) return "image";
+ if (model.startsWith("cogvideo-")) return "video";
+ return "chat";
+ }
+
+ private getModelPath(type: ModelType): string {
+ switch (type) {
+ case "image":
+ return ChatGLM.ImagePath;
+ case "video":
+ return ChatGLM.VideoPath;
+ default:
+ return ChatGLM.ChatPath;
+ }
+ }
+
+ private createPayload(
+ messages: ChatOptions["messages"],
+ modelConfig: any,
+ options: ChatOptions,
+ ): BasePayload {
+ const modelType = this.getModelType(modelConfig.model);
+ const lastMessage = messages[messages.length - 1];
+ const prompt =
+ typeof lastMessage.content === "string"
+ ? lastMessage.content
+ : lastMessage.content.map((c) => c.text).join("\n");
+
+ switch (modelType) {
+ case "image":
+ return {
+ model: modelConfig.model,
+ prompt,
+ size: options.config.size,
+ } as ImageGenerationPayload;
+ default:
+ return {
+ messages,
+ stream: options.config.stream,
+ model: modelConfig.model,
+ temperature: modelConfig.temperature,
+ presence_penalty: modelConfig.presence_penalty,
+ frequency_penalty: modelConfig.frequency_penalty,
+ top_p: modelConfig.top_p,
+ } as ChatPayload;
+ }
+ }
+
+ private parseResponse(modelType: ModelType, json: any): string {
+ switch (modelType) {
+ case "image": {
+ const imageUrl = json.data?.[0]?.url;
+        return imageUrl ? `![Generated Image](${imageUrl})` : "";
+ }
+ case "video": {
+ const videoUrl = json.data?.[0]?.url;
+        return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
+ }
+ default:
+ return this.extractMessage(json);
+ }
+ }
+
path(path: string): string {
const accessStore = useAccessStore.getState();
-
let baseUrl = "";
if (accessStore.useCustomConfig) {
@@ -51,7 +143,6 @@ export class ChatGLMApi implements LLMApi {
}
console.log("[Proxy Endpoint] ", baseUrl, path);
-
return [baseUrl, path].join("/");
}
@@ -64,9 +155,12 @@ export class ChatGLMApi implements LLMApi {
}
async chat(options: ChatOptions) {
+ const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
- const content = getMessageTextContent(v);
+ const content = visionModel
+ ? await preProcessImageContent(v.content)
+ : getMessageTextContent(v);
messages.push({ role: v.role, content });
}
@@ -78,25 +172,16 @@ export class ChatGLMApi implements LLMApi {
providerName: options.config.providerName,
},
};
+ const modelType = this.getModelType(modelConfig.model);
+ const requestPayload = this.createPayload(messages, modelConfig, options);
+ const path = this.path(this.getModelPath(modelType));
- const requestPayload: RequestPayload = {
- messages,
- stream: options.config.stream,
- model: modelConfig.model,
- temperature: modelConfig.temperature,
- presence_penalty: modelConfig.presence_penalty,
- frequency_penalty: modelConfig.frequency_penalty,
- top_p: modelConfig.top_p,
- };
+ console.log(`[Request] glm ${modelType} payload: `, requestPayload);
- console.log("[Request] glm payload: ", requestPayload);
-
- const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
- const chatPath = this.path(ChatGLM.ChatPath);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
@@ -104,12 +189,23 @@ export class ChatGLMApi implements LLMApi {
headers: getHeaders(),
};
- // make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
);
+ if (modelType === "image" || modelType === "video") {
+ const res = await fetch(path, chatPayload);
+ clearTimeout(requestTimeoutId);
+
+ const resJson = await res.json();
+ console.log(`[Response] glm ${modelType}:`, resJson);
+ const message = this.parseResponse(modelType, resJson);
+ options.onFinish(message, res);
+ return;
+ }
+
+ const shouldStream = !!options.config.stream;
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
@@ -117,7 +213,7 @@ export class ChatGLMApi implements LLMApi {
useChatStore.getState().currentSession().mask?.plugin || [],
);
return stream(
- chatPath,
+ path,
requestPayload,
getHeaders(),
tools as any,
@@ -125,7 +221,6 @@ export class ChatGLMApi implements LLMApi {
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
- // console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
@@ -154,7 +249,7 @@ export class ChatGLMApi implements LLMApi {
}
return choices[0]?.delta?.content;
},
- // processToolMessage, include tool_calls message and tool call results
+ // processToolMessage
(
requestPayload: RequestPayload,
toolCallMessage: any,
@@ -172,7 +267,7 @@ export class ChatGLMApi implements LLMApi {
options,
);
} else {
- const res = await fetch(chatPath, chatPayload);
+ const res = await fetch(path, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
@@ -184,6 +279,7 @@ export class ChatGLMApi implements LLMApi {
options.onError?.(e as Error);
}
}
+
async usage() {
return {
used: 0,
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index a7bce4fc2..5ca8e1071 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -60,9 +60,18 @@ export class GeminiProApi implements LLMApi {
extractMessage(res: any) {
console.log("[Response] gemini-pro response: ", res);
+ const getTextFromParts = (parts: any[]) => {
+ if (!Array.isArray(parts)) return "";
+
+ return parts
+ .map((part) => part?.text || "")
+ .filter((text) => text.trim() !== "")
+ .join("\n\n");
+ };
+
return (
- res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
- res?.at(0)?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+ getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
+ getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
res?.error?.message ||
""
);
@@ -223,7 +232,10 @@ export class GeminiProApi implements LLMApi {
},
});
}
- return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text;
+ return chunkJson?.candidates
+ ?.at(0)
+ ?.content.parts?.map((part: { text: string }) => part.text)
+ .join("\n\n");
},
// processToolMessage, include tool_calls message and tool call results
(
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 15cfb7ca6..5a110b84b 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -24,7 +24,7 @@ import {
stream,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
-import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing";
+import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
import {
ChatOptions,
@@ -73,7 +73,7 @@ export interface DalleRequestPayload {
prompt: string;
response_format: "url" | "b64_json";
n: number;
- size: DalleSize;
+ size: ModelSize;
quality: DalleQuality;
style: DalleStyle;
}
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index 51fe74fe7..9990a359e 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -72,6 +72,8 @@ import {
isDalle3,
showPlugins,
safeLocalStorage,
+ getModelSizes,
+ supportsCustomSize,
} from "../utils";
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
@@ -79,7 +81,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller";
-import { DalleSize, DalleQuality, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { Prompt, usePromptStore } from "../store/prompt";
import Locale from "../locales";
@@ -519,10 +521,11 @@ export function ChatActions(props: {
const [showSizeSelector, setShowSizeSelector] = useState(false);
const [showQualitySelector, setShowQualitySelector] = useState(false);
const [showStyleSelector, setShowStyleSelector] = useState(false);
- const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"];
+ const modelSizes = getModelSizes(currentModel);
const dalle3Qualitys: DalleQuality[] = ["standard", "hd"];
const dalle3Styles: DalleStyle[] = ["vivid", "natural"];
- const currentSize = session.mask.modelConfig?.size ?? "1024x1024";
+ const currentSize =
+ session.mask.modelConfig?.size ?? ("1024x1024" as ModelSize);
const currentQuality = session.mask.modelConfig?.quality ?? "standard";
const currentStyle = session.mask.modelConfig?.style ?? "vivid";
@@ -673,7 +676,7 @@ export function ChatActions(props: {
/>
)}
- {isDalle3(currentModel) && (
+ {supportsCustomSize(currentModel) && (
             <ChatAction
               onClick={() => setShowSizeSelector(true)}
               text={currentSize}
@@ -684,7 +687,7 @@ export function ChatActions(props: {
{showSizeSelector && (
             <Selector
-              items={dalle3Sizes.map((m) => ({
+ items={modelSizes.map((m) => ({
title: m,
value: m,
}))}
@@ -897,6 +900,12 @@ export function ShortcutKeyModal(props: { onClose: () => void }) {
title: Locale.Chat.ShortcutKey.showShortcutKey,
keys: isMac ? ["⌘", "/"] : ["Ctrl", "/"],
},
+ {
+ title: Locale.Chat.ShortcutKey.clearContext,
+ keys: isMac
+ ? ["⌘", "Shift", "backspace"]
+ : ["Ctrl", "Shift", "backspace"],
+ },
];
return (
@@ -1549,7 +1558,7 @@ function _Chat() {
const [showShortcutKeyModal, setShowShortcutKeyModal] = useState(false);
useEffect(() => {
- const handleKeyDown = (event: any) => {
+ const handleKeyDown = (event: KeyboardEvent) => {
// 打开新聊天 command + shift + o
if (
(event.metaKey || event.ctrlKey) &&
@@ -1600,14 +1609,30 @@ function _Chat() {
event.preventDefault();
setShowShortcutKeyModal(true);
}
+      // clear context: command + shift + backspace
+ else if (
+ (event.metaKey || event.ctrlKey) &&
+ event.shiftKey &&
+ event.key.toLowerCase() === "backspace"
+ ) {
+ event.preventDefault();
+ chatStore.updateTargetSession(session, (session) => {
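+        // toggle: pressing the shortcut again removes the clear-context divider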
+ if (session.clearContextIndex === session.messages.length) {
+ session.clearContextIndex = undefined;
+ } else {
+ session.clearContextIndex = session.messages.length;
+ session.memoryPrompt = ""; // will clear memory
+ }
+ });
+ }
};
- window.addEventListener("keydown", handleKeyDown);
+ document.addEventListener("keydown", handleKeyDown);
return () => {
- window.removeEventListener("keydown", handleKeyDown);
+ document.removeEventListener("keydown", handleKeyDown);
};
- }, [messages, chatStore, navigate]);
+ }, [messages, chatStore, navigate, session]);
const [showChatSidePanel, setShowChatSidePanel] = useState(false);
diff --git a/app/components/settings.tsx b/app/components/settings.tsx
index a74ff17b1..3b990ed2c 100644
--- a/app/components/settings.tsx
+++ b/app/components/settings.tsx
@@ -73,6 +73,7 @@ import {
Iflytek,
SAAS_CHAT_URL,
ChatGLM,
+ DeepSeek,
} from "../constant";
import { Prompt, SearchService, usePromptStore } from "../store/prompt";
import { ErrorBoundary } from "./error";
@@ -1197,6 +1198,47 @@ export function Settings() {
>
);
+ const deepseekConfigComponent = accessStore.provider ===
+ ServiceProvider.DeepSeek && (
+    <>
+      <ListItem
+        title={Locale.Settings.Access.DeepSeek.Endpoint.Title}
+        subTitle={
+          Locale.Settings.Access.DeepSeek.Endpoint.SubTitle +
+          DeepSeek.ExampleEndpoint
+        }
+      >
+        <input
+          aria-label={Locale.Settings.Access.DeepSeek.Endpoint.Title}
+          type="text"
+          value={accessStore.deepseekUrl}
+          placeholder={DeepSeek.ExampleEndpoint}
+          onChange={(e) =>
+            accessStore.update(
+              (access) => (access.deepseekUrl = e.currentTarget.value),
+            )
+          }
+        ></input>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.Access.DeepSeek.ApiKey.Title}
+        subTitle={Locale.Settings.Access.DeepSeek.ApiKey.SubTitle}
+      >
+        <PasswordInput
+          aria-label={Locale.Settings.Access.DeepSeek.ApiKey.Title}
+          value={accessStore.deepseekApiKey}
+          type="text"
+          placeholder={Locale.Settings.Access.DeepSeek.ApiKey.Placeholder}
+          onChange={(e) => {
+            accessStore.update(
+              (access) => (access.deepseekApiKey = e.currentTarget.value),
+            );
+          }}
+        />
+      </ListItem>
+    </>
+  );
+
const XAIConfigComponent = accessStore.provider === ServiceProvider.XAI && (
<>
diff --git a/app/components/sidebar.tsx b/app/components/sidebar.tsx
--- a/app/components/sidebar.tsx
+++ b/app/components/sidebar.tsx
const ChatList = dynamic(async () => (await import("./chat-list")).ChatList, {
loading: () => null,
});
@@ -219,7 +224,7 @@ export function SideBarTail(props: {
export function SideBar(props: { className?: string }) {
useHotKey();
const { onDragStart, shouldNarrow } = useDragSideBar();
- const [showPluginSelector, setShowPluginSelector] = useState(false);
+  const [showDiscoverySelector, setShowDiscoverySelector] = useState(false);
const navigate = useNavigate();
const config = useAppConfig();
const chatStore = useChatStore();
@@ -254,21 +259,21 @@ export function SideBar(props: { className?: string }) {
icon={}
text={shouldNarrow ? undefined : Locale.Discovery.Name}
className={styles["sidebar-bar-button"]}
- onClick={() => setShowPluginSelector(true)}
+            onClick={() => setShowDiscoverySelector(true)}
shadow
/>
- {showPluginSelector && (
+ {showDiscoverySelector && (
           <Selector
             items={[
-              ...PLUGINS.map((item) => {
+ ...DISCOVERY.map((item) => {
return {
title: item.name,
value: item.path,
};
}),
]}
- onClose={() => setShowPluginSelector(false)}
+            onClose={() => setShowDiscoverySelector(false)}
onSelection={(s) => {
navigate(s[0], { state: { fromHome: true } });
}}
diff --git a/app/config/server.ts b/app/config/server.ts
index 9d6b3c2b8..73faa8815 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -1,5 +1,6 @@
import md5 from "spark-md5";
import { DEFAULT_MODELS, DEFAULT_GA_ID } from "../constant";
+import { isGPT4Model } from "../utils/model";
declare global {
namespace NodeJS {
@@ -22,6 +23,7 @@ declare global {
DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
CUSTOM_MODELS?: string; // to control custom models
DEFAULT_MODEL?: string; // to control default model in every new chat window
+ VISION_MODELS?: string; // to control vision models
// stability only
STABILITY_URL?: string;
@@ -71,6 +73,9 @@ declare global {
IFLYTEK_API_KEY?: string;
IFLYTEK_API_SECRET?: string;
+ DEEPSEEK_URL?: string;
+ DEEPSEEK_API_KEY?: string;
+
// xai only
XAI_URL?: string;
XAI_API_KEY?: string;
@@ -124,23 +129,16 @@ export const getServerSideConfig = () => {
const disableGPT4 = !!process.env.DISABLE_GPT4;
let customModels = process.env.CUSTOM_MODELS ?? "";
let defaultModel = process.env.DEFAULT_MODEL ?? "";
+ let visionModels = process.env.VISION_MODELS ?? "";
if (disableGPT4) {
if (customModels) customModels += ",";
- customModels += DEFAULT_MODELS.filter(
- (m) =>
- (m.name.startsWith("gpt-4") || m.name.startsWith("chatgpt-4o") || m.name.startsWith("o1")) &&
- !m.name.startsWith("gpt-4o-mini"),
- )
+ customModels += DEFAULT_MODELS.filter((m) => isGPT4Model(m.name))
.map((m) => "-" + m.name)
.join(",");
- if (
- (defaultModel.startsWith("gpt-4") ||
- defaultModel.startsWith("chatgpt-4o") ||
- defaultModel.startsWith("o1")) &&
- !defaultModel.startsWith("gpt-4o-mini")
- )
+ if (defaultModel && isGPT4Model(defaultModel)) {
defaultModel = "";
+ }
}
const isStability = !!process.env.STABILITY_API_KEY;
@@ -155,6 +153,7 @@ export const getServerSideConfig = () => {
const isAlibaba = !!process.env.ALIBABA_API_KEY;
const isMoonshot = !!process.env.MOONSHOT_API_KEY;
const isIflytek = !!process.env.IFLYTEK_API_KEY;
+ const isDeepSeek = !!process.env.DEEPSEEK_API_KEY;
const isXAI = !!process.env.XAI_API_KEY;
const isChatGLM = !!process.env.CHATGLM_API_KEY;
// const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
@@ -219,6 +218,10 @@ export const getServerSideConfig = () => {
iflytekApiKey: process.env.IFLYTEK_API_KEY,
iflytekApiSecret: process.env.IFLYTEK_API_SECRET,
+ isDeepSeek,
+ deepseekUrl: process.env.DEEPSEEK_URL,
+ deepseekApiKey: getApiKey(process.env.DEEPSEEK_API_KEY),
+
isXAI,
xaiUrl: process.env.XAI_URL,
xaiApiKey: getApiKey(process.env.XAI_API_KEY),
@@ -248,6 +251,7 @@ export const getServerSideConfig = () => {
disableFastLink: !!process.env.DISABLE_FAST_LINK,
customModels,
defaultModel,
+ visionModels,
allowedWebDavEndpoints,
};
};
diff --git a/app/constant.ts b/app/constant.ts
index 264fe0013..f34d407c1 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -28,6 +28,8 @@ export const TENCENT_BASE_URL = "https://hunyuan.tencentcloudapi.com";
export const MOONSHOT_BASE_URL = "https://api.moonshot.cn";
export const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com";
+export const DEEPSEEK_BASE_URL = "https://api.deepseek.com";
+
export const XAI_BASE_URL = "https://api.x.ai";
export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";
@@ -65,6 +67,7 @@ export enum ApiPath {
Artifacts = "/api/artifacts",
XAI = "/api/xai",
ChatGLM = "/api/chatglm",
+ DeepSeek = "/api/deepseek",
}
export enum SlotID {
@@ -119,6 +122,7 @@ export enum ServiceProvider {
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
+ DeepSeek = "DeepSeek",
}
// Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings
@@ -143,6 +147,7 @@ export enum ModelProvider {
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
+ DeepSeek = "DeepSeek",
}
export const Stability = {
@@ -225,6 +230,11 @@ export const Iflytek = {
ChatPath: "v1/chat/completions",
};
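+// DeepSeek exposes an OpenAI-compatible API; ChatPath is resolved against DEEPSEEK_BASE_URL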
+export const DeepSeek = {
+ ExampleEndpoint: DEEPSEEK_BASE_URL,
+ ChatPath: "chat/completions",
+};
+
export const XAI = {
ExampleEndpoint: XAI_BASE_URL,
ChatPath: "v1/chat/completions",
@@ -233,6 +243,8 @@ export const XAI = {
export const ChatGLM = {
ExampleEndpoint: CHATGLM_BASE_URL,
ChatPath: "api/paas/v4/chat/completions",
+ ImagePath: "api/paas/v4/images/generations",
+ VideoPath: "api/paas/v4/videos/generations",
};
export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
@@ -275,6 +287,8 @@ export const KnowledgeCutOffDate: Record = {
// it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
"gemini-pro": "2023-12",
"gemini-pro-vision": "2023-12",
+ "deepseek-chat": "2024-07",
+ "deepseek-coder": "2024-07",
};
export const DEFAULT_TTS_ENGINE = "Edge-TTS";
@@ -291,6 +305,23 @@ export const DEFAULT_TTS_VOICES = [
"shimmer",
];
+export const VISION_MODEL_REGEXES = [
+ /vision/,
+ /gpt-4o/,
+ /claude-3/,
+ /gemini-1\.5/,
+ /gemini-exp/,
+ /gemini-2\.0/,
+ /learnlm/,
+ /qwen-vl/,
+ /qwen2-vl/,
+ /gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
+ /^dall-e-3$/, // Matches exactly "dall-e-3"
+ /glm-4v/,
+];
+
+export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
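+// e.g. "gpt-4-turbo-2024-04-09" matches /gpt-4-turbo(?!.*preview)/ and counts as a
+// vision model, while "gpt-4-turbo-preview" does not; "claude-3-5-haiku-20241022"
+// matches /claude-3/ but is rejected by EXCLUDE_VISION_MODEL_REGEXES.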
+
const openaiModels = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
@@ -317,13 +348,23 @@ const openaiModels = [
];
const googleModels = [
- "gemini-1.0-pro",
+ "gemini-1.0-pro", // Deprecated on 2/15/2025
"gemini-1.5-pro-latest",
+ "gemini-1.5-pro",
+ "gemini-1.5-pro-002",
+ "gemini-1.5-pro-exp-0827",
"gemini-1.5-flash-latest",
+ "gemini-1.5-flash-8b-latest",
+ "gemini-1.5-flash",
+ "gemini-1.5-flash-8b",
+ "gemini-1.5-flash-002",
+ "gemini-1.5-flash-exp-0827",
+ "learnlm-1.5-pro-experimental",
"gemini-exp-1114",
"gemini-exp-1121",
- "learnlm-1.5-pro-experimental",
- "gemini-pro-vision",
+ "gemini-exp-1206",
+ "gemini-2.0-flash-exp",
+ "gemini-2.0-flash-thinking-exp-1219",
];
const anthropicModels = [
@@ -394,6 +435,8 @@ const iflytekModels = [
"4.0Ultra",
];
+const deepseekModels = ["deepseek-chat", "deepseek-coder"];
+
const xAIModes = ["grok-beta"];
const chatglmModels = [
@@ -405,6 +448,15 @@ const chatglmModels = [
"glm-4-long",
"glm-4-flashx",
"glm-4-flash",
+ "glm-4v-plus",
+ "glm-4v",
+ "glm-4v-flash", // free
+ "cogview-3-plus",
+ "cogview-3",
+ "cogview-3-flash", // free
+  // video generation needs result polling, which is not supported yet
+ // "cogvideox",
+ // "cogvideox-flash", // free
];
let seq = 1000; // 内置的模型序号生成器从1000开始
@@ -541,6 +593,17 @@ export const DEFAULT_MODELS = [
sorted: 12,
},
})),
+ ...deepseekModels.map((name) => ({
+ name,
+ available: true,
+ sorted: seq++,
+ provider: {
+ id: "deepseek",
+ providerName: "DeepSeek",
+ providerType: "deepseek",
+ sorted: 13,
+ },
+ })),
] as const;
export const CHAT_PAGE_SIZE = 15;
@@ -560,11 +623,6 @@ export const internalAllowedWebDavEndpoints = [
];
export const DEFAULT_GA_ID = "G-89WN60ZK2E";
-export const PLUGINS = [
-  { name: "Plugins", path: Path.Plugins },
-  { name: "Stable Diffusion", path: Path.Sd },
-  { name: "Search Chat", path: Path.SearchChat },
-];
+export const DISCOVERY = [
+  { name: "Plugins", path: Path.Plugins },
+  { name: "Stable Diffusion", path: Path.Sd },
+  { name: "Search Chat", path: Path.SearchChat },
+];
export const SAAS_CHAT_URL = "https://kaiyanmedical.com";
export const SAAS_CHAT_UTM_URL = "https://kaiyanmedical.com/chat?utm=github";
diff --git a/app/locales/cn.ts b/app/locales/cn.ts
index 47be019a8..25f49be7d 100644
--- a/app/locales/cn.ts
+++ b/app/locales/cn.ts
@@ -106,6 +106,7 @@ const cn = {
copyLastMessage: "复制最后一个回复",
copyLastCode: "复制最后一个代码块",
showShortcutKey: "显示快捷方式",
+ clearContext: "清除上下文",
},
},
Export: {
@@ -176,7 +177,7 @@ const cn = {
},
},
Lang: {
- Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language`
+ Name: "Language", // 注意:如果要添加新的翻译,请不要翻译此值,将它保留为 `Language`
All: "所有语言",
},
Avatar: "头像",
@@ -462,6 +463,17 @@ const cn = {
SubTitle: "样例:",
},
},
+ DeepSeek: {
+ ApiKey: {
+ Title: "接口密钥",
+ SubTitle: "使用自定义DeepSeek API Key",
+ Placeholder: "DeepSeek API Key",
+ },
+ Endpoint: {
+ Title: "接口地址",
+ SubTitle: "样例:",
+ },
+ },
XAI: {
ApiKey: {
Title: "接口密钥",
@@ -630,7 +642,7 @@ const cn = {
Sysmessage: "你是一个助手",
},
SearchChat: {
- Name: "搜索",
+ Name: "搜索聊天记录",
Page: {
Title: "搜索聊天记录",
Search: "输入搜索关键词",
diff --git a/app/locales/en.ts b/app/locales/en.ts
index d3b684ae4..9b09101d4 100644
--- a/app/locales/en.ts
+++ b/app/locales/en.ts
@@ -103,6 +103,7 @@ const en: LocaleType = {
copyLastMessage: "Copy Last Reply",
copyLastCode: "Copy Last Code Block",
showShortcutKey: "Show Shortcuts",
+ clearContext: "Clear Context",
},
},
Export: {
@@ -442,6 +443,17 @@ const en: LocaleType = {
SubTitle: "Example: ",
},
},
+ DeepSeek: {
+ ApiKey: {
+ Title: "DeepSeek API Key",
+ SubTitle: "Use a custom DeepSeek API Key",
+ Placeholder: "DeepSeek API Key",
+ },
+ Endpoint: {
+ Title: "Endpoint Address",
+ SubTitle: "Example: ",
+ },
+ },
XAI: {
ApiKey: {
Title: "XAI API Key",
diff --git a/app/locales/tw.ts b/app/locales/tw.ts
index c800ad15d..83dd547b8 100644
--- a/app/locales/tw.ts
+++ b/app/locales/tw.ts
@@ -100,6 +100,7 @@ const tw = {
copyLastMessage: "複製最後一個回覆",
copyLastCode: "複製最後一個程式碼區塊",
showShortcutKey: "顯示快捷方式",
+ clearContext: "清除上下文",
},
},
Export: {
@@ -485,7 +486,7 @@ const tw = {
},
},
SearchChat: {
- Name: "搜尋",
+ Name: "搜尋聊天記錄",
Page: {
Title: "搜尋聊天記錄",
Search: "輸入搜尋關鍵詞",
diff --git a/app/masks/cn.ts b/app/masks/cn.ts
index ed507d734..64842f6e8 100644
--- a/app/masks/cn.ts
+++ b/app/masks/cn.ts
@@ -3,7 +3,7 @@ import { BuiltinMask } from "./typing";
export const CN_MASKS: BuiltinMask[] = [
{
avatar: "1f5bc-fe0f",
- name: "以文搜图",
+ name: "AI文生图",
context: [
{
id: "text-to-pic-0",
@@ -28,7 +28,7 @@ export const CN_MASKS: BuiltinMask[] = [
id: "text-to-pic-3",
role: "system",
content:
- "助手善于判断用户意图,当确定需要提供图片时,助手会变得沉默寡言,只使用以下格式输出markdown图片:,因为这个语法可以自动按照提示生成并渲染图片。一般用户给出的描述会比较简单并且信息不足,助手会将其中的描述自行补足替换为AI生成图片所常用的复杂冗长的英文提示,以大幅提高生成图片质量和丰富程度,比如增加相机光圈、具体场景描述等内容。助手会避免用代码块或原始块包围markdown标记,因为那样只会渲染出代码块或原始块而不是图片。",
+ "助手善于判断用户意图,当确定需要提供图片时,助手会变得沉默寡言,只使用以下格式输出markdown图片:,因为这个语法可以自动按照提示生成并渲染图片。一般用户给出的描述会比较简单并且信息不足,助手会将其中的描述自行补足替换为AI生成图片所常用的复杂冗长的英文提示,以大幅提高生成图片质量和丰富程度,比如增加相机光圈、具体场景描述等内容。助手会避免用代码块或原始块包围markdown标记,因为那样只会渲染出代码块或原始块而不是图片。url中的空格等符号需要转义。",
date: "",
},
],
diff --git a/app/store/access.ts b/app/store/access.ts
index 4796b2fe8..1fed5dfed 100644
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -13,6 +13,7 @@ import {
MOONSHOT_BASE_URL,
STABILITY_BASE_URL,
IFLYTEK_BASE_URL,
+ DEEPSEEK_BASE_URL,
XAI_BASE_URL,
CHATGLM_BASE_URL,
} from "../constant";
@@ -47,6 +48,8 @@ const DEFAULT_STABILITY_URL = isApp ? STABILITY_BASE_URL : ApiPath.Stability;
const DEFAULT_IFLYTEK_URL = isApp ? IFLYTEK_BASE_URL : ApiPath.Iflytek;
+const DEFAULT_DEEPSEEK_URL = isApp ? DEEPSEEK_BASE_URL : ApiPath.DeepSeek;
+
const DEFAULT_XAI_URL = isApp ? XAI_BASE_URL : ApiPath.XAI;
const DEFAULT_CHATGLM_URL = isApp ? CHATGLM_BASE_URL : ApiPath.ChatGLM;
@@ -108,6 +111,10 @@ const DEFAULT_ACCESS_STATE = {
iflytekApiKey: "",
iflytekApiSecret: "",
+ // deepseek
+ deepseekUrl: DEFAULT_DEEPSEEK_URL,
+ deepseekApiKey: "",
+
// xai
xaiUrl: DEFAULT_XAI_URL,
xaiApiKey: "",
@@ -124,6 +131,7 @@ const DEFAULT_ACCESS_STATE = {
disableFastLink: false,
customModels: "",
defaultModel: "",
+ visionModels: "",
// tts config
edgeTTSVoiceName: "zh-CN-YunxiNeural",
@@ -138,7 +146,10 @@ export const useAccessStore = createPersistStore(
return get().needCode;
},
-
+ getVisionModels() {
+ this.fetch();
+ return get().visionModels;
+ },
edgeVoiceName() {
this.fetch();
@@ -183,6 +194,9 @@ export const useAccessStore = createPersistStore(
isValidIflytek() {
return ensure(get(), ["iflytekApiKey"]);
},
+ isValidDeepSeek() {
+ return ensure(get(), ["deepseekApiKey"]);
+ },
isValidXAI() {
return ensure(get(), ["xaiApiKey"]);
@@ -207,6 +221,7 @@ export const useAccessStore = createPersistStore(
this.isValidTencent() ||
this.isValidMoonshot() ||
this.isValidIflytek() ||
+ this.isValidDeepSeek() ||
this.isValidXAI() ||
this.isValidChatGLM() ||
!this.enabledAccessControl() ||
diff --git a/app/store/config.ts b/app/store/config.ts
index 4ed335272..9b08fb734 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -1,5 +1,5 @@
import { LLMModel } from "../client/api";
-import { DalleSize, DalleQuality, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { getClientConfig } from "../config/client";
import {
DEFAULT_INPUT_TEMPLATE,
@@ -78,7 +78,7 @@ export const DEFAULT_CONFIG = {
compressProviderName: "",
enableInjectSystemPrompts: true,
template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
- size: "1024x1024" as DalleSize,
+ size: "1024x1024" as ModelSize,
quality: "standard" as DalleQuality,
style: "vivid" as DalleStyle,
},
diff --git a/app/typing.ts b/app/typing.ts
index 0336be75d..ecb327936 100644
--- a/app/typing.ts
+++ b/app/typing.ts
@@ -11,3 +11,14 @@ export interface RequestMessage {
export type DalleSize = "1024x1024" | "1792x1024" | "1024x1792";
export type DalleQuality = "standard" | "hd";
export type DalleStyle = "vivid" | "natural";
+
+export type ModelSize =
+ | "1024x1024"
+ | "1792x1024"
+ | "1024x1792"
+ | "768x1344"
+ | "864x1152"
+ | "1344x768"
+ | "1152x864"
+ | "1440x720"
+ | "720x1440";
diff --git a/app/utils.ts b/app/utils.ts
index b62bc126d..4f5b7b0b7 100644
--- a/app/utils.ts
+++ b/app/utils.ts
@@ -5,6 +5,9 @@ import { RequestMessage } from "./client/api";
import { ServiceProvider } from "./constant";
// import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
import { fetch as tauriStreamFetch } from "./utils/stream";
+import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
+import { useAccessStore } from "./store";
+import { ModelSize } from "./typing";
export function trimTopic(topic: string) {
// Fix an issue where double quotes still show in the Indonesian language
@@ -252,27 +255,16 @@ export function getMessageImages(message: RequestMessage): string[] {
}
export function isVisionModel(model: string) {
- // Note: This is a better way using the TypeScript feature instead of `&&` or `||` (ts v5.5.0-dev.20240314 I've been using)
-
- const excludeKeywords = ["claude-3-5-haiku-20241022"];
- const visionKeywords = [
- "vision",
- "gpt-4o",
- "claude-3",
- "gemini-1.5",
- "gemini-exp",
- "learnlm",
- "qwen-vl",
- "qwen2-vl",
- ];
- const isGpt4Turbo =
- model.includes("gpt-4-turbo") && !model.includes("preview");
-
+ const visionModels = useAccessStore.getState().visionModels;
+ const envVisionModels = visionModels
+ ?.split(",")
+ .map((m) => m.trim());
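+  // e.g. VISION_MODELS="llava-1.5, my-custom-model" -> ["llava-1.5", "my-custom-model"]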
+ if (envVisionModels?.includes(model)) {
+ return true;
+ }
return (
- !excludeKeywords.some((keyword) => model.includes(keyword)) &&
- (visionKeywords.some((keyword) => model.includes(keyword)) ||
- isGpt4Turbo ||
- isDalle3(model))
+ !EXCLUDE_VISION_MODEL_REGEXES.some((regex) => regex.test(model)) &&
+ VISION_MODEL_REGEXES.some((regex) => regex.test(model))
);
}
@@ -280,6 +272,28 @@ export function isDalle3(model: string) {
return "dall-e-3" === model;
}
+export function getModelSizes(model: string): ModelSize[] {
+ if (isDalle3(model)) {
+ return ["1024x1024", "1792x1024", "1024x1792"];
+ }
+ if (model.toLowerCase().includes("cogview")) {
+ return [
+ "1024x1024",
+ "768x1344",
+ "864x1152",
+ "1344x768",
+ "1152x864",
+ "1440x720",
+ "720x1440",
+ ];
+ }
+ return [];
+}
+
+export function supportsCustomSize(model: string): boolean {
+ return getModelSizes(model).length > 0;
+}
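+// e.g. supportsCustomSize("dall-e-3") === true (three DALL·E sizes), while
+// supportsCustomSize("gpt-4o") === false since getModelSizes returns [] for it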
+
export function showPlugins(provider: ServiceProvider, model: string) {
if (
provider == ServiceProvider.OpenAI ||
diff --git a/app/utils/model.ts b/app/utils/model.ts
index a1b7df1b6..a1a38a2f8 100644
--- a/app/utils/model.ts
+++ b/app/utils/model.ts
@@ -202,3 +202,52 @@ export function isModelAvailableInServer(
const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
return modelTable[fullName]?.available === false;
}
+
+/**
+ * Check if the model name is a GPT-4 related model
+ *
+ * @param modelName The name of the model to check
+ * @returns True if the model is a GPT-4 related model (excluding gpt-4o-mini)
+ */
+export function isGPT4Model(modelName: string): boolean {
+ return (
+ (modelName.startsWith("gpt-4") ||
+ modelName.startsWith("chatgpt-4o") ||
+ modelName.startsWith("o1")) &&
+ !modelName.startsWith("gpt-4o-mini")
+ );
+}
+
+/**
+ * Checks if a model is not available on any of the specified providers in the server.
+ *
+ * @param {string} customModels - A string of custom models, comma-separated.
+ * @param {string} modelName - The name of the model to check.
+ * @param {string|string[]} providerNames - A string or array of provider names to check against.
+ *
+ * @returns {boolean} True if the model is not available on any of the specified providers, false otherwise.
+ */
+export function isModelNotavailableInServer(
+ customModels: string,
+ modelName: string,
+ providerNames: string | string[],
+): boolean {
+ // Check DISABLE_GPT4 environment variable
+ if (
+ process.env.DISABLE_GPT4 === "1" &&
+ isGPT4Model(modelName.toLowerCase())
+ ) {
+ return true;
+ }
+
+ const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
+
+ const providerNamesArray = Array.isArray(providerNames)
+ ? providerNames
+ : [providerNames];
+ for (const providerName of providerNamesArray) {
+ const fullName = `${modelName}@${providerName.toLowerCase()}`;
+ if (modelTable?.[fullName]?.available === true) return false;
+ }
+ return true;
+}
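+// Example: with customModels = "-all,gpt-4@openai", only "gpt-4" on the OpenAI
+// provider stays available, so isModelNotavailableInServer(customModels, "gpt-4",
+// ["OpenAI"]) returns false and any other model/provider pair returns true.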
diff --git a/package.json b/package.json
index 39c04368a..12a26cdd3 100644
--- a/package.json
+++ b/package.json
@@ -62,7 +62,7 @@
"@tauri-apps/cli": "1.5.11",
"@testing-library/dom": "^10.4.0",
"@testing-library/jest-dom": "^6.6.3",
- "@testing-library/react": "^16.0.1",
+ "@testing-library/react": "^16.1.0",
"@types/jest": "^29.5.14",
"@types/js-yaml": "4.0.9",
"@types/lodash-es": "^4.17.12",
diff --git a/test/model-available.test.ts b/test/model-available.test.ts
new file mode 100644
index 000000000..5c9fa9977
--- /dev/null
+++ b/test/model-available.test.ts
@@ -0,0 +1,80 @@
+import { isModelNotavailableInServer } from "../app/utils/model";
+
+describe("isModelNotavailableInServer", () => {
+ test("test model will return false, which means the model is available", () => {
+ const customModels = "";
+ const modelName = "gpt-4";
+ const providerNames = "OpenAI";
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(false);
+ });
+
+ test("test model will return true when model is not available in custom models", () => {
+ const customModels = "-all,gpt-4o-mini";
+ const modelName = "gpt-4";
+ const providerNames = "OpenAI";
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(true);
+ });
+
+ test("should respect DISABLE_GPT4 setting", () => {
+ process.env.DISABLE_GPT4 = "1";
+ const result = isModelNotavailableInServer("", "gpt-4", "OpenAI");
+ expect(result).toBe(true);
+ });
+
+ test("should handle empty provider names", () => {
+ const result = isModelNotavailableInServer("-all,gpt-4", "gpt-4", "");
+ expect(result).toBe(true);
+ });
+
+ test("should be case insensitive for model names", () => {
+ const result = isModelNotavailableInServer("-all,GPT-4", "gpt-4", "OpenAI");
+ expect(result).toBe(true);
+ });
+
+ test("support passing multiple providers, model unavailable on one of the providers will return true", () => {
+ const customModels = "-all,gpt-4@google";
+ const modelName = "gpt-4";
+ const providerNames = ["OpenAI", "Azure"];
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(true);
+ });
+
+  // FIXME: this test case is broken and needs to be fixed
+ // test("support passing multiple providers, model available on one of the providers will return false", () => {
+ // const customModels = "-all,gpt-4@google";
+ // const modelName = "gpt-4";
+ // const providerNames = ["OpenAI", "Google"];
+ // const result = isModelNotavailableInServer(
+ // customModels,
+ // modelName,
+ // providerNames,
+ // );
+ // expect(result).toBe(false);
+ // });
+
+ test("test custom model without setting provider", () => {
+ const customModels = "-all,mistral-large";
+ const modelName = "mistral-large";
+ const providerNames = modelName;
+ const result = isModelNotavailableInServer(
+ customModels,
+ modelName,
+ providerNames,
+ );
+ expect(result).toBe(false);
+ });
+});
diff --git a/test/vision-model-checker.test.ts b/test/vision-model-checker.test.ts
new file mode 100644
index 000000000..734e992d8
--- /dev/null
+++ b/test/vision-model-checker.test.ts
@@ -0,0 +1,67 @@
+import { isVisionModel } from "../app/utils";
+
+describe("isVisionModel", () => {
+ const originalEnv = process.env;
+
+ beforeEach(() => {
+ jest.resetModules();
+ process.env = { ...originalEnv };
+ });
+
+ afterEach(() => {
+ process.env = originalEnv;
+ });
+
+ test("should identify vision models using regex patterns", () => {
+ const visionModels = [
+ "gpt-4-vision",
+ "claude-3-opus",
+ "gemini-1.5-pro",
+ "gemini-2.0",
+ "gemini-exp-vision",
+ "learnlm-vision",
+ "qwen-vl-max",
+ "qwen2-vl-max",
+ "gpt-4-turbo",
+ "dall-e-3",
+ ];
+
+ visionModels.forEach((model) => {
+ expect(isVisionModel(model)).toBe(true);
+ });
+ });
+
+ test("should exclude specific models", () => {
+ expect(isVisionModel("claude-3-5-haiku-20241022")).toBe(false);
+ });
+
+ test("should not identify non-vision models", () => {
+ const nonVisionModels = [
+ "gpt-3.5-turbo",
+ "gpt-4-turbo-preview",
+ "claude-2",
+ "regular-model",
+ ];
+
+ nonVisionModels.forEach((model) => {
+ expect(isVisionModel(model)).toBe(false);
+ });
+ });
+
+ test("should identify models from VISION_MODELS env var", () => {
+ process.env.VISION_MODELS = "custom-vision-model,another-vision-model";
+
+ expect(isVisionModel("custom-vision-model")).toBe(true);
+ expect(isVisionModel("another-vision-model")).toBe(true);
+ expect(isVisionModel("unrelated-model")).toBe(false);
+ });
+
+ test("should handle empty or missing VISION_MODELS", () => {
+ process.env.VISION_MODELS = "";
+ expect(isVisionModel("unrelated-model")).toBe(false);
+
+ delete process.env.VISION_MODELS;
+ expect(isVisionModel("unrelated-model")).toBe(false);
+ expect(isVisionModel("gpt-4-vision")).toBe(true);
+ });
+});
\ No newline at end of file
diff --git a/yarn.lock b/yarn.lock
index b8ecc3f07..d49b9f222 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -2807,10 +2807,10 @@
lodash "^4.17.21"
redent "^3.0.0"
-"@testing-library/react@^16.0.1":
- version "16.0.1"
- resolved "https://registry.yarnpkg.com/@testing-library/react/-/react-16.0.1.tgz#29c0ee878d672703f5e7579f239005e4e0faa875"
- integrity sha512-dSmwJVtJXmku+iocRhWOUFbrERC76TX2Mnf0ATODz8brzAZrMBbzLwQixlBSanZxR6LddK3eiwpSFZgDET1URg==
+"@testing-library/react@^16.1.0":
+ version "16.1.0"
+ resolved "https://registry.yarnpkg.com/@testing-library/react/-/react-16.1.0.tgz#aa0c61398bac82eaf89776967e97de41ac742d71"
+ integrity sha512-Q2ToPvg0KsVL0ohND9A3zLJWcOXXcO8IDu3fj11KhNt0UlCWyFyvnCIBkd12tidB2lkiVRG8VFqdhcqhqnAQtg==
dependencies:
"@babel/runtime" "^7.12.5"