diff --git a/README.md b/README.md
index 73650ecc0..70fee646a 100644
--- a/README.md
+++ b/README.md
@@ -190,6 +190,7 @@ Cloudflare R2 access key ID; required when using the `DALL-E` plugin.
### `R2_SECRET_ACCESS_KEY` (optional)
Cloudflare R2 secret access key; required when using the `DALL-E` plugin.
+
### `R2_BUCKET` (optional)
Cloudflare R2 bucket name; required when using the `DALL-E` plugin.
diff --git a/README_CN.md b/README_CN.md
index 13b97417d..d8e9553e1 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -106,6 +106,12 @@ OpenAI API proxy URL; fill this in if you have manually configured an OpenAI API proxy.
Set this environment variable to 1 to disable parsing preset settings from links.
+### `CUSTOM_MODELS` (optional)
+
+> Example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` adds `qwen-7b-chat` and `glm-6b` to the model list and removes `gpt-3.5-turbo` from it.
+
+Controls the model list: use `+` to add a model and `-` to hide one, with entries separated by commas (see the sketch after this section).
+
## Development
Click the button below to start development:
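To make the `CUSTOM_MODELS` syntax concrete, here is a minimal standalone sketch of how such a flag string is interpreted; it mirrors the `collectModelTable` helper introduced in `app/utils/model.ts` further down in this diff:

```ts
// Sketch: interpreting "+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo".
// "+name" enables a model, "-name" hides it, and a bare name enables it.
const customModels = "+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo";

const table: Record<string, boolean> = {};
for (const entry of customModels.split(",").filter((v) => v.length > 0)) {
  if (entry.startsWith("+")) table[entry.slice(1)] = true;
  else if (entry.startsWith("-")) table[entry.slice(1)] = false;
  else table[entry] = true;
}

console.log(table);
// => { "qwen-7b-chat": true, "glm-6b": true, "gpt-3.5-turbo": false }
```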
diff --git a/app/api/common.ts b/app/api/common.ts
index 0af7761d8..a1decd42f 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -1,10 +1,9 @@
import { NextRequest, NextResponse } from "next/server";
+import { getServerSideConfig } from "../config/server";
+import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant";
+import { collectModelTable, collectModels } from "../utils/model";
-export const OPENAI_URL = "api.openai.com";
-const DEFAULT_PROTOCOL = "https";
-const PROTOCOL = process.env.PROTOCOL || DEFAULT_PROTOCOL;
-const BASE_URL = process.env.BASE_URL || OPENAI_URL;
-const DISABLE_GPT4 = !!process.env.DISABLE_GPT4;
+const serverConfig = getServerSideConfig();
export async function requestOpenai(req: NextRequest) {
const controller = new AbortController();
@@ -14,10 +13,10 @@ export async function requestOpenai(req: NextRequest) {
"",
);
- let baseUrl = BASE_URL;
+ let baseUrl = serverConfig.baseUrl ?? OPENAI_BASE_URL;
if (!baseUrl.startsWith("http")) {
- baseUrl = `${PROTOCOL}://${baseUrl}`;
+ baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
@@ -26,10 +25,7 @@ export async function requestOpenai(req: NextRequest) {
console.log("[Proxy] ", openaiPath);
console.log("[Base Url]", baseUrl);
-
- if (process.env.OPENAI_ORG_ID) {
- console.log("[Org ID]", process.env.OPENAI_ORG_ID);
- }
+ console.log("[Org ID]", serverConfig.openaiOrgId);
const timeoutId = setTimeout(
() => {
@@ -58,18 +54,23 @@ export async function requestOpenai(req: NextRequest) {
};
// #1815 try to refuse gpt4 request
- if (DISABLE_GPT4 && req.body) {
+ if (serverConfig.customModels && req.body) {
try {
+ const modelTable = collectModelTable(
+ DEFAULT_MODELS,
+ serverConfig.customModels,
+ );
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
- const jsonBody = JSON.parse(clonedBody);
+ const jsonBody = JSON.parse(clonedBody) as { model?: string };
- if ((jsonBody?.model ?? "").includes("gpt-4")) {
+      // refuse the request only when the model is explicitly marked unavailable (undefined means allowed)
+ if (modelTable[jsonBody?.model ?? ""] === false) {
return NextResponse.json(
{
error: true,
- message: "you are not allowed to use gpt-4 model",
+ message: `you are not allowed to use ${jsonBody?.model} model`,
},
{
status: 403,
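For reference, the guard above reduces to the following check — a hedged sketch with an invented helper name, assuming the same imports as in this diff. Only an explicit `false` entry in the table refuses a request, so model names the table has never seen pass through:

```ts
import { DEFAULT_MODELS } from "../constant";
import { collectModelTable } from "../utils/model";

// Returns true when the proxy should let the request through.
function isModelAllowed(model: string, customModels: string): boolean {
  const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
  // `undefined` (unknown model) is allowed; only `false` is refused.
  return modelTable[model] !== false;
}

isModelAllowed("gpt-4", "-gpt-4"); // => false, answered with a 403
isModelAllowed("my-local-model", "-gpt-4"); // => true, passes through
```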
diff --git a/app/api/config/route.ts b/app/api/config/route.ts
index 44af8d3b9..db84fba17 100644
--- a/app/api/config/route.ts
+++ b/app/api/config/route.ts
@@ -12,6 +12,7 @@ const DANGER_CONFIG = {
disableGPT4: serverConfig.disableGPT4,
hideBalanceQuery: serverConfig.hideBalanceQuery,
disableFastLink: serverConfig.disableFastLink,
+ customModels: serverConfig.customModels,
};
declare global {
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 378994e4e..79e2e8385 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -77,6 +77,7 @@ export class ChatGPTApi implements LLMApi {
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
+ max_tokens: Math.max(modelConfig.max_tokens, 1024),
};
console.log("[Request] openai payload: ", requestPayload);
diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx
index 7ba555852..f76b369f1 100644
--- a/app/components/chat-list.tsx
+++ b/app/components/chat-list.tsx
@@ -18,6 +18,7 @@ import { MaskAvatar } from "./mask";
import { Mask } from "../store/mask";
import { useRef, useEffect } from "react";
import { showConfirm } from "./ui-lib";
+import { useMobileScreen } from "../utils";
export function ChatItem(props: {
onClick?: () => void;
@@ -80,7 +81,11 @@ export function ChatItem(props: {
        <div
          className={styles["chat-item-delete"]}
-          onClickCapture={props.onDelete}
+          onClickCapture={(e) => {
+            props.onDelete?.();
+            e.preventDefault();
+            e.stopPropagation();
+          }}
        >
@@ -101,6 +106,7 @@ export function ChatList(props: { narrow?: boolean }) {
);
const chatStore = useChatStore();
const navigate = useNavigate();
+ const isMobileScreen = useMobileScreen();
const onDragEnd: OnDragEndResponder = (result) => {
const { destination, source } = result;
@@ -142,7 +148,7 @@ export function ChatList(props: { narrow?: boolean }) {
}}
onDelete={async () => {
if (
- !props.narrow ||
+ (!props.narrow && !isMobileScreen) ||
(await showConfirm(Locale.Home.DeleteChat))
) {
chatStore.deleteSession(i);
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index 6b4b88972..65cd99abd 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -91,6 +91,7 @@ import { ChatCommandPrefix, useChatCommand, useCommand } from "../command";
import { prettyObject } from "../utils/format";
import { ExportMessageModal } from "./exporter";
import { getClientConfig } from "../config/client";
+import { useAllModels } from "../utils/hooks";
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
  loading: () => <LoadingIcon />,
@@ -146,6 +147,7 @@ export function SessionConfigModel(props: { onClose: () => void }) {
        extraListItems={
          session.mask.modelConfig.sendMemory ? (
            <ListItem
+              className="copyable"
@@ -440,14 +442,9 @@ export function ChatActions(props: {
// switch model
const currentModel = chatStore.currentSession().mask.modelConfig.model;
- const models = useMemo(
- () =>
- config
- .allModels()
- .filter((m) => m.available)
- .map((m) => m.name),
- [config],
- );
+ const models = useAllModels()
+ .filter((m) => m.available)
+ .map((m) => m.name);
const [showModelSelector, setShowModelSelector] = useState(false);
return (
diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx
index 63950a40d..1c730e144 100644
--- a/app/components/model-config.tsx
+++ b/app/components/model-config.tsx
@@ -1,14 +1,15 @@
-import { ModalConfigValidator, ModelConfig, useAppConfig } from "../store";
+import { ModalConfigValidator, ModelConfig } from "../store";
import Locale from "../locales";
import { InputRange } from "./input-range";
import { ListItem, Select } from "./ui-lib";
+import { useAllModels } from "../utils/hooks";
export function ModelConfigList(props: {
modelConfig: ModelConfig;
updateConfig: (updater: (config: ModelConfig) => void) => void;
}) {
- const config = useAppConfig();
+ const allModels = useAllModels();
return (
<>
@@ -24,7 +25,7 @@ export function ModelConfigList(props: {
);
}}
>
- {config.allModels().map((v, i) => (
+ {allModels.map((v, i) => (
@@ -75,8 +76,8 @@ export function ModelConfigList(props: {
        >
-          <InputRange
-            value={props.modelConfig.max_tokens}
-            min="100"
-            max="100000"
-            step="100"
-            onChange={(e) =>
+          <input
+            type="number"
+            min={1024}
+            max={512000}
+            value={props.modelConfig.max_tokens}
+            onChange={(e) =>
              props.updateConfig(
diff --git a/app/components/sidebar.tsx b/app/components/sidebar.tsx
index 4061b2238..2500ebc0f 100644
--- a/app/components/sidebar.tsx
+++ b/app/components/sidebar.tsx
@@ -1,4 +1,4 @@
-import { useEffect, useRef, useCallback, useMemo } from "react";
+import { useEffect, useRef, useMemo } from "react";
import styles from "./home.module.scss";
@@ -8,6 +8,7 @@ import GithubIcon from "../icons/github.svg";
import ChatGptIcon from "../icons/chatgpt.svg";
import AddIcon from "../icons/add.svg";
import CloseIcon from "../icons/close.svg";
+import DeleteIcon from "../icons/delete.svg";
import MaskIcon from "../icons/mask.svg";
import PluginIcon from "../icons/plugin.svg";
import DragIcon from "../icons/drag.svg";
@@ -202,7 +203,7 @@ export function SideBar(props: { className?: string }) {
            <IconButton
-              icon={<CloseIcon />}
+              icon={<DeleteIcon />}
onClick={async () => {
if (await showConfirm(Locale.Home.DeleteChat)) {
chatStore.deleteSession(chatStore.currentSessionIndex);
diff --git a/app/config/server.ts b/app/config/server.ts
index 2df806fed..007c39738 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -1,4 +1,5 @@
import md5 from "spark-md5";
+import { DEFAULT_MODELS } from "../constant";
declare global {
namespace NodeJS {
@@ -7,6 +8,7 @@ declare global {
CODE?: string;
BASE_URL?: string;
PROXY_URL?: string;
+ OPENAI_ORG_ID?: string;
VERCEL?: string;
HIDE_USER_API_KEY?: string; // disable user's api key input
DISABLE_GPT4?: string; // allow user to use gpt-4 or not
@@ -14,6 +16,7 @@ declare global {
BUILD_APP?: string; // is building desktop app
ENABLE_BALANCE_QUERY?: string; // allow user to query balance or not
DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
+ CUSTOM_MODELS?: string; // to control custom models
}
}
}
@@ -38,6 +41,16 @@ export const getServerSideConfig = () => {
);
}
+ let disableGPT4 = !!process.env.DISABLE_GPT4;
+ let customModels = process.env.CUSTOM_MODELS ?? "";
+
+ if (disableGPT4) {
+ if (customModels) customModels += ",";
+ customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
+ .map((m) => "-" + m.name)
+ .join(",");
+ }
+
return {
apiKey: process.env.OPENAI_API_KEY,
code: process.env.CODE,
@@ -45,10 +58,12 @@ export const getServerSideConfig = () => {
needCode: ACCESS_CODES.size > 0,
baseUrl: process.env.BASE_URL,
proxyUrl: process.env.PROXY_URL,
+ openaiOrgId: process.env.OPENAI_ORG_ID,
isVercel: !!process.env.VERCEL,
hideUserApiKey: !!process.env.HIDE_USER_API_KEY,
- disableGPT4: !!process.env.DISABLE_GPT4,
+ disableGPT4,
hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
disableFastLink: !!process.env.DISABLE_FAST_LINK,
+ customModels,
};
};
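A quick worked example of the `DISABLE_GPT4` folding above, using a hypothetical two-model default list (the real `DEFAULT_MODELS` in `app/constant.ts` contains many more `gpt-4` variants, each of which would get its own `-` entry):

```ts
const DEFAULT_MODELS = [
  { name: "gpt-4", available: true },
  { name: "gpt-3.5-turbo", available: true },
];

// Pretend DISABLE_GPT4=1 and CUSTOM_MODELS="+qwen-7b-chat" were set.
let customModels = "+qwen-7b-chat";
const disableGPT4 = true;

if (disableGPT4) {
  if (customModels) customModels += ",";
  customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
    .map((m) => "-" + m.name)
    .join(",");
}

console.log(customModels); // => "+qwen-7b-chat,-gpt-4"
```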
diff --git a/app/constant.ts b/app/constant.ts
index ca4f3f2e4..5be0ea161 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -82,7 +82,6 @@ export const SUMMARIZE_MODEL = "gpt-3.5-turbo";
export const KnowledgeCutOffDate: Record<string, string> = {
default: "2021-09",
- "gpt-3.5-turbo-1106": "2023-04",
"gpt-4-1106-preview": "2023-04",
"gpt-4-vision-preview": "2023-04",
};
diff --git a/app/store/access.ts b/app/store/access.ts
index 3d889f6e7..f87e44a2a 100644
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -17,6 +17,7 @@ const DEFAULT_ACCESS_STATE = {
hideBalanceQuery: false,
disableGPT4: false,
disableFastLink: false,
+ customModels: "",
openaiUrl: DEFAULT_OPENAI_URL,
};
@@ -52,12 +53,6 @@ export const useAccessStore = createPersistStore(
.then((res: DangerConfig) => {
console.log("[Config] got config from server", res);
set(() => ({ ...res }));
-
- if (res.disableGPT4) {
- DEFAULT_MODELS.forEach(
- (m: any) => (m.available = !m.name.startsWith("gpt-4")),
- );
- }
})
.catch(() => {
console.error("[Config] failed to fetch config");
diff --git a/app/store/chat.ts b/app/store/chat.ts
index ea48948fc..3118abb13 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -93,33 +93,6 @@ function getSummarizeModel(currentModel: string) {
return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
}
-interface ChatStore {
- sessions: ChatSession[];
- currentSessionIndex: number;
- clearSessions: () => void;
- moveSession: (from: number, to: number) => void;
- selectSession: (index: number) => void;
- newSession: (mask?: Mask) => void;
- deleteSession: (index: number) => void;
- currentSession: () => ChatSession;
- nextSession: (delta: number) => void;
- onNewMessage: (message: ChatMessage) => void;
-  onUserInput: (content: string) => Promise<void>;
- summarizeSession: () => void;
- updateStat: (message: ChatMessage) => void;
- updateCurrentSession: (updater: (session: ChatSession) => void) => void;
- updateMessage: (
- sessionIndex: number,
- messageIndex: number,
- updater: (message?: ChatMessage) => void,
- ) => void;
- resetSession: () => void;
- getMessagesWithMemory: () => ChatMessage[];
- getMemoryPrompt: () => ChatMessage;
-
- clearAllData: () => void;
-}
-
function countMessages(msgs: ChatMessage[]) {
return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
}
diff --git a/app/store/config.ts b/app/store/config.ts
index 31df71d56..1b3192065 100644
--- a/app/store/config.ts
+++ b/app/store/config.ts
@@ -49,7 +49,7 @@ export const DEFAULT_CONFIG = {
model: "gpt-3.5-turbo" as ModelType,
temperature: 0.5,
top_p: 1,
- max_tokens: 2000,
+ max_tokens: 4000,
presence_penalty: 0,
frequency_penalty: 0,
sendMemory: true,
@@ -89,7 +89,7 @@ export const ModalConfigValidator = {
return x as ModelType;
},
max_tokens(x: number) {
- return limitNumber(x, 0, 100000, 2000);
+ return limitNumber(x, 0, 512000, 1024);
},
presence_penalty(x: number) {
return limitNumber(x, -2, 2, 0);
@@ -135,15 +135,7 @@ export const useAppConfig = createPersistStore(
}));
},
- allModels() {
- const customModels = get()
- .customModels.split(",")
- .filter((v) => !!v && v.length > 0)
- .map((m) => ({ name: m, available: true }));
- const allModels = get().models.concat(customModels);
- allModels.sort((a, b) => (a.name < b.name ? -1 : 1));
- return allModels;
- },
+ allModels() {},
}),
{
name: StoreKey.Config,
diff --git a/app/styles/globals.scss b/app/styles/globals.scss
index def28680c..aa22b7d4f 100644
--- a/app/styles/globals.scss
+++ b/app/styles/globals.scss
@@ -357,3 +357,7 @@ pre {
overflow: hidden;
text-overflow: ellipsis;
}
+
+.copyable {
+ user-select: text;
+}
diff --git a/app/utils/hooks.ts b/app/utils/hooks.ts
new file mode 100644
index 000000000..f6bfae673
--- /dev/null
+++ b/app/utils/hooks.ts
@@ -0,0 +1,16 @@
+import { useMemo } from "react";
+import { useAccessStore, useAppConfig } from "../store";
+import { collectModels } from "./model";
+
+export function useAllModels() {
+ const accessStore = useAccessStore();
+ const configStore = useAppConfig();
+ const models = useMemo(() => {
+ return collectModels(
+ configStore.models,
+ [accessStore.customModels, configStore.customModels].join(","),
+ );
+ }, [accessStore.customModels, configStore.customModels, configStore.models]);
+
+ return models;
+}
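A hedged usage sketch for the new hook — the picker component is invented for illustration, but the hook call matches the file above; any component can now read the merged server-plus-client model list without touching either store directly:

```tsx
import { useAllModels } from "../utils/hooks";

// Hypothetical component: lists only the models still marked available.
export function ModelPicker(props: { onSelect: (name: string) => void }) {
  const models = useAllModels().filter((m) => m.available);
  return (
    <select onChange={(e) => props.onSelect(e.currentTarget.value)}>
      {models.map((m) => (
        <option key={m.name} value={m.name}>
          {m.name}
        </option>
      ))}
    </select>
  );
}
```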
diff --git a/app/utils/model.ts b/app/utils/model.ts
new file mode 100644
index 000000000..23090f9d2
--- /dev/null
+++ b/app/utils/model.ts
@@ -0,0 +1,40 @@
+import { LLMModel } from "../client/api";
+
+export function collectModelTable(
+ models: readonly LLMModel[],
+ customModels: string,
+) {
+  const modelTable: Record<string, boolean> = {};
+
+ // default models
+ models.forEach((m) => (modelTable[m.name] = m.available));
+
+ // server custom models
+ customModels
+ .split(",")
+ .filter((v) => !!v && v.length > 0)
+ .map((m) => {
+ if (m.startsWith("+")) {
+ modelTable[m.slice(1)] = true;
+ } else if (m.startsWith("-")) {
+ modelTable[m.slice(1)] = false;
+ } else modelTable[m] = true;
+ });
+ return modelTable;
+}
+
+/**
+ * Generate full model table.
+ */
+export function collectModels(
+ models: readonly LLMModel[],
+ customModels: string,
+) {
+ const modelTable = collectModelTable(models, customModels);
+ const allModels = Object.keys(modelTable).map((m) => ({
+ name: m,
+ available: modelTable[m],
+ }));
+
+ return allModels;
+}
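Finally, a small worked example of the two helpers together — the model names are placeholders. Note that a `-` entry keeps the model in the table but flags it unavailable rather than deleting it, which is exactly what the 403 guard in `app/api/common.ts` relies on:

```ts
import { collectModels } from "./model";

const defaults = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: true },
];

collectModels(defaults, "-gpt-4,+qwen-7b-chat,glm-6b");
// => [
//   { name: "gpt-3.5-turbo", available: true },
//   { name: "gpt-4", available: false },
//   { name: "qwen-7b-chat", available: true },
//   { name: "glm-6b", available: true },
// ]
```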