Merge remote-tracking branch 'upstream/main'
commit c637826ebe
@@ -190,6 +190,7 @@ Cloudflare R2 access key ID; required when using the `DALL-E` plugin.

### `R2_SECRET_ACCESS_KEY` (optional)

Cloudflare R2 secret access key; required when using the `DALL-E` plugin.

### `R2_BUCKET` (optional)

Cloudflare R2 bucket name; required when using the `DALL-E` plugin.

@@ -106,6 +106,12 @@ OpenAI API proxy URL; fill this in if you have manually configured an openai API proxy.

If you want to disable parsing preset settings from links, set this environment variable to 1.

+### `CUSTOM_MODELS` (optional)
+
+> Example: `+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` adds `qwen-7b-chat` and `glm-6b` to the model list and removes `gpt-3.5-turbo` from it.
+
+Controls the model list: use `+` to add a model and `-` to hide one, with entries separated by commas.
+
## Development

Click the button below to start development:

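To make the `CUSTOM_MODELS` rules concrete, here is a minimal standalone sketch of the parsing described above. It mirrors the `collectModelTable` helper introduced later in this merge; the variable names are illustrative only.

```ts
// "+name" adds a model, "-name" hides it, and a bare name counts as an addition.
const customModels = "+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo";

const modelTable: Record<string, boolean> = {};
for (const entry of customModels.split(",").filter((v) => v.length > 0)) {
  if (entry.startsWith("+")) modelTable[entry.slice(1)] = true;
  else if (entry.startsWith("-")) modelTable[entry.slice(1)] = false;
  else modelTable[entry] = true;
}

console.log(modelTable);
// => { "qwen-7b-chat": true, "glm-6b": true, "gpt-3.5-turbo": false }
```
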
@@ -1,10 +1,9 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
+import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant";
+import { collectModelTable, collectModels } from "../utils/model";

-export const OPENAI_URL = "api.openai.com";
-const DEFAULT_PROTOCOL = "https";
-const PROTOCOL = process.env.PROTOCOL || DEFAULT_PROTOCOL;
-const BASE_URL = process.env.BASE_URL || OPENAI_URL;
-const DISABLE_GPT4 = !!process.env.DISABLE_GPT4;
const serverConfig = getServerSideConfig();

export async function requestOpenai(req: NextRequest) {
  const controller = new AbortController();

@@ -14,10 +13,10 @@ export async function requestOpenai(req: NextRequest) {
    "",
  );

-  let baseUrl = BASE_URL;
+  let baseUrl = serverConfig.baseUrl ?? OPENAI_BASE_URL;

  if (!baseUrl.startsWith("http")) {
-    baseUrl = `${PROTOCOL}://${baseUrl}`;
+    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {

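For illustration, the base-URL handling above condensed into a standalone sketch. The helper name is hypothetical, and the trailing-slash branch assumes the elided hunk body strips the slash:

```ts
// Hypothetical helper mirroring the normalization in requestOpenai.
function normalizeBaseUrl(raw: string): string {
  let baseUrl = raw;
  if (!baseUrl.startsWith("http")) baseUrl = `https://${baseUrl}`; // bare host -> https
  if (baseUrl.endsWith("/")) baseUrl = baseUrl.slice(0, -1); // drop trailing slash
  return baseUrl;
}

console.log(normalizeBaseUrl("api.openai.com")); // "https://api.openai.com"
console.log(normalizeBaseUrl("https://example.com/")); // "https://example.com"
```
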
@@ -26,10 +25,7 @@ export async function requestOpenai(req: NextRequest) {

  console.log("[Proxy] ", openaiPath);
  console.log("[Base Url]", baseUrl);

-  if (process.env.OPENAI_ORG_ID) {
-    console.log("[Org ID]", process.env.OPENAI_ORG_ID);
-  }
+  console.log("[Org ID]", serverConfig.openaiOrgId);

  const timeoutId = setTimeout(
    () => {

@@ -58,18 +54,23 @@ export async function requestOpenai(req: NextRequest) {
  };

  // #1815 try to refuse gpt4 request
-  if (DISABLE_GPT4 && req.body) {
+  if (serverConfig.customModels && req.body) {
    try {
+      const modelTable = collectModelTable(
+        DEFAULT_MODELS,
+        serverConfig.customModels,
+      );
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

-      const jsonBody = JSON.parse(clonedBody);
+      const jsonBody = JSON.parse(clonedBody) as { model?: string };

-      if ((jsonBody?.model ?? "").includes("gpt-4")) {
+      // not undefined and is false
+      if (modelTable[jsonBody?.model ?? ""] === false) {
        return NextResponse.json(
          {
            error: true,
-            message: "you are not allowed to use gpt-4 model",
+            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,

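The net effect of this hunk: the guard now consults the merged model table instead of string-matching on `gpt-4`, so any model hidden via `CUSTOM_MODELS` gets refused. A self-contained sketch of the check, where the inlined table stands in for `collectModelTable`'s output:

```ts
// Illustrative: with CUSTOM_MODELS="-gpt-4", the merged table maps
// "gpt-4" to false, and the proxy refuses the request with a 403.
const modelTable: Record<string, boolean> = {
  "gpt-3.5-turbo": true,
  "gpt-4": false, // hidden via "-gpt-4"
};

const jsonBody: { model?: string } = { model: "gpt-4" };

// Note the strict `=== false`: a model missing from the table is
// undefined, which is treated as allowed.
if (modelTable[jsonBody.model ?? ""] === false) {
  console.log(`403: you are not allowed to use ${jsonBody.model} model`);
}
```
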
@@ -12,6 +12,7 @@ const DANGER_CONFIG = {
  disableGPT4: serverConfig.disableGPT4,
  hideBalanceQuery: serverConfig.hideBalanceQuery,
  disableFastLink: serverConfig.disableFastLink,
+  customModels: serverConfig.customModels,
};

declare global {

@@ -77,6 +77,7 @@ export class ChatGPTApi implements LLMApi {
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
+      max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };

    console.log("[Request] openai payload: ", requestPayload);

@@ -18,6 +18,7 @@ import { MaskAvatar } from "./mask";
import { Mask } from "../store/mask";
import { useRef, useEffect } from "react";
import { showConfirm } from "./ui-lib";
+import { useMobileScreen } from "../utils";

export function ChatItem(props: {
  onClick?: () => void;

@@ -80,7 +81,11 @@ export function ChatItem(props: {

        <div
          className={styles["chat-item-delete"]}
-          onClickCapture={props.onDelete}
+          onClickCapture={(e) => {
+            props.onDelete?.();
+            e.preventDefault();
+            e.stopPropagation();
+          }}
        >
          <DeleteIcon />
        </div>

@@ -101,6 +106,7 @@ export function ChatList(props: { narrow?: boolean }) {
  );
  const chatStore = useChatStore();
  const navigate = useNavigate();
+  const isMobileScreen = useMobileScreen();

  const onDragEnd: OnDragEndResponder = (result) => {
    const { destination, source } = result;

@@ -142,7 +148,7 @@ export function ChatList(props: { narrow?: boolean }) {
              }}
              onDelete={async () => {
                if (
-                  !props.narrow ||
+                  (!props.narrow && !isMobileScreen) ||
                  (await showConfirm(Locale.Home.DeleteChat))
                ) {
                  chatStore.deleteSession(i);

@@ -91,6 +91,7 @@ import { ChatCommandPrefix, useChatCommand, useCommand } from "../command";
import { prettyObject } from "../utils/format";
import { ExportMessageModal } from "./exporter";
import { getClientConfig } from "../config/client";
+import { useAllModels } from "../utils/hooks";

const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
  loading: () => <LoadingIcon />,

@@ -146,6 +147,7 @@ export function SessionConfigModel(props: { onClose: () => void }) {
        extraListItems={
          session.mask.modelConfig.sendMemory ? (
            <ListItem
+              className="copyable"
              title={`${Locale.Memory.Title} (${session.lastSummarizeIndex} of ${session.messages.length})`}
              subTitle={session.memoryPrompt || Locale.Memory.EmptyContent}
            ></ListItem>

@@ -440,14 +442,9 @@ export function ChatActions(props: {

  // switch model
  const currentModel = chatStore.currentSession().mask.modelConfig.model;
-  const models = useMemo(
-    () =>
-      config
-        .allModels()
-        .filter((m) => m.available)
-        .map((m) => m.name),
-    [config],
-  );
+  const models = useAllModels()
+    .filter((m) => m.available)
+    .map((m) => m.name);
  const [showModelSelector, setShowModelSelector] = useState(false);

  return (

@@ -1,14 +1,15 @@
-import { ModalConfigValidator, ModelConfig, useAppConfig } from "../store";
+import { ModalConfigValidator, ModelConfig } from "../store";

import Locale from "../locales";
import { InputRange } from "./input-range";
import { ListItem, Select } from "./ui-lib";
+import { useAllModels } from "../utils/hooks";

export function ModelConfigList(props: {
  modelConfig: ModelConfig;
  updateConfig: (updater: (config: ModelConfig) => void) => void;
}) {
-  const config = useAppConfig();
+  const allModels = useAllModels();

  return (
    <>

@@ -24,7 +25,7 @@ export function ModelConfigList(props: {
          );
        }}
      >
-        {config.allModels().map((v, i) => (
+        {allModels.map((v, i) => (
          <option value={v.name} key={i} disabled={!v.available}>
            {v.name}
          </option>

@@ -75,8 +76,8 @@ export function ModelConfigList(props: {
      >
        <input
          type="number"
-          min={100}
-          max={100000}
+          min={1024}
+          max={512000}
          value={props.modelConfig.max_tokens}
          onChange={(e) =>
            props.updateConfig(

@@ -1,4 +1,4 @@
-import { useEffect, useRef, useCallback, useMemo } from "react";
+import { useEffect, useRef, useMemo } from "react";

import styles from "./home.module.scss";

@@ -8,6 +8,7 @@ import GithubIcon from "../icons/github.svg";
import ChatGptIcon from "../icons/chatgpt.svg";
import AddIcon from "../icons/add.svg";
import CloseIcon from "../icons/close.svg";
+import DeleteIcon from "../icons/delete.svg";
import MaskIcon from "../icons/mask.svg";
import PluginIcon from "../icons/plugin.svg";
import DragIcon from "../icons/drag.svg";

@@ -202,7 +203,7 @@ export function SideBar(props: { className?: string }) {
        <div className={styles["sidebar-actions"]}>
          <div className={styles["sidebar-action"] + " " + styles.mobile}>
            <IconButton
-              icon={<CloseIcon />}
+              icon={<DeleteIcon />}
              onClick={async () => {
                if (await showConfirm(Locale.Home.DeleteChat)) {
                  chatStore.deleteSession(chatStore.currentSessionIndex);

@@ -1,4 +1,5 @@
import md5 from "spark-md5";
+import { DEFAULT_MODELS } from "../constant";

declare global {
  namespace NodeJS {

@@ -7,6 +8,7 @@ declare global {
      CODE?: string;
      BASE_URL?: string;
      PROXY_URL?: string;
+      OPENAI_ORG_ID?: string;
      VERCEL?: string;
      HIDE_USER_API_KEY?: string; // disable user's api key input
      DISABLE_GPT4?: string; // allow user to use gpt-4 or not

@@ -14,6 +16,7 @@ declare global {
      BUILD_APP?: string; // is building desktop app
      ENABLE_BALANCE_QUERY?: string; // allow user to query balance or not
      DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
+      CUSTOM_MODELS?: string; // to control custom models
    }
  }
}

@@ -38,6 +41,16 @@ export const getServerSideConfig = () => {
    );
  }

+  let disableGPT4 = !!process.env.DISABLE_GPT4;
+  let customModels = process.env.CUSTOM_MODELS ?? "";
+
+  if (disableGPT4) {
+    if (customModels) customModels += ",";
+    customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
+      .map((m) => "-" + m.name)
+      .join(",");
+  }
+
  return {
    apiKey: process.env.OPENAI_API_KEY,
    code: process.env.CODE,

@@ -45,10 +58,12 @@ export const getServerSideConfig = () => {
    needCode: ACCESS_CODES.size > 0,
    baseUrl: process.env.BASE_URL,
    proxyUrl: process.env.PROXY_URL,
+    openaiOrgId: process.env.OPENAI_ORG_ID,
    isVercel: !!process.env.VERCEL,
    hideUserApiKey: !!process.env.HIDE_USER_API_KEY,
-    disableGPT4: !!process.env.DISABLE_GPT4,
+    disableGPT4,
    hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
    disableFastLink: !!process.env.DISABLE_FAST_LINK,
+    customModels,
  };
};

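With these two hunks, `DISABLE_GPT4` is folded into the `customModels` string rather than handled as a separate code path. A sketch of the resulting value, using an illustrative `DEFAULT_MODELS` (the real list comes from `../constant`):

```ts
// Illustrative: DISABLE_GPT4=1 appends "-<name>" entries for every
// default model whose name starts with "gpt-4".
const DEFAULT_MODELS = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: true },
  { name: "gpt-4-1106-preview", available: true },
];

let customModels = "+qwen-7b-chat"; // pretend CUSTOM_MODELS was set
const disableGPT4 = true;

if (disableGPT4) {
  if (customModels) customModels += ",";
  customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
    .map((m) => "-" + m.name)
    .join(",");
}

console.log(customModels);
// => "+qwen-7b-chat,-gpt-4,-gpt-4-1106-preview"
```
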
@@ -82,7 +82,6 @@ export const SUMMARIZE_MODEL = "gpt-3.5-turbo";

export const KnowledgeCutOffDate: Record<string, string> = {
  default: "2021-09",
  "gpt-3.5-turbo-1106": "2023-04",
  "gpt-4-1106-preview": "2023-04",
  "gpt-4-vision-preview": "2023-04",
};

@@ -17,6 +17,7 @@ const DEFAULT_ACCESS_STATE = {
  hideBalanceQuery: false,
  disableGPT4: false,
  disableFastLink: false,
+  customModels: "",

  openaiUrl: DEFAULT_OPENAI_URL,
};

@@ -52,12 +53,6 @@ export const useAccessStore = createPersistStore(
      .then((res: DangerConfig) => {
        console.log("[Config] got config from server", res);
        set(() => ({ ...res }));
-
-        if (res.disableGPT4) {
-          DEFAULT_MODELS.forEach(
-            (m: any) => (m.available = !m.name.startsWith("gpt-4")),
-          );
-        }
      })
      .catch(() => {
        console.error("[Config] failed to fetch config");

@@ -93,33 +93,6 @@ function getSummarizeModel(currentModel: string) {
  return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
}

-interface ChatStore {
-  sessions: ChatSession[];
-  currentSessionIndex: number;
-  clearSessions: () => void;
-  moveSession: (from: number, to: number) => void;
-  selectSession: (index: number) => void;
-  newSession: (mask?: Mask) => void;
-  deleteSession: (index: number) => void;
-  currentSession: () => ChatSession;
-  nextSession: (delta: number) => void;
-  onNewMessage: (message: ChatMessage) => void;
-  onUserInput: (content: string) => Promise<void>;
-  summarizeSession: () => void;
-  updateStat: (message: ChatMessage) => void;
-  updateCurrentSession: (updater: (session: ChatSession) => void) => void;
-  updateMessage: (
-    sessionIndex: number,
-    messageIndex: number,
-    updater: (message?: ChatMessage) => void,
-  ) => void;
-  resetSession: () => void;
-  getMessagesWithMemory: () => ChatMessage[];
-  getMemoryPrompt: () => ChatMessage;
-
-  clearAllData: () => void;
-}
-
function countMessages(msgs: ChatMessage[]) {
  return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
}

@@ -49,7 +49,7 @@ export const DEFAULT_CONFIG = {
    model: "gpt-3.5-turbo" as ModelType,
    temperature: 0.5,
    top_p: 1,
-    max_tokens: 2000,
+    max_tokens: 4000,
    presence_penalty: 0,
    frequency_penalty: 0,
    sendMemory: true,

@@ -89,7 +89,7 @@ export const ModalConfigValidator = {
    return x as ModelType;
  },
  max_tokens(x: number) {
-    return limitNumber(x, 0, 100000, 2000);
+    return limitNumber(x, 0, 512000, 1024);
  },
  presence_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);

@@ -135,15 +135,7 @@ export const useAppConfig = createPersistStore(
      }));
    },

-    allModels() {
-      const customModels = get()
-        .customModels.split(",")
-        .filter((v) => !!v && v.length > 0)
-        .map((m) => ({ name: m, available: true }));
-      const allModels = get().models.concat(customModels);
-      allModels.sort((a, b) => (a.name < b.name ? -1 : 1));
-      return allModels;
-    },
+    allModels() {},
  }),
  {
    name: StoreKey.Config,

@@ -357,3 +357,7 @@ pre {
  overflow: hidden;
  text-overflow: ellipsis;
}
+
+.copyable {
+  user-select: text;
+}

@@ -0,0 +1,16 @@
+import { useMemo } from "react";
+import { useAccessStore, useAppConfig } from "../store";
+import { collectModels } from "./model";
+
+export function useAllModels() {
+  const accessStore = useAccessStore();
+  const configStore = useAppConfig();
+  const models = useMemo(() => {
+    return collectModels(
+      configStore.models,
+      [accessStore.customModels, configStore.customModels].join(","),
+    );
+  }, [accessStore.customModels, configStore.customModels, configStore.models]);
+
+  return models;
+}

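A hedged usage sketch for the new hook (the component is hypothetical; the import path matches the one used in `chat.tsx` above): `useAllModels` merges the built-in list with both the server-pushed and the locally configured custom-model strings, replacing the old `config.allModels()` call sites.

```tsx
// Illustrative component built on the new hook.
import { useAllModels } from "../utils/hooks";

export function ModelPicker() {
  const models = useAllModels(); // [{ name, available }, ...]

  return (
    <select>
      {models
        .filter((m) => m.available)
        .map((m) => (
          <option key={m.name} value={m.name}>
            {m.name}
          </option>
        ))}
    </select>
  );
}
```
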
@@ -0,0 +1,40 @@
+import { LLMModel } from "../client/api";
+
+export function collectModelTable(
+  models: readonly LLMModel[],
+  customModels: string,
+) {
+  const modelTable: Record<string, boolean> = {};
+
+  // default models
+  models.forEach((m) => (modelTable[m.name] = m.available));
+
+  // server custom models
+  customModels
+    .split(",")
+    .filter((v) => !!v && v.length > 0)
+    .map((m) => {
+      if (m.startsWith("+")) {
+        modelTable[m.slice(1)] = true;
+      } else if (m.startsWith("-")) {
+        modelTable[m.slice(1)] = false;
+      } else modelTable[m] = true;
+    });
+  return modelTable;
+}
+
+/**
+ * Generate full model table.
+ */
+export function collectModels(
+  models: readonly LLMModel[],
+  customModels: string,
+) {
+  const modelTable = collectModelTable(models, customModels);
+  const allModels = Object.keys(modelTable).map((m) => ({
+    name: m,
+    available: modelTable[m],
+  }));
+
+  return allModels;
+}
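Finally, a worked example tying the two new helpers together, assuming `LLMModel` is `{ name: string; available: boolean }` at this point in the codebase (model names are illustrative):

```ts
import { collectModelTable, collectModels } from "./model";

// Two built-in models plus a CUSTOM_MODELS-style string.
const models = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: true },
];

const table = collectModelTable(models, "+qwen-7b-chat,-gpt-4");
// => { "gpt-3.5-turbo": true, "gpt-4": false, "qwen-7b-chat": true }

const all = collectModels(models, "+qwen-7b-chat,-gpt-4");
// => [
//   { name: "gpt-3.5-turbo", available: true },
//   { name: "gpt-4", available: false },
//   { name: "qwen-7b-chat", available: true },
// ]
console.log(table, all);
```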