Merge branch 'main' of https://github.com/Yidadaa/ChatGPT-Next-Web
commit 592a26e8ab
@@ -17,11 +17,6 @@ BASE_URL=
 # Default: Empty
 OPENAI_ORG_ID=

-# (optional)
-# Default: Empty
-# If you do not want users to input their own API key, set this value to 1.
-HIDE_USER_API_KEY=
-
 # (optional)
 # Default: Empty
 # If you do not want users to use GPT-4, set this value to 1.

@@ -29,5 +24,15 @@ DISABLE_GPT4=

 # (optional)
 # Default: Empty
-# If you do not want users to query balance, set this value to 1.
-HIDE_BALANCE_QUERY=
+# If you do not want users to input their own API key, set this value to 1.
+HIDE_USER_API_KEY=
+
+# (optional)
+# Default: Empty
+# If you do want users to query balance, set this value to 1.
+ENABLE_BALANCE_QUERY=
+
+# (optional)
+# Default: Empty
+# If you want to disable parse settings from url, set this value to 1.
+DISABLE_FAST_LINK=
@@ -197,6 +197,13 @@ If you do want users to query balance, set this value to 1, or you should set it

 If you want to disable parse settings from url, set this to 1.

+### `CUSTOM_MODELS` (optional)
+
+> Default: Empty
+> Example: `+llama,+claude-2,-gpt-3.5-turbo` means add `llama, claude-2` to model list, and remove `gpt-3.5-turbo` from list.
+
+To control custom models, use `+` to add a custom model, use `-` to hide a model, separated by comma.
+
 ## Requirements

 NodeJS >= 18, Docker >= 20
@@ -106,6 +106,12 @@ OpenAI 接口代理 URL，如果你手动配置了 openai 接口代理，请填

 如果你想禁用从链接解析预制设置，将此环境变量设置为 1 即可。

+### `CUSTOM_MODELS` (可选)
+
+> 示例：`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表，而从列表中删除 `gpt-3.5-turbo`。
+
+用来控制模型列表，使用 `+` 增加一个模型，使用 `-` 来隐藏一个模型，用英文逗号隔开。
+
 ## 开发

 点击下方按钮，开始二次开发：
@@ -1,10 +1,9 @@
 import { NextRequest, NextResponse } from "next/server";
+import { getServerSideConfig } from "../config/server";
+import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant";
+import { collectModelTable, collectModels } from "../utils/model";

-export const OPENAI_URL = "api.openai.com";
-const DEFAULT_PROTOCOL = "https";
-const PROTOCOL = process.env.PROTOCOL || DEFAULT_PROTOCOL;
-const BASE_URL = process.env.BASE_URL || OPENAI_URL;
-const DISABLE_GPT4 = !!process.env.DISABLE_GPT4;
+const serverConfig = getServerSideConfig();

 export async function requestOpenai(req: NextRequest) {
   const controller = new AbortController();
@@ -14,26 +13,26 @@ export async function requestOpenai(req: NextRequest) {
     "",
   );

-  let baseUrl = BASE_URL;
+  let baseUrl = serverConfig.baseUrl ?? OPENAI_BASE_URL;

   if (!baseUrl.startsWith("http")) {
-    baseUrl = `${PROTOCOL}://${baseUrl}`;
+    baseUrl = `https://${baseUrl}`;
   }

-  if (baseUrl.endsWith('/')) {
+  if (baseUrl.endsWith("/")) {
     baseUrl = baseUrl.slice(0, -1);
   }

   console.log("[Proxy] ", openaiPath);
   console.log("[Base Url]", baseUrl);
+  console.log("[Org ID]", serverConfig.openaiOrgId);

-  if (process.env.OPENAI_ORG_ID) {
-    console.log("[Org ID]", process.env.OPENAI_ORG_ID);
-  }
-
-  const timeoutId = setTimeout(() => {
-    controller.abort();
-  }, 10 * 60 * 1000);
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );

   const fetchUrl = `${baseUrl}/${openaiPath}`;
   const fetchOptions: RequestInit = {
@@ -55,18 +54,23 @@ export async function requestOpenai(req: NextRequest) {
   };

   // #1815 try to refuse gpt4 request
-  if (DISABLE_GPT4 && req.body) {
+  if (serverConfig.customModels && req.body) {
     try {
+      const modelTable = collectModelTable(
+        DEFAULT_MODELS,
+        serverConfig.customModels,
+      );
       const clonedBody = await req.text();
       fetchOptions.body = clonedBody;

-      const jsonBody = JSON.parse(clonedBody);
+      const jsonBody = JSON.parse(clonedBody) as { model?: string };

-      if ((jsonBody?.model ?? "").includes("gpt-4")) {
+      // not undefined and is false
+      if (modelTable[jsonBody?.model ?? ""] === false) {
         return NextResponse.json(
           {
             error: true,
-            message: "you are not allowed to use gpt-4 model",
+            message: `you are not allowed to use ${jsonBody?.model} model`,
           },
           {
             status: 403,
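To make the new guard concrete, here is a small sketch of the lookup it performs; the table contents below are illustrative, assuming `serverConfig.customModels` ends up disabling `gpt-4`:

```ts
// Shape of the table built by collectModelTable(DEFAULT_MODELS, serverConfig.customModels).
const modelTable: Record<string, boolean> = {
  "gpt-3.5-turbo": true,
  "gpt-4": false, // hidden via customModels, e.g. "-gpt-4"
};

const jsonBody = { model: "gpt-4" } as { model?: string };

// Mirrors the guard above: only an explicit `false` rejects the request.
if (modelTable[jsonBody?.model ?? ""] === false) {
  // the proxy replies with { error: true, message: "...gpt-4..." } and status 403
}

// Unknown models resolve to `undefined`, not `false`, so they still pass through.
console.log(modelTable["some-new-model"] === false); // false
```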
@@ -12,6 +12,7 @@ const DANGER_CONFIG = {
   disableGPT4: serverConfig.disableGPT4,
   hideBalanceQuery: serverConfig.hideBalanceQuery,
   disableFastLink: serverConfig.disableFastLink,
+  customModels: serverConfig.customModels,
 };

 declare global {
@@ -70,6 +70,7 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: modelConfig.presence_penalty,
       frequency_penalty: modelConfig.frequency_penalty,
       top_p: modelConfig.top_p,
+      max_tokens: Math.max(modelConfig.max_tokens, 1024),
     };

     console.log("[Request] openai payload: ", requestPayload);
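The added `max_tokens` line simply enforces a floor of 1024 on whatever value the user configured; a quick sketch of that behavior:

```ts
// Same expression as in the payload above, applied to two sample values.
const clampMaxTokens = (configured: number) => Math.max(configured, 1024);

clampMaxTokens(500); // => 1024, values below the floor are raised
clampMaxTokens(8192); // => 8192, the new DEFAULT_CONFIG value passes through unchanged
```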
@@ -18,6 +18,7 @@ import { MaskAvatar } from "./mask";
 import { Mask } from "../store/mask";
 import { useRef, useEffect } from "react";
 import { showConfirm } from "./ui-lib";
+import { useMobileScreen } from "../utils";

 export function ChatItem(props: {
   onClick?: () => void;

@@ -80,7 +81,11 @@ export function ChatItem(props: {

       <div
         className={styles["chat-item-delete"]}
-        onClickCapture={props.onDelete}
+        onClickCapture={(e) => {
+          props.onDelete?.();
+          e.preventDefault();
+          e.stopPropagation();
+        }}
       >
         <DeleteIcon />
       </div>

@@ -101,6 +106,7 @@ export function ChatList(props: { narrow?: boolean }) {
   );
   const chatStore = useChatStore();
   const navigate = useNavigate();
+  const isMobileScreen = useMobileScreen();

   const onDragEnd: OnDragEndResponder = (result) => {
     const { destination, source } = result;

@@ -142,7 +148,7 @@ export function ChatList(props: { narrow?: boolean }) {
           }}
           onDelete={async () => {
             if (
-              !props.narrow ||
+              (!props.narrow && !isMobileScreen) ||
               (await showConfirm(Locale.Home.DeleteChat))
             ) {
               chatStore.deleteSession(i);
@@ -88,6 +88,7 @@ import { ChatCommandPrefix, useChatCommand, useCommand } from "../command";
 import { prettyObject } from "../utils/format";
 import { ExportMessageModal } from "./exporter";
 import { getClientConfig } from "../config/client";
+import { useAllModels } from "../utils/hooks";

 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,

@@ -143,6 +144,7 @@ export function SessionConfigModel(props: { onClose: () => void }) {
           extraListItems={
             session.mask.modelConfig.sendMemory ? (
               <ListItem
+                className="copyable"
                 title={`${Locale.Memory.Title} (${session.lastSummarizeIndex} of ${session.messages.length})`}
                 subTitle={session.memoryPrompt || Locale.Memory.EmptyContent}
               ></ListItem>

@@ -429,14 +431,9 @@ export function ChatActions(props: {

   // switch model
   const currentModel = chatStore.currentSession().mask.modelConfig.model;
-  const models = useMemo(
-    () =>
-      config
-        .allModels()
-        .filter((m) => m.available)
-        .map((m) => m.name),
-    [config],
-  );
+  const models = useAllModels()
+    .filter((m) => m.available)
+    .map((m) => m.name);
   const [showModelSelector, setShowModelSelector] = useState(false);

   return (
@@ -1,14 +1,15 @@
-import { ModalConfigValidator, ModelConfig, useAppConfig } from "../store";
+import { ModalConfigValidator, ModelConfig } from "../store";

 import Locale from "../locales";
 import { InputRange } from "./input-range";
 import { ListItem, Select } from "./ui-lib";
+import { useAllModels } from "../utils/hooks";

 export function ModelConfigList(props: {
   modelConfig: ModelConfig;
   updateConfig: (updater: (config: ModelConfig) => void) => void;
 }) {
-  const config = useAppConfig();
+  const allModels = useAllModels();

   return (
     <>

@@ -24,7 +25,7 @@ export function ModelConfigList(props: {
           );
         }}
       >
-        {config.allModels().map((v, i) => (
+        {allModels.map((v, i) => (
           <option value={v.name} key={i} disabled={!v.available}>
             {v.name}
           </option>

@@ -75,8 +76,8 @@ export function ModelConfigList(props: {
       >
         <input
           type="number"
-          min={100}
-          max={100000}
+          min={1024}
+          max={512000}
           value={props.modelConfig.max_tokens}
           onChange={(e) =>
             props.updateConfig(
@@ -1,4 +1,4 @@
-import { useEffect, useRef, useCallback, useMemo } from "react";
+import { useEffect, useRef, useMemo } from "react";

 import styles from "./home.module.scss";


@@ -8,6 +8,7 @@ import GithubIcon from "../icons/github.svg";
 import ChatGptIcon from "../icons/chatgpt.svg";
 import AddIcon from "../icons/add.svg";
 import CloseIcon from "../icons/close.svg";
+import DeleteIcon from "../icons/delete.svg";
 import MaskIcon from "../icons/mask.svg";
 import PluginIcon from "../icons/plugin.svg";
 import DragIcon from "../icons/drag.svg";

@@ -202,7 +203,7 @@ export function SideBar(props: { className?: string }) {
       <div className={styles["sidebar-actions"]}>
         <div className={styles["sidebar-action"] + " " + styles.mobile}>
           <IconButton
-            icon={<CloseIcon />}
+            icon={<DeleteIcon />}
             onClick={async () => {
               if (await showConfirm(Locale.Home.DeleteChat)) {
                 chatStore.deleteSession(chatStore.currentSessionIndex);
@@ -1,4 +1,5 @@
 import md5 from "spark-md5";
+import { DEFAULT_MODELS } from "../constant";

 declare global {
   namespace NodeJS {

@@ -7,6 +8,7 @@ declare global {
       CODE?: string;
      BASE_URL?: string;
       PROXY_URL?: string;
+      OPENAI_ORG_ID?: string;
       VERCEL?: string;
       HIDE_USER_API_KEY?: string; // disable user's api key input
       DISABLE_GPT4?: string; // allow user to use gpt-4 or not

@@ -14,6 +16,7 @@ declare global {
       BUILD_APP?: string; // is building desktop app
       ENABLE_BALANCE_QUERY?: string; // allow user to query balance or not
       DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
+      CUSTOM_MODELS?: string; // to control custom models
     }
   }
 }

@@ -38,6 +41,16 @@ export const getServerSideConfig = () => {
     );
   }

+  let disableGPT4 = !!process.env.DISABLE_GPT4;
+  let customModels = process.env.CUSTOM_MODELS ?? "";
+
+  if (disableGPT4) {
+    if (customModels) customModels += ",";
+    customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
+      .map((m) => "-" + m.name)
+      .join(",");
+  }
+
   return {
     apiKey: process.env.OPENAI_API_KEY,
     code: process.env.CODE,

@@ -45,10 +58,12 @@ export const getServerSideConfig = () => {
     needCode: ACCESS_CODES.size > 0,
     baseUrl: process.env.BASE_URL,
     proxyUrl: process.env.PROXY_URL,
+    openaiOrgId: process.env.OPENAI_ORG_ID,
     isVercel: !!process.env.VERCEL,
     hideUserApiKey: !!process.env.HIDE_USER_API_KEY,
-    disableGPT4: !!process.env.DISABLE_GPT4,
+    disableGPT4,
     hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
     disableFastLink: !!process.env.DISABLE_FAST_LINK,
+    customModels,
   };
 };
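A minimal sketch of how `getServerSideConfig` now folds `DISABLE_GPT4` into `customModels`; the model names below are stand-ins for whatever `DEFAULT_MODELS` (imported from `../constant`) actually contains:

```ts
// Illustrative stand-in for DEFAULT_MODELS.
const DEFAULT_MODELS = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: true },
  { name: "gpt-4-32k", available: true },
];

let customModels = ""; // process.env.CUSTOM_MODELS ?? ""
const disableGPT4 = true; // !!process.env.DISABLE_GPT4

if (disableGPT4) {
  if (customModels) customModels += ",";
  customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4"))
    .map((m) => "-" + m.name)
    .join(",");
}

console.log(customModels); // "-gpt-4,-gpt-4-32k": every gpt-4 entry ends up hidden
```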
@@ -17,6 +17,7 @@ const DEFAULT_ACCESS_STATE = {
   hideBalanceQuery: false,
   disableGPT4: false,
   disableFastLink: false,
+  customModels: "",

   openaiUrl: DEFAULT_OPENAI_URL,
 };

@@ -52,12 +53,6 @@ export const useAccessStore = createPersistStore(
         .then((res: DangerConfig) => {
           console.log("[Config] got config from server", res);
           set(() => ({ ...res }));
-
-          if (res.disableGPT4) {
-            DEFAULT_MODELS.forEach(
-              (m: any) => (m.available = !m.name.startsWith("gpt-4")),
-            );
-          }
         })
         .catch(() => {
           console.error("[Config] failed to fetch config");
|
||||||
return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
|
return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
|
||||||
}
|
}
|
||||||
|
|
||||||
interface ChatStore {
|
|
||||||
sessions: ChatSession[];
|
|
||||||
currentSessionIndex: number;
|
|
||||||
clearSessions: () => void;
|
|
||||||
moveSession: (from: number, to: number) => void;
|
|
||||||
selectSession: (index: number) => void;
|
|
||||||
newSession: (mask?: Mask) => void;
|
|
||||||
deleteSession: (index: number) => void;
|
|
||||||
currentSession: () => ChatSession;
|
|
||||||
nextSession: (delta: number) => void;
|
|
||||||
onNewMessage: (message: ChatMessage) => void;
|
|
||||||
onUserInput: (content: string) => Promise<void>;
|
|
||||||
summarizeSession: () => void;
|
|
||||||
updateStat: (message: ChatMessage) => void;
|
|
||||||
updateCurrentSession: (updater: (session: ChatSession) => void) => void;
|
|
||||||
updateMessage: (
|
|
||||||
sessionIndex: number,
|
|
||||||
messageIndex: number,
|
|
||||||
updater: (message?: ChatMessage) => void,
|
|
||||||
) => void;
|
|
||||||
resetSession: () => void;
|
|
||||||
getMessagesWithMemory: () => ChatMessage[];
|
|
||||||
getMemoryPrompt: () => ChatMessage;
|
|
||||||
|
|
||||||
clearAllData: () => void;
|
|
||||||
}
|
|
||||||
|
|
||||||
function countMessages(msgs: ChatMessage[]) {
|
function countMessages(msgs: ChatMessage[]) {
|
||||||
return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
|
return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
|
||||||
}
|
}
|
||||||
|
|
|
@ -49,7 +49,7 @@ export const DEFAULT_CONFIG = {
|
||||||
model: "gpt-3.5-turbo" as ModelType,
|
model: "gpt-3.5-turbo" as ModelType,
|
||||||
temperature: 0.5,
|
temperature: 0.5,
|
||||||
top_p: 1,
|
top_p: 1,
|
||||||
max_tokens: 2000,
|
max_tokens: 8192,
|
||||||
presence_penalty: 0,
|
presence_penalty: 0,
|
||||||
frequency_penalty: 0,
|
frequency_penalty: 0,
|
||||||
sendMemory: true,
|
sendMemory: true,
|
||||||
|
@ -82,7 +82,7 @@ export const ModalConfigValidator = {
|
||||||
return x as ModelType;
|
return x as ModelType;
|
||||||
},
|
},
|
||||||
max_tokens(x: number) {
|
max_tokens(x: number) {
|
||||||
return limitNumber(x, 0, 100000, 2000);
|
return limitNumber(x, 0, 512000, 1024);
|
||||||
},
|
},
|
||||||
presence_penalty(x: number) {
|
presence_penalty(x: number) {
|
||||||
return limitNumber(x, -2, 2, 0);
|
return limitNumber(x, -2, 2, 0);
|
||||||
|
@ -128,15 +128,7 @@ export const useAppConfig = createPersistStore(
|
||||||
}));
|
}));
|
||||||
},
|
},
|
||||||
|
|
||||||
allModels() {
|
allModels() {},
|
||||||
const customModels = get()
|
|
||||||
.customModels.split(",")
|
|
||||||
.filter((v) => !!v && v.length > 0)
|
|
||||||
.map((m) => ({ name: m, available: true }));
|
|
||||||
const allModels = get().models.concat(customModels);
|
|
||||||
allModels.sort((a, b) => (a.name < b.name ? -1 : 1));
|
|
||||||
return allModels;
|
|
||||||
},
|
|
||||||
}),
|
}),
|
||||||
{
|
{
|
||||||
name: StoreKey.Config,
|
name: StoreKey.Config,
|
||||||
|
|
|
@ -357,3 +357,7 @@ pre {
|
||||||
overflow: hidden;
|
overflow: hidden;
|
||||||
text-overflow: ellipsis;
|
text-overflow: ellipsis;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
.copyable {
|
||||||
|
user-select: text;
|
||||||
|
}
|
||||||
|
|
|
@@ -0,0 +1,16 @@
+import { useMemo } from "react";
+import { useAccessStore, useAppConfig } from "../store";
+import { collectModels } from "./model";
+
+export function useAllModels() {
+  const accessStore = useAccessStore();
+  const configStore = useAppConfig();
+  const models = useMemo(() => {
+    return collectModels(
+      configStore.models,
+      [accessStore.customModels, configStore.customModels].join(","),
+    );
+  }, [accessStore.customModels, configStore.customModels, configStore.models]);
+
+  return models;
+}
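A hedged usage sketch for the new hook, modeled on how the `ChatActions` and `ModelConfigList` components consume it elsewhere in this commit; the component below is made up for illustration:

```tsx
import { useAllModels } from "../utils/hooks";

// Hypothetical picker; the filter/map chain mirrors ChatActions above.
export function ModelPicker() {
  const modelNames = useAllModels()
    .filter((m) => m.available)
    .map((m) => m.name);

  return (
    <select>
      {modelNames.map((name) => (
        <option key={name} value={name}>
          {name}
        </option>
      ))}
    </select>
  );
}
```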
@@ -0,0 +1,40 @@
+import { LLMModel } from "../client/api";
+
+export function collectModelTable(
+  models: readonly LLMModel[],
+  customModels: string,
+) {
+  const modelTable: Record<string, boolean> = {};
+
+  // default models
+  models.forEach((m) => (modelTable[m.name] = m.available));
+
+  // server custom models
+  customModels
+    .split(",")
+    .filter((v) => !!v && v.length > 0)
+    .map((m) => {
+      if (m.startsWith("+")) {
+        modelTable[m.slice(1)] = true;
+      } else if (m.startsWith("-")) {
+        modelTable[m.slice(1)] = false;
+      } else modelTable[m] = true;
+    });
+  return modelTable;
+}
+
+/**
+ * Generate full model table.
+ */
+export function collectModels(
+  models: readonly LLMModel[],
+  customModels: string,
+) {
+  const modelTable = collectModelTable(models, customModels);
+  const allModels = Object.keys(modelTable).map((m) => ({
+    name: m,
+    available: modelTable[m],
+  }));
+
+  return allModels;
+}
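A worked example of the two helpers above, using the `CUSTOM_MODELS` string from the README example (`+llama,+claude-2,-gpt-3.5-turbo`); the default model list is shortened for illustration:

```ts
import { collectModels } from "./model";

// Abbreviated stand-in for the configured default models.
const defaults = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: true },
];

const models = collectModels(defaults, "+llama,+claude-2,-gpt-3.5-turbo");
// => [
//   { name: "gpt-3.5-turbo", available: false }, // hidden by "-"
//   { name: "gpt-4", available: true },
//   { name: "llama", available: true },          // added by "+"
//   { name: "claude-2", available: true },
// ]
console.log(models);
```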
@@ -1,6 +1,6 @@
-version: '3.9'
+version: "3.9"
 services:
   chatgpt-next-web:
     profiles: ["no-proxy"]
     container_name: chatgpt-next-web
     image: yidadaa/chatgpt-next-web

@@ -13,8 +13,11 @@ services:
       - OPENAI_ORG_ID=$OPENAI_ORG_ID
       - HIDE_USER_API_KEY=$HIDE_USER_API_KEY
       - DISABLE_GPT4=$DISABLE_GPT4
+      - ENABLE_BALANCE_QUERY=$ENABLE_BALANCE_QUERY
+      - DISABLE_FAST_LINK=$DISABLE_FAST_LINK
+      - OPENAI_SB=$OPENAI_SB

   chatgpt-next-web-proxy:
     profiles: ["proxy"]
     container_name: chatgpt-next-web-proxy
     image: yidadaa/chatgpt-next-web

@@ -28,3 +31,6 @@ services:
       - OPENAI_ORG_ID=$OPENAI_ORG_ID
       - HIDE_USER_API_KEY=$HIDE_USER_API_KEY
       - DISABLE_GPT4=$DISABLE_GPT4
+      - ENABLE_BALANCE_QUERY=$ENABLE_BALANCE_QUERY
+      - DISABLE_FAST_LINK=$DISABLE_FAST_LINK
+      - OPENAI_SB=$OPENAI_SB
@@ -1,6 +1,7 @@
 # Cloudflare Pages 部署指南
+
 ## 如何新建项目

 在 Github 上 fork 本项目，然后登录到 dash.cloudflare.com 并进入 Pages。

 1. 点击 "Create a project"。

@@ -12,7 +13,7 @@
 7. 在 "Build Settings" 中，选择 "Framework presets" 选项并选择 "Next.js"。
 8. 由于 node:buffer 的 bug，暂时不要使用默认的 "Build command"。请使用以下命令：
 ```
-npx https://prerelease-registry.devprod.cloudflare.dev/next-on-pages/runs/4930842298/npm-package-next-on-pages-230 --experimental-minify
+npx @cloudflare/next-on-pages@1.5.0
 ```
 9. 对于 "Build output directory"，使用默认值并且不要修改。
 10. 不要修改 "Root Directory"。

@@ -30,10 +31,12 @@
 - `OPENAI_ORG_ID= 可选填，指定 OpenAI 中的组织 ID`
 - `HIDE_USER_API_KEY=1 可选，不让用户自行填入 API Key`
 - `DISABLE_GPT4=1 可选，不让用户使用 GPT-4`
+- `ENABLE_BALANCE_QUERY=1 可选，启用余额查询功能`
+- `DISABLE_FAST_LINK=1 可选，禁用从链接解析预制设置`

 12. 点击 "Save and Deploy"。
 13. 点击 "Cancel deployment"，因为需要填写 Compatibility flags。
 14. 前往 "Build settings"、"Functions"，找到 "Compatibility flags"。
 15. 在 "Configure Production compatibility flag" 和 "Configure Preview compatibility flag" 中填写 "nodejs_compat"。
 16. 前往 "Deployments"，点击 "Retry deployment"。
 17. Enjoy.
@@ -1,6 +1,7 @@
 # Cloudflare Pages Deployment Guide
+
 ## How to create a new project

 Fork this project on GitHub, then log in to dash.cloudflare.com and go to Pages.

 1. Click "Create a project".

@@ -11,12 +12,13 @@ Fork this project on GitHub, then log in to dash.cloudflare.com and go to Pages.
 6. For "Project name" and "Production branch", use the default values or change them as needed.
 7. In "Build Settings", choose the "Framework presets" option and select "Next.js".
 8. Do not use the default "Build command" due to a node:buffer bug. Instead, use the following command:
 ```
 npx @cloudflare/next-on-pages --experimental-minify
 ```
 9. For "Build output directory", use the default value and do not modify it.
 10. Do not modify "Root Directory".
 11. For "Environment variables", click ">" and then "Add variable". Fill in the following information:
+
 - `NODE_VERSION=20.1`
 - `NEXT_TELEMETRY_DISABLE=1`
 - `OPENAI_API_KEY=your_own_API_key`

@@ -29,7 +31,10 @@ Fork this project on GitHub, then log in to dash.cloudflare.com and go to Pages.
 - `OPENAI_ORG_ID= Optional, specify the organization ID in OpenAI`
 - `HIDE_USER_API_KEY=1 Optional, do not allow users to enter their own API key`
 - `DISABLE_GPT4=1 Optional, do not allow users to use GPT-4`
+- `ENABLE_BALANCE_QUERY=1 Optional, allow users to query balance`
+- `DISABLE_FAST_LINK=1 Optional, disable parse settings from url`
+- `OPENAI_SB=1 Optional，use the third-party OpenAI-SB API`

 12. Click "Save and Deploy".
 13. Click "Cancel deployment" because you need to fill in Compatibility flags.
 14. Go to "Build settings", "Functions", and find "Compatibility flags".