Mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git (synced 2025-08-08 23:20:28 +08:00)

Commit: Merge branch 'main' of https://github.com/Yidadaa/ChatGPT-Next-Web
app/store/access.ts
@@ -3,7 +3,6 @@ import { persist } from "zustand/middleware";
 import { DEFAULT_API_HOST, StoreKey } from "../constant";
 import { getHeaders } from "../client/api";
 import { BOT_HELLO } from "./chat";
-import { ALL_MODELS } from "./config";
 import { getClientConfig } from "../config/client";

 export interface AccessControlStore {
@@ -76,14 +75,6 @@ export const useAccessStore = create<AccessControlStore>()(
         console.log("[Config] got config from server", res);
         set(() => ({ ...res }));

-        if (!res.enableGPT4) {
-          ALL_MODELS.forEach((model) => {
-            if (model.name.startsWith("gpt-4")) {
-              (model as any).available = false;
-            }
-          });
-        }
-
         if ((res as any).botHello) {
           BOT_HELLO.content = (res as any).botHello;
         }
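The block removed above flipped GPT-4 entries to unavailable by mutating the shared ALL_MODELS constant in place whenever the server config reported enableGPT4 as false. With the model list now owned by the config store (see the config.ts changes below), the same gating could be expressed through the store's update action instead. A minimal sketch, not part of this commit; the applyGPT4Flag helper is hypothetical:

// Hypothetical sketch (not repository code): apply the server's enableGPT4
// flag via the config store instead of mutating a module-level constant.
import { useAppConfig } from "./config";

function applyGPT4Flag(enableGPT4: boolean) {
  if (enableGPT4) return;
  useAppConfig.getState().update((config) => {
    // Mark every gpt-4* entry unavailable; leave other models untouched.
    config.models = config.models.map((model) =>
      model.name.startsWith("gpt-4") ? { ...model, available: false } : model,
    );
  });
}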
app/store/config.ts
@@ -1,7 +1,10 @@
 import { create } from "zustand";
 import { persist } from "zustand/middleware";
+import { LLMModel } from "../client/api";
 import { getClientConfig } from "../config/client";
-import { DEFAULT_INPUT_TEMPLATE, StoreKey } from "../constant";
+import { DEFAULT_INPUT_TEMPLATE, DEFAULT_MODELS, StoreKey } from "../constant";
+
+export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];

 export enum SubmitKey {
   Enter = "Enter",
@@ -31,6 +34,8 @@ export const DEFAULT_CONFIG = {
   dontShowMaskSplashScreen: false, // dont show splash screen when create chat
   dontAddBuiltinMasks: false, // dont add builtin masks

+  models: DEFAULT_MODELS as any as LLMModel[],
+
   modelConfig: {
     model: "gpt-3.5-turbo" as ModelType,
     temperature: 0.5,
@@ -50,81 +55,11 @@ export type ChatConfig = typeof DEFAULT_CONFIG;
 export type ChatConfigStore = ChatConfig & {
   reset: () => void;
   update: (updater: (config: ChatConfig) => void) => void;
+  mergeModels: (newModels: LLMModel[]) => void;
 };

 export type ModelConfig = ChatConfig["modelConfig"];

-const ENABLE_GPT4 = true;
-
-export const ALL_MODELS = [
-  {
-    name: "gpt-4",
-    available: ENABLE_GPT4,
-  },
-  {
-    name: "gpt-4-0314",
-    available: ENABLE_GPT4,
-  },
-  {
-    name: "gpt-4-0613",
-    available: ENABLE_GPT4,
-  },
-  {
-    name: "gpt-4-32k",
-    available: ENABLE_GPT4,
-  },
-  {
-    name: "gpt-4-32k-0314",
-    available: ENABLE_GPT4,
-  },
-  {
-    name: "gpt-4-32k-0613",
-    available: ENABLE_GPT4,
-  },
-  {
-    name: "gpt-3.5-turbo",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-0301",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-0613",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-16k",
-    available: true,
-  },
-  {
-    name: "gpt-3.5-turbo-16k-0613",
-    available: true,
-  },
-  {
-    name: "qwen-v1", // 通义千问
-    available: false,
-  },
-  {
-    name: "ernie", // 文心一言
-    available: false,
-  },
-  {
-    name: "spark", // 讯飞星火
-    available: false,
-  },
-  {
-    name: "llama", // llama
-    available: false,
-  },
-  {
-    name: "chatglm", // chatglm-6b
-    available: false,
-  },
-] as const;
-
-export type ModelType = (typeof ALL_MODELS)[number]["name"];
-
 export function limitNumber(
   x: number,
   min: number,
@@ -139,7 +74,8 @@ export function limitNumber(
 }

 export function limitModel(name: string) {
-  return ALL_MODELS.some((m) => m.name === name && m.available)
+  const allModels = useAppConfig.getState().models;
+  return allModels.some((m) => m.name === name && m.available)
     ? name
     : "gpt-3.5-turbo";
 }
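With ALL_MODELS gone, limitModel reads the live models array from the config store, so the availability check reflects the defaults plus whatever a later merge produced; any name that is missing or marked unavailable falls back to "gpt-3.5-turbo". A standalone sketch of that fallback rule with illustrative data (a local ModelEntry type stands in for the store's model shape); the mergeModels action it depends on is added in the final hunk below.

// Illustrative sketch of the fallback rule, not repository code.
type ModelEntry = { name: string; available: boolean };

function pickModel(name: string, models: ModelEntry[]): string {
  return models.some((m) => m.name === name && m.available)
    ? name
    : "gpt-3.5-turbo";
}

const models: ModelEntry[] = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: false },
];

pickModel("gpt-3.5-turbo", models); // "gpt-3.5-turbo"
pickModel("gpt-4", models); // unavailable, falls back to "gpt-3.5-turbo"
pickModel("no-such-model", models); // unknown, falls back to "gpt-3.5-turbo"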
@@ -179,6 +115,25 @@ export const useAppConfig = create<ChatConfigStore>()(
         updater(config);
         set(() => config);
       },
+
+      mergeModels(newModels) {
+        const oldModels = get().models;
+        const modelMap: Record<string, LLMModel> = {};
+
+        for (const model of oldModels) {
+          model.available = false;
+          modelMap[model.name] = model;
+        }
+
+        for (const model of newModels) {
+          model.available = true;
+          modelMap[model.name] = model;
+        }
+
+        set(() => ({
+          models: Object.values(modelMap),
+        }));
+      },
     }),
     {
       name: StoreKey.Config,
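The new mergeModels action treats the incoming list as authoritative: every model already in the store is first marked unavailable, each incoming model is then marked available and inserted (or overwritten) by name, and the deduplicated map becomes the new models array. A minimal usage sketch; the serverModels value and the import paths are hypothetical, since the commit does not show where mergeModels is called:

import { LLMModel } from "./client/api";
import { useAppConfig } from "./store/config";

// Hypothetical list, e.g. parsed from a server response; after the merge,
// only models named here remain available.
const serverModels: LLMModel[] = [
  { name: "gpt-3.5-turbo", available: true },
  { name: "gpt-4", available: true },
];

useAppConfig.getState().mergeModels(serverModels);

// Models previously in the store but absent from serverModels are now
// available: false, so limitModel() will route them to "gpt-3.5-turbo".
console.log(useAppConfig.getState().models.filter((m) => m.available));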