fix: #963 config not work

Yidadaa
2023-04-22 00:12:07 +08:00
parent ab826363ea
commit ae479f4a92
16 changed files with 190 additions and 194 deletions


@@ -11,6 +11,7 @@ import { isMobileScreen, trimTopic } from "../utils";
import Locale from "../locales";
import { showToast } from "../components/ui-lib";
+import { ModelType, useAppConfig } from "./config";
export type Message = ChatCompletionResponseMessage & {
date: string;
@@ -30,133 +31,8 @@ export function createMessage(override: Partial<Message>): Message {
};
}
-export enum SubmitKey {
-Enter = "Enter",
-CtrlEnter = "Ctrl + Enter",
-ShiftEnter = "Shift + Enter",
-AltEnter = "Alt + Enter",
-MetaEnter = "Meta + Enter",
-}
-export enum Theme {
-Auto = "auto",
-Dark = "dark",
-Light = "light",
-}
-export interface ChatConfig {
-historyMessageCount: number; // -1 means all
-compressMessageLengthThreshold: number;
-sendBotMessages: boolean; // send bot's message or not
-submitKey: SubmitKey;
-avatar: string;
-fontSize: number;
-theme: Theme;
-tightBorder: boolean;
-sendPreviewBubble: boolean;
-sidebarWidth: number;
-disablePromptHint: boolean;
-modelConfig: {
-model: ModelType;
-temperature: number;
-max_tokens: number;
-presence_penalty: number;
-};
-}
-export type ModelConfig = ChatConfig["modelConfig"];
export const ROLES: Message["role"][] = ["system", "user", "assistant"];
-const ENABLE_GPT4 = true;
-export const ALL_MODELS = [
-{
-name: "gpt-4",
-available: ENABLE_GPT4,
-},
-{
-name: "gpt-4-0314",
-available: ENABLE_GPT4,
-},
-{
-name: "gpt-4-32k",
-available: ENABLE_GPT4,
-},
-{
-name: "gpt-4-32k-0314",
-available: ENABLE_GPT4,
-},
-{
-name: "gpt-3.5-turbo",
-available: true,
-},
-{
-name: "gpt-3.5-turbo-0301",
-available: true,
-},
-] as const;
-export type ModelType = (typeof ALL_MODELS)[number]["name"];
-export function limitNumber(
-x: number,
-min: number,
-max: number,
-defaultValue: number,
-) {
-if (typeof x !== "number" || isNaN(x)) {
-return defaultValue;
-}
-return Math.min(max, Math.max(min, x));
-}
-export function limitModel(name: string) {
-return ALL_MODELS.some((m) => m.name === name && m.available)
-? name
-: ALL_MODELS[4].name;
-}
-export const ModalConfigValidator = {
-model(x: string) {
-return limitModel(x) as ModelType;
-},
-max_tokens(x: number) {
-return limitNumber(x, 0, 32000, 2000);
-},
-presence_penalty(x: number) {
-return limitNumber(x, -2, 2, 0);
-},
-temperature(x: number) {
-return limitNumber(x, 0, 2, 1);
-},
-};
-const DEFAULT_CONFIG: ChatConfig = {
-historyMessageCount: 4,
-compressMessageLengthThreshold: 1000,
-sendBotMessages: true as boolean,
-submitKey: SubmitKey.CtrlEnter as SubmitKey,
-avatar: "1f603",
-fontSize: 14,
-theme: Theme.Auto as Theme,
-tightBorder: false,
-sendPreviewBubble: true,
-sidebarWidth: 300,
-disablePromptHint: false,
-modelConfig: {
-model: "gpt-3.5-turbo",
-temperature: 1,
-max_tokens: 2000,
-presence_penalty: 0,
-},
-};
export interface ChatStat {
tokenCount: number;
wordCount: number;
@@ -202,7 +78,6 @@ function createEmptySession(): ChatSession {
}
interface ChatStore {
-config: ChatConfig;
sessions: ChatSession[];
currentSessionIndex: number;
clearSessions: () => void;
@@ -226,9 +101,6 @@ interface ChatStore {
getMessagesWithMemory: () => Message[];
getMemoryPrompt: () => Message;
-getConfig: () => ChatConfig;
-resetConfig: () => void;
-updateConfig: (updater: (config: ChatConfig) => void) => void;
clearAllData: () => void;
}
@@ -243,9 +115,6 @@ export const useChatStore = create<ChatStore>()(
(set, get) => ({
sessions: [createEmptySession()],
currentSessionIndex: 0,
-config: {
-...DEFAULT_CONFIG,
-},
clearSessions() {
set(() => ({
@@ -254,20 +123,6 @@ export const useChatStore = create<ChatStore>()(
}));
},
-resetConfig() {
-set(() => ({ config: { ...DEFAULT_CONFIG } }));
-},
-getConfig() {
-return get().config;
-},
-updateConfig(updater) {
-const config = get().config;
-updater(config);
-set(() => ({ config }));
-},
selectSession(index: number) {
set({
currentSessionIndex: index,
@@ -390,7 +245,7 @@ export const useChatStore = create<ChatStore>()(
role: "assistant",
streaming: true,
id: userMessage.id! + 1,
-model: get().config.modelConfig.model,
+model: useAppConfig.getState().modelConfig.model,
});
// get recent messages
@@ -443,8 +298,8 @@ export const useChatStore = create<ChatStore>()(
controller,
);
},
-filterBot: !get().config.sendBotMessages,
-modelConfig: get().config.modelConfig,
+filterBot: !useAppConfig.getState().sendBotMessages,
+modelConfig: useAppConfig.getState().modelConfig,
});
},
@@ -460,7 +315,7 @@ export const useChatStore = create<ChatStore>()(
getMessagesWithMemory() {
const session = get().currentSession();
-const config = get().config;
+const config = useAppConfig.getState();
const messages = session.messages.filter((msg) => !msg.isError);
const n = messages.length;
@@ -545,14 +400,14 @@ export const useChatStore = create<ChatStore>()(
});
}
-const config = get().config;
+const config = useAppConfig.getState();
let toBeSummarizedMsgs = session.messages.slice(
session.lastSummarizeIndex,
);
const historyMsgLength = countMessages(toBeSummarizedMsgs);
-if (historyMsgLength > get().config?.modelConfig?.max_tokens ?? 4000) {
+if (historyMsgLength > config?.modelConfig?.max_tokens ?? 4000) {
const n = toBeSummarizedMsgs.length;
toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
Math.max(0, n - config.historyMessageCount),
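
The edits above follow one pattern: every read of the chat store's removed `config` slice is replaced with `useAppConfig.getState()`, which returns the current snapshot of the new config store and works inside plain store actions, where React hooks cannot be called. A minimal sketch of that pattern, using made-up store names rather than the project's actual stores:

```ts
import { create } from "zustand";

// Hypothetical stores, for illustration only.
interface DemoConfig {
  model: string;
}
const useDemoConfig = create<DemoConfig>(() => ({ model: "gpt-3.5-turbo" }));

interface DemoChat {
  ask: (prompt: string) => string;
}
const useDemoChat = create<DemoChat>(() => ({
  // A store action cannot call the useDemoConfig hook, but getState()
  // reads the current config snapshot synchronously.
  ask: (prompt) => `[${useDemoConfig.getState().model}] ${prompt}`,
}));

console.log(useDemoChat.getState().ask("hello")); // "[gpt-3.5-turbo] hello"
```

Components keep consuming `useAppConfig` as a hook; `getState()` is only needed outside the React render cycle, as in the store actions above.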

app/store/config.ts (new file, 135 lines added)

@@ -0,0 +1,135 @@
import { create } from "zustand";
import { persist } from "zustand/middleware";
export enum SubmitKey {
Enter = "Enter",
CtrlEnter = "Ctrl + Enter",
ShiftEnter = "Shift + Enter",
AltEnter = "Alt + Enter",
MetaEnter = "Meta + Enter",
}
export enum Theme {
Auto = "auto",
Dark = "dark",
Light = "light",
}
const DEFAULT_CONFIG = {
historyMessageCount: 4,
compressMessageLengthThreshold: 1000,
sendBotMessages: true as boolean,
submitKey: SubmitKey.CtrlEnter as SubmitKey,
avatar: "1f603",
fontSize: 14,
theme: Theme.Auto as Theme,
tightBorder: false,
sendPreviewBubble: true,
sidebarWidth: 300,
disablePromptHint: false,
modelConfig: {
model: "gpt-3.5-turbo" as ModelType,
temperature: 1,
max_tokens: 2000,
presence_penalty: 0,
},
};
export type ChatConfig = typeof DEFAULT_CONFIG;
export type ChatConfigStore = ChatConfig & {
reset: () => void;
update: (updater: (config: ChatConfig) => void) => void;
};
export type ModelConfig = ChatConfig["modelConfig"];
const ENABLE_GPT4 = true;
export const ALL_MODELS = [
{
name: "gpt-4",
available: ENABLE_GPT4,
},
{
name: "gpt-4-0314",
available: ENABLE_GPT4,
},
{
name: "gpt-4-32k",
available: ENABLE_GPT4,
},
{
name: "gpt-4-32k-0314",
available: ENABLE_GPT4,
},
{
name: "gpt-3.5-turbo",
available: true,
},
{
name: "gpt-3.5-turbo-0301",
available: true,
},
] as const;
export type ModelType = (typeof ALL_MODELS)[number]["name"];
export function limitNumber(
x: number,
min: number,
max: number,
defaultValue: number,
) {
if (typeof x !== "number" || isNaN(x)) {
return defaultValue;
}
return Math.min(max, Math.max(min, x));
}
export function limitModel(name: string) {
return ALL_MODELS.some((m) => m.name === name && m.available)
? name
: ALL_MODELS[4].name;
}
export const ModalConfigValidator = {
model(x: string) {
return limitModel(x) as ModelType;
},
max_tokens(x: number) {
return limitNumber(x, 0, 32000, 2000);
},
presence_penalty(x: number) {
return limitNumber(x, -2, 2, 0);
},
temperature(x: number) {
return limitNumber(x, 0, 2, 1);
},
};
const CONFIG_KEY = "app-config";
export const useAppConfig = create<ChatConfigStore>()(
persist(
(set, get) => ({
...DEFAULT_CONFIG,
reset() {
set(() => ({ ...DEFAULT_CONFIG }));
},
update(updater) {
const config = { ...get() };
updater(config);
set(() => config);
},
}),
{
name: CONFIG_KEY,
},
),
);
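
The store keeps the config fields flat on the state object and adds `update`/`reset` helpers; `persist`, which falls back to `localStorage` by default, saves the whole object under the `app-config` key and rehydrates it on the next load. A usage sketch; the call sites are invented, but the store, helpers, and validator are the ones defined above:

```ts
import { ModalConfigValidator, SubmitKey, Theme, useAppConfig } from "./config";

// Read a snapshot outside React, e.g. from another store's action.
console.log(useAppConfig.getState().modelConfig.model);

// Apply edits through update(); the persist middleware writes the new
// state back to localStorage automatically.
useAppConfig.getState().update((config) => {
  config.submitKey = SubmitKey.Enter;
  config.theme = Theme.Dark;
  config.modelConfig.max_tokens = ModalConfigValidator.max_tokens(8000);
});

// Drop all customizations and return to DEFAULT_CONFIG.
useAppConfig.getState().reset();
```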


@@ -1,3 +1,4 @@
export * from "./app";
export * from "./update";
export * from "./access";
+export * from "./config";
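
With the barrel re-export in place, UI code can subscribe to the config store like any other zustand hook. A minimal sketch, assuming a hypothetical component under `app/components`:

```tsx
import { useAppConfig } from "../store";

export function FontSizeLabel() {
  // Selector subscription: the component re-renders only when fontSize changes.
  const fontSize = useAppConfig((config) => config.fontSize);
  return <span style={{ fontSize }}>{fontSize}px</span>;
}
```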