mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git
Refactor Summarize Logic
[+] chore(chat.ts): remove unnecessary comment and refactor variable name
[+] feat(chat.ts): add stream: false to config object
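A minimal, self-contained sketch of the object-rest-destructuring pattern used in the diff below; the ModelConfigLike shape and the sample values are illustrative stand-ins, not the app's real ModelConfig type:

// Illustrative config shape; the app's real ModelConfig has more fields.
interface ModelConfigLike {
  model: string;
  temperature: number;
  max_tokens: number;
}

const modelConfig: ModelConfigLike = {
  model: "gpt-3.5-turbo",
  temperature: 0.5,
  max_tokens: 2000,
};

// Rest destructuring: max_tokens is pulled out, modelcfg keeps everything else.
const { max_tokens, ...modelcfg } = modelConfig;

// Spreading modelcfg therefore does not forward max_tokens, so the summarize
// request is no longer capped by the chat session's max_tokens setting.
const summarizeConfig = {
  ...modelcfg,
  stream: true,
  model: "gpt-3.5-turbo", // stands in for getSummarizeModel(...)
};

console.log("max_tokens" in summarizeConfig); // false
console.log(max_tokens); // 2000, still available locally if needed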
@@ -494,6 +494,7 @@ export const useChatStore = createPersistStore(
         messages: topicMessages,
         config: {
           model: getSummarizeModel(session.mask.modelConfig.model),
+          stream: false,
         },
         onFinish(message) {
           get().updateCurrentSession(
@@ -539,6 +540,10 @@ export const useChatStore = createPersistStore(
       historyMsgLength > modelConfig.compressMessageLengthThreshold &&
       modelConfig.sendMemory
     ) {
+      /** Destruct max_tokens while summarizing
+       * this param is just shit
+       **/
+      const { max_tokens, ...modelcfg } = modelConfig;
       api.llm.chat({
         messages: toBeSummarizedMsgs.concat(
           createMessage({
@@ -548,7 +553,7 @@ export const useChatStore = createPersistStore(
           }),
         ),
         config: {
-          ...modelConfig,
+          ...modelcfg,
           stream: true,
           model: getSummarizeModel(session.mask.modelConfig.model),
         },
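For the topic-summarize call, the new stream: false presumably lets onFinish receive the whole topic string in a single callback instead of assembling streamed chunks. A rough, runnable sketch under that assumption; the api stub and its option shape are hypothetical simplifications, not the app's actual client API:

// Hypothetical stub standing in for the app's client API, for illustration only.
const api = {
  llm: {
    chat(options: {
      messages: { role: string; content: string }[];
      config: { model: string; stream: boolean };
      onFinish: (message: string) => void;
    }) {
      // Stub behaviour: immediately "finish" with a canned reply.
      options.onFinish("Example topic");
    },
  },
};

api.llm.chat({
  messages: [{ role: "user", content: "Summarize the conversation above in a few words." }],
  config: {
    model: "gpt-3.5-turbo", // stands in for getSummarizeModel(session.mask.modelConfig.model)
    stream: false, // non-streaming: onFinish gets the complete topic at once
  },
  onFinish(message) {
    console.log("topic:", message.trim());
  },
});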