mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git (synced 2025-08-20 12:38:07 +08:00)
fix bug (trim eats space or \n mistakenly), optimize timeout by model
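The commit bundles two changes: a fix for an over-eager trim that could swallow a space or newline belonging to the streamed output, and a switch from a fixed request timeout to one chosen per model. Only the timeout change appears in the hunk below. As a hypothetical illustration of the trim bug class (sketch only, not the repository's code):

// Hypothetical: per-chunk trimming destroys whitespace that is part of the text.
const chunks = ["Hello", " ", "world\n"];

// Buggy: trimming each streamed chunk drops the separator space and the newline.
const buggy = chunks.map((c) => c.trim()).join(""); // "Helloworld"

// Fixed: concatenate chunks verbatim; trim once at the end, if at all.
const fixed = chunks.join(""); // "Hello world\n"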
@@ -1,11 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import {
-  ApiPath,
-  SILICONFLOW_BASE_URL,
-  SiliconFlow,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, SILICONFLOW_BASE_URL, SiliconFlow } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -25,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -123,7 +119,7 @@ export class SiliconflowApi implements LLMApi {
     // Use extended timeout for thinking models as they typically require more processing time
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS_FOR_THINKING,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
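The diff replaces the fixed REQUEST_TIMEOUT_MS_FOR_THINKING constant with getTimeoutMSByModel(options.config.model), so non-thinking models no longer inherit the long abort deadline. getTimeoutMSByModel itself lives in @/app/utils and is not shown here; a minimal sketch of the idea, with the specific model checks being assumptions rather than the repository's actual logic:

import {
  REQUEST_TIMEOUT_MS,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";

// Sketch only: pick a longer abort deadline for slow "thinking" models.
export function getTimeoutMSByModel(model: string): number {
  const m = model.toLowerCase();
  // Assumed heuristics; the real list of checks may differ.
  if (
    m.startsWith("o1") ||
    m.startsWith("o3") ||
    m.includes("deepseek-r") ||
    m.includes("-thinking")
  ) {
    return REQUEST_TIMEOUT_MS_FOR_THINKING;
  }
  return REQUEST_TIMEOUT_MS;
}

With this shape, the call site in the diff above only has to pass options.config.model, and each platform client gets a model-appropriate deadline without any local branching.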