change request timeout for thinking mode
This commit is contained in:
parent 3fe55b4f7f
commit a5a9768245

@@ -5,6 +5,7 @@ import {
   DEEPSEEK_BASE_URL,
   DeepSeek,
   REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,

@@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi {
     // console.log(chatPayload);

+    const isR1 =
+      options.config.model.endsWith("-reasoner") ||
+      options.config.model.endsWith("-r1");
+
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );

     if (shouldStream) {

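The DeepSeek hunk above pairs the longer timeout with the usual AbortController pattern. The following is a standalone TypeScript sketch of that pattern, not code from this commit: the helper name fetchWithModelTimeout and the request shape are assumptions, while the two timeout constants mirror the ones this commit introduces.

// Illustrative only: self-contained sketch of the timeout/abort pattern used above.
const REQUEST_TIMEOUT_MS = 60_000;
const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

async function fetchWithModelTimeout(
  url: string,
  body: unknown,
  isThinkingModel: boolean,
): Promise<Response> {
  const controller = new AbortController();
  // Reasoning/thinking models can spend minutes producing output, so they get
  // the larger budget; everything else keeps the 60-second default.
  const timeoutId = setTimeout(
    () => controller.abort(),
    isThinkingModel ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
  );
  try {
    return await fetch(url, {
      method: "POST",
      body: JSON.stringify(body),
      signal: controller.signal, // abort() rejects the pending fetch with an AbortError
    });
  } finally {
    clearTimeout(timeoutId); // drop the timer once the request settles
  }
}
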
@@ -1,4 +1,9 @@
-import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import {
+  ApiPath,
+  Google,
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+} from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,

@@ -197,10 +202,11 @@ export class GeminiProApi implements LLMApi {
       headers: getHeaders(),
     };

+    const isThinking = options.config.model.includes("-thinking");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );

     if (shouldStream) {

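Both clients decide purely from the model name: the DeepSeek hunk checks the "-reasoner" and "-r1" suffixes, the Gemini hunk checks for a "-thinking" substring. A combined predicate is sketched below only to illustrate those checks; the helper name and the sample model strings are not taken from the diff.

// Hypothetical helper combining the model-name checks from the two hunks above.
function usesThinkingTimeout(model: string): boolean {
  return (
    model.endsWith("-reasoner") ||   // DeepSeek check, e.g. "deepseek-reasoner"
    model.endsWith("-r1") ||
    model.includes("-thinking")      // Gemini check, e.g. "gemini-2.0-flash-thinking-exp"
  );
}

console.log(usesThinkingTimeout("deepseek-reasoner"));             // true
console.log(usesThinkingTimeout("gemini-2.0-flash-thinking-exp")); // true
console.log(usesThinkingTimeout("gpt-4o"));                        // false
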
@@ -8,6 +8,7 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,

@@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;

     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
+    const isO1OrO3 =
+      options.config.model.startsWith("o1") ||
+      options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,

@@ -359,7 +362,9 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1OrO3
+          ? REQUEST_TIMEOUT_MS_FOR_THINKING
+          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );

       const res = await fetch(chatPath, chatPayload);

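For OpenAI, the slow path (DALL·E 3 with b64_json output and the o1/o3 reasoning models) previously used a hard-coded four-times budget; it now reuses the shared thinking constant, which slightly lengthens the window. A quick check of the numbers, assuming the 60000 ms base defined in the constants hunk below:

// Effective budgets for the DALL·E 3 / o1 / o3 path, before and after this commit.
const REQUEST_TIMEOUT_MS = 60_000;
const before = REQUEST_TIMEOUT_MS * 4; // 240000 ms = 4 minutes (old special case)
const after = REQUEST_TIMEOUT_MS * 5;  // 300000 ms = 5 minutes (REQUEST_TIMEOUT_MS_FOR_THINKING)
console.log(before, after);            // 240000 300000
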
@@ -110,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;
 export const STORAGE_KEY = "chatgpt-next-web";

 export const REQUEST_TIMEOUT_MS = 60000;
+export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

 export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";