fix bug (trim mistakenly eats spaces or \n), optimize timeout by model

Author: suruiqiang
Date: 2025-02-12 17:49:54 +08:00
parent 9714258322
commit 476d946f96
11 changed files with 75 additions and 85 deletions


@@ -1,12 +1,6 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import {
ApiPath,
DEEPSEEK_BASE_URL,
DeepSeek,
REQUEST_TIMEOUT_MS,
REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";
import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
@@ -26,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -116,16 +111,10 @@ export class DeepSeekApi implements LLMApi {
       headers: getHeaders(),
     };
     // console.log(chatPayload);
-    const isR1 =
-      options.config.model.endsWith("-reasoner") ||
-      options.config.model.endsWith("-r1");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );
     if (shouldStream) {
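
The diff only shows the call site; the body of getTimeoutMSByModel lives in @/app/utils and is not part of this hunk. A minimal sketch of what such a helper could look like, assuming it keys the timeout off the model name and reuses the existing REQUEST_TIMEOUT_MS / REQUEST_TIMEOUT_MS_FOR_THINKING constants (the "-reasoner" and "-r1" checks mirror the removed isR1 logic; the "-thinking" check is an illustrative assumption, not necessarily the exact upstream list):

import {
  REQUEST_TIMEOUT_MS,
  REQUEST_TIMEOUT_MS_FOR_THINKING,
} from "@/app/constant";

// Pick a longer abort timeout for reasoning/"thinking" models, which can
// spend a long time before emitting their first token.
export function getTimeoutMSByModel(model: string): number {
  const name = model.toLowerCase();
  if (
    name.endsWith("-reasoner") || // e.g. deepseek-reasoner
    name.endsWith("-r1") ||
    name.includes("-thinking") // assumption: other providers' thinking variants
  ) {
    return REQUEST_TIMEOUT_MS_FOR_THINKING;
  }
  return REQUEST_TIMEOUT_MS;
}

Centralizing the choice in one helper lets every platform client (not just DeepSeek) pass its model name instead of duplicating per-provider ternaries, which is presumably why the diff touches 11 files.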
@@ -176,8 +165,8 @@ export class DeepSeekApi implements LLMApi {
           // Skip if both content and reasoning_content are empty or null
           if (
-            (!reasoning || reasoning.trim().length === 0) &&
-            (!content || content.trim().length === 0)
+            (!reasoning || reasoning.length === 0) &&
+            (!content || content.length === 0)
           ) {
             return {
               isThinking: false,
@@ -185,12 +174,12 @@ export class DeepSeekApi implements LLMApi {
             };
           }
-          if (reasoning && reasoning.trim().length > 0) {
+          if (reasoning && reasoning.length > 0) {
             return {
               isThinking: true,
               content: reasoning,
             };
-          } else if (content && content.trim().length > 0) {
+          } else if (content && content.length > 0) {
             return {
               isThinking: false,
               content: content,
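
Dropping .trim() from these checks is the bug fix named in the commit message: streaming deltas are often whitespace-only (a lone space or "\n" between tokens), and trimming before the emptiness check made those chunks look empty, so they were silently skipped and the whitespace never reached the assembled reply. A small illustration of the predicate change:

// A streamed delta that is nothing but a newline between two tokens.
const content = "\n";

// Old check: trims first, so a whitespace-only chunk counts as empty
// and is skipped -- the newline is lost from the rendered message.
const skippedBefore = !content || content.trim().length === 0; // true

// New check: only a genuinely empty chunk is skipped, so spaces and
// line breaks survive stream reassembly.
const skippedAfter = !content || content.length === 0; // false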