fix bug (trim mistakenly eats spaces or \n), optimize timeout by model

suruiqiang
2025-02-12 17:49:54 +08:00
parent 9714258322
commit 476d946f96
11 changed files with 75 additions and 85 deletions


@@ -1,6 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -17,6 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
+import { getTimeoutMSByModel } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -103,7 +104,7 @@ export class XAIApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );
     if (shouldStream) {
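
The diff swaps the fixed REQUEST_TIMEOUT_MS constant for a per-model lookup via getTimeoutMSByModel(options.config.model). The sketch below shows one way such a helper could work; the function name and call site come from the commit, but the model-name heuristics and the constant names/values here are illustrative assumptions, not the verified implementation in @/app/utils.

```ts
// Hypothetical sketch of a per-model timeout helper.
// Constants and model-name checks below are assumptions for illustration.
const REQUEST_TIMEOUT_MS = 60_000; // default request timeout
const REQUEST_TIMEOUT_MS_FOR_SLOW_MODELS = 5 * 60_000; // longer budget for slow models

export function getTimeoutMSByModel(model: string): number {
  const name = model.toLowerCase();
  // Reasoning/"thinking" models typically take much longer to produce
  // a first token, so they get a larger timeout before the fetch is aborted.
  if (
    name.startsWith("o1") ||
    name.startsWith("o3") ||
    name.includes("deepseek-r") ||
    name.includes("-thinking")
  ) {
    return REQUEST_TIMEOUT_MS_FOR_SLOW_MODELS;
  }
  return REQUEST_TIMEOUT_MS;
}
```

At the call site, the abort timer then adapts to whichever model the request targets instead of using one global value:

```ts
const requestTimeoutId = setTimeout(
  () => controller.abort(),
  getTimeoutMSByModel(options.config.model),
);
```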