Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into ali_bytedance_reasoning_content
@@ -1,6 +1,11 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import { ApiPath, SILICONFLOW_BASE_URL, SiliconFlow } from "@/app/constant";
+import {
+  ApiPath,
+  SILICONFLOW_BASE_URL,
+  SiliconFlow,
+  DEFAULT_MODELS,
+} from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -8,7 +13,7 @@ import {
   ChatMessageTool,
   usePluginStore,
 } from "@/app/store";
-import { streamWithThink } from "@/app/utils/chat";
+import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -20,13 +25,23 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  isVisionModel,
   getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";

 import { fetch } from "@/app/utils/stream";

+export interface SiliconFlowListModelResponse {
+  object: string;
+  data: Array<{
+    id: string;
+    object: string;
+    root: string;
+  }>;
+}
+
 export class SiliconflowApi implements LLMApi {
-  private disableListModels = true;
+  private disableListModels = false;

   path(path: string): string {
     const accessStore = useAccessStore.getState();
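Note: the new SiliconFlowListModelResponse interface models an OpenAI-compatible model list payload. The sample below is illustrative only (assumed shape and model IDs, not captured from the live API):

// Illustrative sample only; field values are assumptions.
const sample: SiliconFlowListModelResponse = {
  object: "list",
  data: [
    { id: "deepseek-ai/DeepSeek-V3", object: "model", root: "deepseek-ai/DeepSeek-V3" },
    { id: "Qwen/Qwen2.5-7B-Instruct", object: "model", root: "Qwen/Qwen2.5-7B-Instruct" },
  ],
};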
@@ -67,13 +82,16 @@ export class SiliconflowApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
       if (v.role === "assistant") {
         const content = getMessageTextContentWithoutThinking(v);
         messages.push({ role: v.role, content });
       } else {
-        const content = getMessageTextContent(v);
+        const content = visionModel
+          ? await preProcessImageContent(v.content)
+          : getMessageTextContent(v);
         messages.push({ role: v.role, content });
       }
     }
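Note on the chat() change: for vision models the message content is routed through preProcessImageContent (from "@/app/utils/chat") instead of being flattened to plain text. Its actual behavior is not shown in this diff; the snippet below is only a hypothetical sketch of the kind of transform assumed, i.e. inlining remote image_url parts as data URLs so the request is self-contained.

// Hypothetical sketch, not the project's preProcessImageContent implementation.
type MessagePart =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

async function inlineImages(parts: MessagePart[]): Promise<MessagePart[]> {
  return Promise.all(
    parts.map(async (part) => {
      // Leave text parts and already-inlined data URLs untouched.
      if (part.type !== "image_url" || part.image_url.url.startsWith("data:")) {
        return part;
      }
      // Fetch the image and re-encode it as a base64 data URL.
      const blob = await (await fetch(part.image_url.url)).blob();
      const dataUrl = await new Promise<string>((resolve) => {
        const reader = new FileReader();
        reader.onloadend = () => resolve(reader.result as string);
        reader.readAsDataURL(blob);
      });
      return { ...part, image_url: { url: dataUrl } };
    }),
  );
}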
@@ -234,6 +252,36 @@ export class SiliconflowApi implements LLMApi {
   }

   async models(): Promise<LLMModel[]> {
-    return [];
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(SiliconFlow.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as SiliconFlowListModelResponse;
+    const chatModels = resJson.data;
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    let seq = 1000; // keep the ordering consistent with Constant.ts
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+      sorted: seq++,
+      provider: {
+        id: "siliconflow",
+        providerName: "SiliconFlow",
+        providerType: "siliconflow",
+        sorted: 14,
+      },
+    }));
   }
 }
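Note: with disableListModels flipped to false, models() now fetches SiliconFlow.ListModelPath and maps each returned entry onto the app's LLMModel shape. For a hypothetical entry { id: "deepseek-ai/DeepSeek-V3", ... }, the mapping above yields roughly:

// Sketch of the mapped result for one hypothetical model entry.
const mapped = {
  name: "deepseek-ai/DeepSeek-V3", // m.id
  available: true,
  sorted: 1000, // seq starts at 1000 and increments per model
  provider: {
    id: "siliconflow",
    providerName: "SiliconFlow",
    providerType: "siliconflow",
    sorted: 14,
  },
};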