Merge branch 'main' into feat-mcp

Author: Kadxy
Date: 2025-01-19 23:28:12 +08:00
Committed by: GitHub
37 changed files with 896 additions and 206 deletions

View File

@@ -10,6 +10,7 @@ import { handle as alibabaHandler } from "../../alibaba";
import { handle as moonshotHandler } from "../../moonshot";
import { handle as stabilityHandler } from "../../stability";
import { handle as iflytekHandler } from "../../iflytek";
import { handle as deepseekHandler } from "../../deepseek";
import { handle as xaiHandler } from "../../xai";
import { handle as chatglmHandler } from "../../glm";
import { handle as proxyHandler } from "../../proxy";
@@ -40,6 +41,8 @@ async function handle(
return stabilityHandler(req, { params });
case ApiPath.Iflytek:
return iflytekHandler(req, { params });
case ApiPath.DeepSeek:
return deepseekHandler(req, { params });
case ApiPath.XAI:
return xaiHandler(req, { params });
case ApiPath.ChatGLM:

View File

@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Alibaba as string,

View File

@@ -9,7 +9,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
@@ -122,7 +122,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Anthropic as string,

View File

@@ -92,6 +92,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) {
systemApiKey =
serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret;
break;
case ModelProvider.DeepSeek:
systemApiKey = serverConfig.deepseekApiKey;
break;
case ModelProvider.XAI:
systemApiKey = serverConfig.xaiApiKey;
break;

View File

@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
import { getAccessToken } from "@/app/utils/baidu";
const serverConfig = getServerSideConfig();
@@ -104,7 +104,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Baidu as string,

View File

@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ByteDance as string,

View File

@@ -2,7 +2,7 @@ import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
-import { getModelProvider, isModelAvailableInServer } from "../utils/model";
+import { getModelProvider, isModelNotavailableInServer } from "../utils/model";
const serverConfig = getServerSideConfig();
@@ -118,15 +118,14 @@ export async function requestOpenai(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
-ServiceProvider.OpenAI as string,
-) ||
-isModelAvailableInServer(
-serverConfig.customModels,
-jsonBody?.model as string,
-ServiceProvider.Azure as string,
+[
+ServiceProvider.OpenAI,
+ServiceProvider.Azure,
+jsonBody?.model as string, // support provider-unspecified model
+],
)
) {
return NextResponse.json(

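The hunk above collapses two ORed `isModelAvailableInServer` calls into one `isModelNotavailableInServer` call that takes a provider list. A minimal standalone sketch (not the repo's code) of the equivalence — a model is blocked only when no candidate provider offers it:

// Sketch only: `isAvailableOn` stands in for the per-provider lookup.
function notAvailableOnAny(
  isAvailableOn: (provider: string) => boolean,
  providers: string[],
): boolean {
  return !providers.some((p) => isAvailableOn(p));
}

console.log(notAvailableOnAny((p) => p === "Azure", ["OpenAI", "Azure"])); // false — allowed
console.log(notAvailableOnAny(() => false, ["OpenAI", "Azure"])); // true — blocked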
View File

@@ -14,6 +14,7 @@ const DANGER_CONFIG = {
disableFastLink: serverConfig.disableFastLink,
customModels: serverConfig.customModels,
defaultModel: serverConfig.defaultModel,
visionModels: serverConfig.visionModels,
};
declare global {

app/api/deepseek.ts (new file, 128 lines)
View File

@@ -0,0 +1,128 @@
import { getServerSideConfig } from "@/app/config/server";
import {
DEEPSEEK_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
console.log("[DeepSeek Route] params ", params);
if (req.method === "OPTIONS") {
return NextResponse.json({ body: "OK" }, { status: 200 });
}
const authResult = auth(req, ModelProvider.DeepSeek);
if (authResult.error) {
return NextResponse.json(authResult, {
status: 401,
});
}
try {
const response = await request(req);
return response;
} catch (e) {
console.error("[DeepSeek] ", e);
return NextResponse.json(prettyObject(e));
}
}
async function request(req: NextRequest) {
const controller = new AbortController();
// use the configured base url, or just strip the ApiPath.DeepSeek prefix from the path
let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.DeepSeek, "");
let baseUrl = serverConfig.deepseekUrl || DEEPSEEK_BASE_URL;
if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, -1);
}
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
const timeoutId = setTimeout(
() => {
controller.abort();
},
10 * 60 * 1000,
);
const fetchUrl = `${baseUrl}${path}`;
const fetchOptions: RequestInit = {
headers: {
"Content-Type": "application/json",
Authorization: req.headers.get("Authorization") ?? "",
},
method: req.method,
body: req.body,
redirect: "manual",
// @ts-ignore
duplex: "half",
signal: controller.signal,
};
// #1815 try to refuse some request to some models
if (serverConfig.customModels && req.body) {
try {
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
const jsonBody = JSON.parse(clonedBody) as { model?: string };
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.DeepSeek as string,
)
) {
return NextResponse.json(
{
error: true,
message: `you are not allowed to use ${jsonBody?.model} model`,
},
{
status: 403,
},
);
}
} catch (e) {
console.error(`[DeepSeek] filter`, e);
}
}
try {
const res = await fetch(fetchUrl, fetchOptions);
// to prevent browser prompt for credentials
const newHeaders = new Headers(res.headers);
newHeaders.delete("www-authenticate");
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");
return new Response(res.body, {
status: res.status,
statusText: res.statusText,
headers: newHeaders,
});
} finally {
clearTimeout(timeoutId);
}
}

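The route above is a thin proxy: it strips the `/api/deepseek` prefix, forwards the body, and applies the same customModels filter as the other provider routes. A hypothetical request through it (endpoint shape assumed from DeepSeek's OpenAI-compatible API; in the app this goes through `getHeaders()`/`ClientApi` instead):

// Sketch only — not part of the commit.
async function demoDeepSeekProxyCall() {
  const res = await fetch("/api/deepseek/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      // a user key; auth() can also inject the server's DEEPSEEK_API_KEY
      Authorization: "Bearer sk-...",
    },
    body: JSON.stringify({
      model: "deepseek-chat",
      stream: false,
      messages: [{ role: "user", content: "hello" }],
    }),
  });
  const json = await res.json();
  console.log(json.choices?.[0]?.message?.content);
}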
View File

@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ChatGLM as string,

View File

@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
// iflytek
const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Iflytek as string,

View File

@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Moonshot as string,

View File

@@ -8,7 +8,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {
// not undefined and is false
if (
-isModelAvailableInServer(
+isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.XAI as string,

View File

@@ -20,6 +20,7 @@ import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { MoonshotApi } from "./platforms/moonshot";
import { SparkApi } from "./platforms/iflytek";
import { DeepSeekApi } from "./platforms/deepseek";
import { XAIApi } from "./platforms/xai";
import { ChatGLMApi } from "./platforms/glm";
@@ -154,6 +155,9 @@ export class ClientApi {
case ModelProvider.Iflytek:
this.llm = new SparkApi();
break;
case ModelProvider.DeepSeek:
this.llm = new DeepSeekApi();
break;
case ModelProvider.XAI:
this.llm = new XAIApi();
break;
@@ -247,6 +251,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek;
const isXAI = modelConfig.providerName === ServiceProvider.XAI;
const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
const isEnabledAccessControl = accessStore.enabledAccessControl();
@@ -264,6 +269,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
? accessStore.moonshotApiKey
: isXAI
? accessStore.xaiApiKey
: isDeepSeek
? accessStore.deepseekApiKey
: isChatGLM
? accessStore.chatglmApiKey
: isIflytek
@@ -280,6 +287,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {
isAlibaba,
isMoonshot,
isIflytek,
isDeepSeek,
isXAI,
isChatGLM,
apiKey,
@@ -302,6 +310,13 @@ export function getHeaders(ignoreHeaders: boolean = false) {
isAzure,
isAnthropic,
isBaidu,
isByteDance,
isAlibaba,
isMoonshot,
isIflytek,
isDeepSeek,
isXAI,
isChatGLM,
apiKey,
isEnabledAccessControl,
} = getConfig();
@@ -344,6 +359,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
return new ClientApi(ModelProvider.Moonshot);
case ServiceProvider.Iflytek:
return new ClientApi(ModelProvider.Iflytek);
case ServiceProvider.DeepSeek:
return new ClientApi(ModelProvider.DeepSeek);
case ServiceProvider.XAI:
return new ClientApi(ModelProvider.XAI);
case ServiceProvider.ChatGLM:

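With DeepSeek wired into both `ClientApi` and `getClientApi` above, provider resolution stays uniform. A short usage sketch:

// Sketch: callers resolve DeepSeek exactly like any other provider.
import { getClientApi } from "@/app/client/api";
import { ServiceProvider } from "@/app/constant";

const api = getClientApi(ServiceProvider.DeepSeek); // internally: new ClientApi(ModelProvider.DeepSeek)
// api.llm is a DeepSeekApi instance; chat/usage/models follow the shared LLMApi interface.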
View File

@@ -0,0 +1,200 @@
"use client";
// DeepSeek's API is OpenAI-compatible, so this client reuses the OpenAI RequestPayload shape.
import {
ApiPath,
DEEPSEEK_BASE_URL,
DeepSeek,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export class DeepSeekApi implements LLMApi {
private disableListModels = true;
path(path: string): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
baseUrl = accessStore.deepseekUrl;
}
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.DeepSeek;
baseUrl = isApp ? DEEPSEEK_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
}
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) {
baseUrl = "https://" + baseUrl;
}
console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/");
}
extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? "";
}
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
async chat(options: ChatOptions) {
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = getMessageTextContent(v);
messages.push({ role: v.role, content });
}
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
providerName: options.config.providerName,
},
};
const requestPayload: RequestPayload = {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// max_tokens is intentionally not sent; the provider's default is used.
};
console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
const chatPath = this.path(DeepSeek.ChatPath);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: getHeaders(),
};
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
);
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return stream(
chatPath,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string;
tool_calls: ChatMessageTool[];
};
}>;
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
return choices[0]?.delta?.content;
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
// @ts-ignore
requestPayload?.messages?.splice(
// @ts-ignore
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
options.onError?.(e as Error);
}
}
async usage() {
return {
used: 0,
total: 0,
};
}
async models(): Promise<LLMModel[]> {
return [];
}
}

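In the parseSSE callback above, OpenAI-style tool calls stream in fragments: the first delta carries the call's id/name, later deltas append argument text at the same index. A condensed, self-contained sketch of that accumulation (types simplified from the real ChatMessageTool):

type ToolCallDelta = {
  index?: number;
  id?: string;
  type?: string;
  function?: { name?: string; arguments?: string };
};

function accumulateToolCall(runTools: any[], delta: ToolCallDelta) {
  if (delta.id) {
    // first fragment of a call: register it
    runTools.push({
      id: delta.id,
      type: delta.type,
      function: {
        name: delta.function?.name,
        arguments: delta.function?.arguments ?? "",
      },
    });
  } else if (delta.index !== undefined && runTools[delta.index]) {
    // follow-up fragment: append streamed argument text
    runTools[delta.index].function.arguments += delta.function?.arguments ?? "";
  }
}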
View File

@@ -21,16 +21,108 @@ import {
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
import { preProcessImageContent } from "@/app/utils/chat";
interface BasePayload {
model: string;
}
interface ChatPayload extends BasePayload {
messages: ChatOptions["messages"];
stream?: boolean;
temperature?: number;
presence_penalty?: number;
frequency_penalty?: number;
top_p?: number;
}
interface ImageGenerationPayload extends BasePayload {
prompt: string;
size?: string;
user_id?: string;
}
interface VideoGenerationPayload extends BasePayload {
prompt: string;
duration?: number;
resolution?: string;
user_id?: string;
}
type ModelType = "chat" | "image" | "video";
export class ChatGLMApi implements LLMApi {
private disableListModels = true;
private getModelType(model: string): ModelType {
if (model.startsWith("cogview-")) return "image";
if (model.startsWith("cogvideo-")) return "video";
return "chat";
}
private getModelPath(type: ModelType): string {
switch (type) {
case "image":
return ChatGLM.ImagePath;
case "video":
return ChatGLM.VideoPath;
default:
return ChatGLM.ChatPath;
}
}
private createPayload(
messages: ChatOptions["messages"],
modelConfig: any,
options: ChatOptions,
): BasePayload {
const modelType = this.getModelType(modelConfig.model);
const lastMessage = messages[messages.length - 1];
const prompt =
typeof lastMessage.content === "string"
? lastMessage.content
: lastMessage.content.map((c) => c.text).join("\n");
switch (modelType) {
case "image":
return {
model: modelConfig.model,
prompt,
size: options.config.size,
} as ImageGenerationPayload;
default:
return {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
} as ChatPayload;
}
}
private parseResponse(modelType: ModelType, json: any): string {
switch (modelType) {
case "image": {
const imageUrl = json.data?.[0]?.url;
return imageUrl ? `![Generated Image](${imageUrl})` : "";
}
case "video": {
const videoUrl = json.data?.[0]?.url;
return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
}
default:
return this.extractMessage(json);
}
}
path(path: string): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
@@ -51,7 +143,6 @@ export class ChatGLMApi implements LLMApi {
}
console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/");
}
@@ -64,9 +155,12 @@ export class ChatGLMApi implements LLMApi {
}
async chat(options: ChatOptions) {
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
-const content = getMessageTextContent(v);
+const content = visionModel
+? await preProcessImageContent(v.content)
+: getMessageTextContent(v);
messages.push({ role: v.role, content });
}
@@ -78,25 +172,16 @@ export class ChatGLMApi implements LLMApi {
providerName: options.config.providerName,
},
};
+const modelType = this.getModelType(modelConfig.model);
+const requestPayload = this.createPayload(messages, modelConfig, options);
+const path = this.path(this.getModelPath(modelType));
-const requestPayload: RequestPayload = {
-messages,
-stream: options.config.stream,
-model: modelConfig.model,
-temperature: modelConfig.temperature,
-presence_penalty: modelConfig.presence_penalty,
-frequency_penalty: modelConfig.frequency_penalty,
-top_p: modelConfig.top_p,
-};
+console.log(`[Request] glm ${modelType} payload: `, requestPayload);
-console.log("[Request] glm payload: ", requestPayload);
-const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
-const chatPath = this.path(ChatGLM.ChatPath);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
@@ -104,12 +189,23 @@ export class ChatGLMApi implements LLMApi {
headers: getHeaders(),
};
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
);
if (modelType === "image" || modelType === "video") {
const res = await fetch(path, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
console.log(`[Response] glm ${modelType}:`, resJson);
const message = this.parseResponse(modelType, resJson);
options.onFinish(message, res);
return;
}
+const shouldStream = !!options.config.stream;
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
@@ -117,7 +213,7 @@ export class ChatGLMApi implements LLMApi {
useChatStore.getState().currentSession().mask?.plugin || [],
);
return stream(
-chatPath,
+path,
requestPayload,
getHeaders(),
tools as any,
@@ -125,7 +221,6 @@ export class ChatGLMApi implements LLMApi {
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
@@ -154,7 +249,7 @@ export class ChatGLMApi implements LLMApi {
}
return choices[0]?.delta?.content;
},
-// processToolMessage, include tool_calls message and tool call results
+// processToolMessage
(
requestPayload: RequestPayload,
toolCallMessage: any,
@@ -172,7 +267,7 @@ export class ChatGLMApi implements LLMApi {
options,
);
} else {
-const res = await fetch(chatPath, chatPayload);
+const res = await fetch(path, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
@@ -184,6 +279,7 @@ export class ChatGLMApi implements LLMApi {
options.onError?.(e as Error);
}
}
async usage() {
return {
used: 0,

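The ChatGLM client now routes by model family: `cogview-*` goes to the image endpoint, `cogvideo-*` to the video endpoint, everything else to chat. A standalone sketch of the same dispatch, with the paths copied from the `ChatGLM` constants added in this commit:

type ModelType = "chat" | "image" | "video";

function getModelType(model: string): ModelType {
  if (model.startsWith("cogview-")) return "image";
  if (model.startsWith("cogvideo-")) return "video";
  return "chat";
}

const paths: Record<ModelType, string> = {
  chat: "api/paas/v4/chat/completions",
  image: "api/paas/v4/images/generations",
  video: "api/paas/v4/videos/generations",
};

console.log(paths[getModelType("cogview-3-flash")]); // api/paas/v4/images/generations
console.log(paths[getModelType("glm-4-plus")]); // api/paas/v4/chat/completions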
View File

@@ -60,9 +60,18 @@ export class GeminiProApi implements LLMApi {
extractMessage(res: any) {
console.log("[Response] gemini-pro response: ", res);
+const getTextFromParts = (parts: any[]) => {
+if (!Array.isArray(parts)) return "";
+return parts
+.map((part) => part?.text || "")
+.filter((text) => text.trim() !== "")
+.join("\n\n");
+};
return (
-res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
-res?.at(0)?.candidates?.at(0)?.content?.parts.at(0)?.text ||
+getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
+getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
res?.error?.message ||
""
);
@@ -223,7 +232,10 @@ export class GeminiProApi implements LLMApi {
},
});
}
-return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text;
+return chunkJson?.candidates
+?.at(0)
+?.content.parts?.map((part: { text: string }) => part.text)
+.join("\n\n");
},
// processToolMessage, include tool_calls message and tool call results
(

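The new `getTextFromParts` helper joins every text part of a Gemini candidate instead of reading only the first one. A worked example of its behavior (sketch, same logic as above):

const getTextFromParts = (parts: any[]) => {
  if (!Array.isArray(parts)) return "";
  return parts
    .map((part) => part?.text || "")
    .filter((text) => text.trim() !== "")
    .join("\n\n");
};

// Multi-part candidates now survive extraction:
getTextFromParts([{ text: "first" }, { inlineData: {} }, { text: "second" }]);
// => "first\n\nsecond"
getTextFromParts(undefined as any);
// => "" — previously `parts.at(0)?.text` could throw when parts was not an array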
View File

@@ -24,7 +24,7 @@ import {
stream,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
-import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing";
+import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
import {
ChatOptions,
@@ -73,7 +73,7 @@ export interface DalleRequestPayload {
prompt: string;
response_format: "url" | "b64_json";
n: number;
-size: DalleSize;
+size: ModelSize;
quality: DalleQuality;
style: DalleStyle;
}

View File

@@ -70,9 +70,8 @@ import {
isDalle3,
isVisionModel,
safeLocalStorage,
selectOrCopy,
showPlugins,
useMobileScreen,
getModelSizes,
supportsCustomSize,
} from "../utils";
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
@@ -80,7 +79,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller";
-import { DalleQuality, DalleSize, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { Prompt, usePromptStore } from "../store/prompt";
import Locale from "../locales";
@@ -557,10 +556,11 @@ export function ChatActions(props: {
const [showSizeSelector, setShowSizeSelector] = useState(false);
const [showQualitySelector, setShowQualitySelector] = useState(false);
const [showStyleSelector, setShowStyleSelector] = useState(false);
-const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"];
+const modelSizes = getModelSizes(currentModel);
const dalle3Qualitys: DalleQuality[] = ["standard", "hd"];
const dalle3Styles: DalleStyle[] = ["vivid", "natural"];
-const currentSize = session.mask.modelConfig?.size ?? "1024x1024";
+const currentSize =
+session.mask.modelConfig?.size ?? ("1024x1024" as ModelSize);
const currentQuality = session.mask.modelConfig?.quality ?? "standard";
const currentStyle = session.mask.modelConfig?.style ?? "vivid";
@@ -711,7 +711,7 @@ export function ChatActions(props: {
/>
)}
-{isDalle3(currentModel) && (
+{supportsCustomSize(currentModel) && (
<ChatAction
onClick={() => setShowSizeSelector(true)}
text={currentSize}
@@ -722,7 +722,7 @@ export function ChatActions(props: {
{showSizeSelector && (
<Selector
defaultSelectedValue={currentSize}
-items={dalle3Sizes.map((m) => ({
+items={modelSizes.map((m) => ({
title: m,
value: m,
}))}
@@ -936,6 +936,12 @@ export function ShortcutKeyModal(props: { onClose: () => void }) {
title: Locale.Chat.ShortcutKey.showShortcutKey,
keys: isMac ? ["⌘", "/"] : ["Ctrl", "/"],
},
{
title: Locale.Chat.ShortcutKey.clearContext,
keys: isMac
? ["⌘", "Shift", "backspace"]
: ["Ctrl", "Shift", "backspace"],
},
];
return (
<div className="modal-mask">
@@ -1592,7 +1598,7 @@ function _Chat() {
const [showShortcutKeyModal, setShowShortcutKeyModal] = useState(false);
useEffect(() => {
-const handleKeyDown = (event: any) => {
+const handleKeyDown = (event: KeyboardEvent) => {
// open a new chat: command + shift + o
if (
(event.metaKey || event.ctrlKey) &&
@@ -1643,14 +1649,30 @@ function _Chat() {
event.preventDefault();
setShowShortcutKeyModal(true);
}
// clear context: command + shift + backspace
else if (
(event.metaKey || event.ctrlKey) &&
event.shiftKey &&
event.key.toLowerCase() === "backspace"
) {
event.preventDefault();
chatStore.updateTargetSession(session, (session) => {
if (session.clearContextIndex === session.messages.length) {
session.clearContextIndex = undefined;
} else {
session.clearContextIndex = session.messages.length;
session.memoryPrompt = ""; // will clear memory
}
});
}
};
window.addEventListener("keydown", handleKeyDown);
document.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
document.removeEventListener("keydown", handleKeyDown);
};
}, [messages, chatStore, navigate]);
}, [messages, chatStore, navigate, session]);
const [showChatSidePanel, setShowChatSidePanel] = useState(false);

View File

@@ -73,6 +73,7 @@ import {
Iflytek,
SAAS_CHAT_URL,
ChatGLM,
DeepSeek,
} from "../constant";
import { Prompt, SearchService, usePromptStore } from "../store/prompt";
import { ErrorBoundary } from "./error";
@@ -1197,6 +1198,47 @@ export function Settings() {
</>
);
const deepseekConfigComponent = accessStore.provider ===
ServiceProvider.DeepSeek && (
<>
<ListItem
title={Locale.Settings.Access.DeepSeek.Endpoint.Title}
subTitle={
Locale.Settings.Access.DeepSeek.Endpoint.SubTitle +
DeepSeek.ExampleEndpoint
}
>
<input
aria-label={Locale.Settings.Access.DeepSeek.Endpoint.Title}
type="text"
value={accessStore.deepseekUrl}
placeholder={DeepSeek.ExampleEndpoint}
onChange={(e) =>
accessStore.update(
(access) => (access.deepseekUrl = e.currentTarget.value),
)
}
></input>
</ListItem>
<ListItem
title={Locale.Settings.Access.DeepSeek.ApiKey.Title}
subTitle={Locale.Settings.Access.DeepSeek.ApiKey.SubTitle}
>
<PasswordInput
aria-label={Locale.Settings.Access.DeepSeek.ApiKey.Title}
value={accessStore.deepseekApiKey}
type="text"
placeholder={Locale.Settings.Access.DeepSeek.ApiKey.Placeholder}
onChange={(e) => {
accessStore.update(
(access) => (access.deepseekApiKey = e.currentTarget.value),
);
}}
/>
</ListItem>
</>
);
const XAIConfigComponent = accessStore.provider === ServiceProvider.XAI && (
<>
<ListItem
@@ -1733,6 +1775,7 @@ export function Settings() {
{alibabaConfigComponent}
{tencentConfigComponent}
{moonshotConfigComponent}
{deepseekConfigComponent}
{stabilityConfigComponent}
{lflytekConfigComponent}
{XAIConfigComponent}

View File

@@ -23,7 +23,6 @@ import {
MIN_SIDEBAR_WIDTH,
NARROW_SIDEBAR_WIDTH,
Path,
-PLUGINS,
REPO_URL,
} from "../constant";
@@ -34,6 +33,12 @@ import { Selector, showConfirm } from "./ui-lib";
import clsx from "clsx";
import { isMcpEnabled } from "../mcp/actions";
const DISCOVERY = [
{ name: Locale.Plugin.Name, path: Path.Plugins },
{ name: "Stable Diffusion", path: Path.Sd },
{ name: Locale.SearchChat.Page.Title, path: Path.SearchChat },
];
const ChatList = dynamic(async () => (await import("./chat-list")).ChatList, {
loading: () => null,
});
@@ -222,7 +227,7 @@ export function SideBarTail(props: {
export function SideBar(props: { className?: string }) {
useHotKey();
const { onDragStart, shouldNarrow } = useDragSideBar();
-const [showPluginSelector, setShowPluginSelector] = useState(false);
+const [showDiscoverySelector, setShowDiscoverySelector] = useState(false);
const navigate = useNavigate();
const config = useAppConfig();
const chatStore = useChatStore();
@@ -279,21 +284,21 @@ export function SideBar(props: { className?: string }) {
icon={<DiscoveryIcon />}
text={shouldNarrow ? undefined : Locale.Discovery.Name}
className={styles["sidebar-bar-button"]}
-onClick={() => setShowPluginSelector(true)}
+onClick={() => setShowDiscoverySelector(true)}
shadow
/>
</div>
-{showPluginSelector && (
+{showDiscoverySelector && (
<Selector
items={[
-...PLUGINS.map((item) => {
+...DISCOVERY.map((item) => {
return {
title: item.name,
value: item.path,
};
}),
]}
-onClose={() => setShowPluginSelector(false)}
+onClose={() => setShowDiscoverySelector(false)}
onSelection={(s) => {
navigate(s[0], { state: { fromHome: true } });
}}

View File

@@ -40,7 +40,6 @@ export const getBuildConfig = () => {
buildMode,
isApp,
template: process.env.DEFAULT_INPUT_TEMPLATE ?? DEFAULT_INPUT_TEMPLATE,
-visionModels: process.env.VISION_MODELS || "",
};
};

View File

@@ -1,5 +1,6 @@
import md5 from "spark-md5";
import { DEFAULT_MODELS, DEFAULT_GA_ID } from "../constant";
import { isGPT4Model } from "../utils/model";
declare global {
namespace NodeJS {
@@ -22,6 +23,7 @@ declare global {
DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
CUSTOM_MODELS?: string; // to control custom models
DEFAULT_MODEL?: string; // to control default model in every new chat window
VISION_MODELS?: string; // to control vision models
// stability only
STABILITY_URL?: string;
@@ -71,6 +73,9 @@ declare global {
IFLYTEK_API_KEY?: string;
IFLYTEK_API_SECRET?: string;
DEEPSEEK_URL?: string;
DEEPSEEK_API_KEY?: string;
// xai only
XAI_URL?: string;
XAI_API_KEY?: string;
@@ -126,25 +131,16 @@ export const getServerSideConfig = () => {
const disableGPT4 = !!process.env.DISABLE_GPT4;
let customModels = process.env.CUSTOM_MODELS ?? "";
let defaultModel = process.env.DEFAULT_MODEL ?? "";
let visionModels = process.env.VISION_MODELS ?? "";
if (disableGPT4) {
if (customModels) customModels += ",";
-customModels += DEFAULT_MODELS.filter(
-(m) =>
-(m.name.startsWith("gpt-4") ||
-m.name.startsWith("chatgpt-4o") ||
-m.name.startsWith("o1")) &&
-!m.name.startsWith("gpt-4o-mini"),
-)
+customModels += DEFAULT_MODELS.filter((m) => isGPT4Model(m.name))
.map((m) => "-" + m.name)
.join(",");
-if (
-(defaultModel.startsWith("gpt-4") ||
-defaultModel.startsWith("chatgpt-4o") ||
-defaultModel.startsWith("o1")) &&
-!defaultModel.startsWith("gpt-4o-mini")
-)
+if (defaultModel && isGPT4Model(defaultModel)) {
defaultModel = "";
+}
}
const isStability = !!process.env.STABILITY_API_KEY;
@@ -159,6 +155,7 @@ export const getServerSideConfig = () => {
const isAlibaba = !!process.env.ALIBABA_API_KEY;
const isMoonshot = !!process.env.MOONSHOT_API_KEY;
const isIflytek = !!process.env.IFLYTEK_API_KEY;
const isDeepSeek = !!process.env.DEEPSEEK_API_KEY;
const isXAI = !!process.env.XAI_API_KEY;
const isChatGLM = !!process.env.CHATGLM_API_KEY;
// const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
@@ -223,6 +220,10 @@ export const getServerSideConfig = () => {
iflytekApiKey: process.env.IFLYTEK_API_KEY,
iflytekApiSecret: process.env.IFLYTEK_API_SECRET,
isDeepSeek,
deepseekUrl: process.env.DEEPSEEK_URL,
deepseekApiKey: getApiKey(process.env.DEEPSEEK_API_KEY),
isXAI,
xaiUrl: process.env.XAI_URL,
xaiApiKey: getApiKey(process.env.XAI_API_KEY),
@@ -252,6 +253,7 @@ export const getServerSideConfig = () => {
disableFastLink: !!process.env.DISABLE_FAST_LINK,
customModels,
defaultModel,
visionModels,
allowedWebDavEndpoints,
enableMcp: !!process.env.ENABLE_MCP,
};

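VISION_MODELS moves from build-time config into the server config above, then travels to the browser through /api/config and the access store (see the getVisionModels accessor further down). A hedged sketch of the override it enables, assuming VISION_MODELS="pixtral-large,my-custom-model" is set on the server:

// What isVisionModel() effectively does once the store holds the env value.
const visionModels = "pixtral-large,my-custom-model"; // fetched from the store in the app
const envVisionModels = visionModels.split(",").map((m) => m.trim());

console.log(envVisionModels.includes("my-custom-model")); // true — treated as a vision model
// models not listed here still fall through to VISION_MODEL_REGEXES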
View File

@@ -28,6 +28,8 @@ export const TENCENT_BASE_URL = "https://hunyuan.tencentcloudapi.com";
export const MOONSHOT_BASE_URL = "https://api.moonshot.cn";
export const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com";
export const DEEPSEEK_BASE_URL = "https://api.deepseek.com";
export const XAI_BASE_URL = "https://api.x.ai";
export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";
@@ -66,6 +68,7 @@ export enum ApiPath {
Artifacts = "/api/artifacts",
XAI = "/api/xai",
ChatGLM = "/api/chatglm",
DeepSeek = "/api/deepseek",
}
export enum SlotID {
@@ -121,6 +124,7 @@ export enum ServiceProvider {
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
}
// Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings
@@ -145,6 +149,7 @@ export enum ModelProvider {
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
}
export const Stability = {
@@ -227,6 +232,11 @@ export const Iflytek = {
ChatPath: "v1/chat/completions",
};
export const DeepSeek = {
ExampleEndpoint: DEEPSEEK_BASE_URL,
ChatPath: "chat/completions",
};
export const XAI = {
ExampleEndpoint: XAI_BASE_URL,
ChatPath: "v1/chat/completions",
@@ -235,6 +245,8 @@ export const XAI = {
export const ChatGLM = {
ExampleEndpoint: CHATGLM_BASE_URL,
ChatPath: "api/paas/v4/chat/completions",
ImagePath: "api/paas/v4/images/generations",
VideoPath: "api/paas/v4/videos/generations",
};
export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
@@ -401,6 +413,8 @@ export const KnowledgeCutOffDate: Record<string, string> = {
// it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
"gemini-pro": "2023-12",
"gemini-pro-vision": "2023-12",
"deepseek-chat": "2024-07",
"deepseek-coder": "2024-07",
};
export const DEFAULT_TTS_ENGINE = "OpenAI-TTS";
@@ -429,6 +443,7 @@ export const VISION_MODEL_REGEXES = [
/qwen2-vl/,
/gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
/^dall-e-3$/, // Matches exactly "dall-e-3"
/glm-4v/,
];
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -546,6 +561,8 @@ const iflytekModels = [
"4.0Ultra",
];
const deepseekModels = ["deepseek-chat", "deepseek-coder"];
const xAIModes = ["grok-beta"];
const chatglmModels = [
@@ -557,6 +574,15 @@ const chatglmModels = [
"glm-4-long",
"glm-4-flashx",
"glm-4-flash",
"glm-4v-plus",
"glm-4v",
"glm-4v-flash", // free
"cogview-3-plus",
"cogview-3",
"cogview-3-flash", // free
// not supported yet: these models require polling for task results
// "cogvideox",
// "cogvideox-flash", // free
];
let seq = 1000; // the built-in model sort sequence starts at 1000
@@ -693,6 +719,17 @@ export const DEFAULT_MODELS = [
sorted: 12,
},
})),
...deepseekModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "deepseek",
providerName: "DeepSeek",
providerType: "deepseek",
sorted: 13,
},
})),
] as const;
export const CHAT_PAGE_SIZE = 15;
@@ -712,11 +749,6 @@ export const internalAllowedWebDavEndpoints = [
];
export const DEFAULT_GA_ID = "G-89WN60ZK2E";
-export const PLUGINS = [
-{ name: "Plugins", path: Path.Plugins },
-{ name: "Stable Diffusion", path: Path.Sd },
-{ name: "Search Chat", path: Path.SearchChat },
-];
export const SAAS_CHAT_URL = "https://nextchat.dev/chat";
export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github";

View File

@@ -106,6 +106,7 @@ const cn = {
copyLastMessage: "复制最后一个回复",
copyLastCode: "复制最后一个代码块",
showShortcutKey: "显示快捷方式",
clearContext: "清除上下文",
},
},
Export: {
@@ -176,7 +177,7 @@ const cn = {
},
},
Lang: {
Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language`
Name: "Language", // 注意:如果要添加新的翻译,请不要翻译此值,将它保留为 `Language`
All: "所有语言",
},
Avatar: "头像",
@@ -462,6 +463,17 @@ const cn = {
SubTitle: "样例:",
},
},
DeepSeek: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义DeepSeek API Key",
Placeholder: "DeepSeek API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
XAI: {
ApiKey: {
Title: "接口密钥",
@@ -633,7 +645,7 @@ const cn = {
Sysmessage: "你是一个助手",
},
SearchChat: {
Name: "搜索",
Name: "搜索聊天记录",
Page: {
Title: "搜索聊天记录",
Search: "输入搜索关键词",

View File

@@ -107,6 +107,7 @@ const en: LocaleType = {
copyLastMessage: "Copy Last Reply",
copyLastCode: "Copy Last Code Block",
showShortcutKey: "Show Shortcuts",
clearContext: "Clear Context",
},
},
Export: {
@@ -446,6 +447,17 @@ const en: LocaleType = {
SubTitle: "Example: ",
},
},
DeepSeek: {
ApiKey: {
Title: "DeepSeek API Key",
SubTitle: "Use a custom DeepSeek API Key",
Placeholder: "DeepSeek API Key",
},
Endpoint: {
Title: "Endpoint Address",
SubTitle: "Example: ",
},
},
XAI: {
ApiKey: {
Title: "XAI API Key",

View File

@@ -100,6 +100,7 @@ const tw = {
copyLastMessage: "複製最後一個回覆",
copyLastCode: "複製最後一個程式碼區塊",
showShortcutKey: "顯示快捷方式",
clearContext: "清除上下文",
},
},
Export: {
@@ -485,7 +486,7 @@ const tw = {
},
},
SearchChat: {
Name: "搜尋",
Name: "搜尋聊天記錄",
Page: {
Title: "搜尋聊天記錄",
Search: "輸入搜尋關鍵詞",

View File

@@ -13,6 +13,7 @@ import {
MOONSHOT_BASE_URL,
STABILITY_BASE_URL,
IFLYTEK_BASE_URL,
DEEPSEEK_BASE_URL,
XAI_BASE_URL,
CHATGLM_BASE_URL,
} from "../constant";
@@ -47,6 +48,8 @@ const DEFAULT_STABILITY_URL = isApp ? STABILITY_BASE_URL : ApiPath.Stability;
const DEFAULT_IFLYTEK_URL = isApp ? IFLYTEK_BASE_URL : ApiPath.Iflytek;
const DEFAULT_DEEPSEEK_URL = isApp ? DEEPSEEK_BASE_URL : ApiPath.DeepSeek;
const DEFAULT_XAI_URL = isApp ? XAI_BASE_URL : ApiPath.XAI;
const DEFAULT_CHATGLM_URL = isApp ? CHATGLM_BASE_URL : ApiPath.ChatGLM;
@@ -108,6 +111,10 @@ const DEFAULT_ACCESS_STATE = {
iflytekApiKey: "",
iflytekApiSecret: "",
// deepseek
deepseekUrl: DEFAULT_DEEPSEEK_URL,
deepseekApiKey: "",
// xai
xaiUrl: DEFAULT_XAI_URL,
xaiApiKey: "",
@@ -124,6 +131,7 @@ const DEFAULT_ACCESS_STATE = {
disableFastLink: false,
customModels: "",
defaultModel: "",
visionModels: "",
// tts config
edgeTTSVoiceName: "zh-CN-YunxiNeural",
@@ -138,7 +146,10 @@ export const useAccessStore = createPersistStore(
return get().needCode;
},
getVisionModels() {
this.fetch();
return get().visionModels;
},
edgeVoiceName() {
this.fetch();
@@ -183,6 +194,9 @@ export const useAccessStore = createPersistStore(
isValidIflytek() {
return ensure(get(), ["iflytekApiKey"]);
},
isValidDeepSeek() {
return ensure(get(), ["deepseekApiKey"]);
},
isValidXAI() {
return ensure(get(), ["xaiApiKey"]);
@@ -207,6 +221,7 @@ export const useAccessStore = createPersistStore(
this.isValidTencent() ||
this.isValidMoonshot() ||
this.isValidIflytek() ||
this.isValidDeepSeek() ||
this.isValidXAI() ||
this.isValidChatGLM() ||
!this.enabledAccessControl() ||

View File

@@ -244,7 +244,11 @@ export const useChatStore = createPersistStore(
const newSession = createEmptySession();
newSession.topic = currentSession.topic;
-newSession.messages = [...currentSession.messages];
+// deep-copy messages so the forked session does not share objects with the original
+newSession.messages = currentSession.messages.map(msg => ({
+...msg,
+id: nanoid(), // generate a fresh message id
+}));
newSession.mask = {
...currentSession.mask,
modelConfig: {

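The spread `[...messages]` copied only the array, so a forked session shared message objects (and ids) with the original — editing one edited both. The difference, in isolation:

const original = [{ id: "m1", content: "hi" }];

// per-message copy with fresh ids (the fix): mutations stay local
const forked = original.map((msg) => ({ ...msg, id: "m2" /* nanoid() in the app */ }));
forked[0].content = "edited in fork";
console.log(original[0].content); // "hi" — untouched

// array spread alone (the bug): both sessions see the same objects
const shallow = [...original];
shallow[0].content = "edited";
console.log(original[0].content); // "edited" — the change leaked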
View File

@@ -1,5 +1,5 @@
import { LLMModel } from "../client/api";
-import { DalleSize, DalleQuality, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { getClientConfig } from "../config/client";
import {
DEFAULT_INPUT_TEMPLATE,
@@ -78,7 +78,7 @@ export const DEFAULT_CONFIG = {
compressProviderName: "",
enableInjectSystemPrompts: true,
template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
size: "1024x1024" as DalleSize,
size: "1024x1024" as ModelSize,
quality: "standard" as DalleQuality,
style: "vivid" as DalleStyle,
},

View File

@@ -11,3 +11,14 @@ export interface RequestMessage {
export type DalleSize = "1024x1024" | "1792x1024" | "1024x1792";
export type DalleQuality = "standard" | "hd";
export type DalleStyle = "vivid" | "natural";
export type ModelSize =
| "1024x1024"
| "1792x1024"
| "1024x1792"
| "768x1344"
| "864x1152"
| "1344x768"
| "1152x864"
| "1440x720"
| "720x1440";

View File

@@ -6,7 +6,8 @@ import { ServiceProvider } from "./constant";
// import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
import { fetch as tauriStreamFetch } from "./utils/stream";
import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
-import { getClientConfig } from "./config/client";
+import { useAccessStore } from "./store";
+import { ModelSize } from "./typing";
export function trimTopic(topic: string) {
// Fix an issue where double quotes still show in the Indonesian language
@@ -254,8 +255,8 @@ export function getMessageImages(message: RequestMessage): string[] {
}
export function isVisionModel(model: string) {
-const clientConfig = getClientConfig();
-const envVisionModels = clientConfig?.visionModels
+const visionModels = useAccessStore.getState().visionModels;
+const envVisionModels = visionModels
?.split(",")
.map((m) => m.trim());
if (envVisionModels?.includes(model)) {
@@ -271,6 +272,28 @@ export function isDalle3(model: string) {
return "dall-e-3" === model;
}
export function getModelSizes(model: string): ModelSize[] {
if (isDalle3(model)) {
return ["1024x1024", "1792x1024", "1024x1792"];
}
if (model.toLowerCase().includes("cogview")) {
return [
"1024x1024",
"768x1344",
"864x1152",
"1344x768",
"1152x864",
"1440x720",
"720x1440",
];
}
return [];
}
export function supportsCustomSize(model: string): boolean {
return getModelSizes(model).length > 0;
}
export function showPlugins(provider: ServiceProvider, model: string) {
if (
provider == ServiceProvider.OpenAI ||

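getModelSizes centralizes the per-model size lists (DALL·E 3 vs CogView) that the chat UI previously hardcoded, and supportsCustomSize gates the size selector. Quick usage:

import { getModelSizes, supportsCustomSize } from "@/app/utils";

getModelSizes("dall-e-3"); // ["1024x1024", "1792x1024", "1024x1792"]
getModelSizes("cogview-3-flash"); // the seven CogView sizes listed above
getModelSizes("gpt-4o"); // []
supportsCustomSize("gpt-4o"); // false — no size selector is rendered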
View File

@@ -202,3 +202,52 @@ export function isModelAvailableInServer(
const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
return modelTable[fullName]?.available === false;
}
/**
* Check if the model name is a GPT-4 related model
*
* @param modelName The name of the model to check
* @returns True if the model is a GPT-4 related model (excluding gpt-4o-mini)
*/
export function isGPT4Model(modelName: string): boolean {
return (
(modelName.startsWith("gpt-4") ||
modelName.startsWith("chatgpt-4o") ||
modelName.startsWith("o1")) &&
!modelName.startsWith("gpt-4o-mini")
);
}
/**
* Checks if a model is not available on any of the specified providers in the server.
*
* @param {string} customModels - A string of custom models, comma-separated.
* @param {string} modelName - The name of the model to check.
* @param {string|string[]} providerNames - A string or array of provider names to check against.
*
* @returns {boolean} True if the model is not available on any of the specified providers, false otherwise.
*/
export function isModelNotavailableInServer(
customModels: string,
modelName: string,
providerNames: string | string[],
): boolean {
// Check DISABLE_GPT4 environment variable
if (
process.env.DISABLE_GPT4 === "1" &&
isGPT4Model(modelName.toLowerCase())
) {
return true;
}
const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
const providerNamesArray = Array.isArray(providerNames)
? providerNames
: [providerNames];
for (const providerName of providerNamesArray) {
const fullName = `${modelName}@${providerName.toLowerCase()}`;
if (modelTable?.[fullName]?.available === true) return false;
}
return true;
}
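The inverted helper returns true (blocked) unless the model is explicitly available for at least one candidate provider. A few illustrative calls, assuming the existing customModels syntax (-all disables everything, +name@provider re-enables a model for one provider) and DISABLE_GPT4 unset:

import { isModelNotavailableInServer } from "@/app/utils/model";

const customModels = "-all,+gpt-4o@openai";
isModelNotavailableInServer(customModels, "gpt-4o", "OpenAI"); // false — enabled there
isModelNotavailableInServer(customModels, "gpt-4o", ["Azure"]); // true — not enabled on Azure
isModelNotavailableInServer(customModels, "gpt-4o", ["OpenAI", "Azure"]); // false — one match suffices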