diff --git a/README.md b/README.md index 290a7f6ac..c9f195771 100644 --- a/README.md +++ b/README.md @@ -88,10 +88,14 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] Share as image, share to ShareGPT [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) - [x] Desktop App with tauri - [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc. -- [ ] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) +- [x] Plugins: support artifacts, network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) + - [x] artifacts + - [ ] network search, network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) ## What's New +- 🚀 v2.14.0 Now supports Artifacts & SD - 🚀 v2.10.1 support Google Gemini Pro model. - 🚀 v2.9.11 you can use azure endpoint now. - 🚀 v2.8 now we have a client that runs across all platforms! 
@@ -120,15 +124,20 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] 分享为图片,分享到 ShareGPT 链接 [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) - [x] 使用 tauri 打包桌面应用 - [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) -- [ ] 插件机制,支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) +- [x] 插件机制,支持 artifacts,联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) + - [x] artifacts + - [ ] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) ## 最新动态 +- 🚀 v2.14.0 现在支持 Artifacts & SD 了。 +- 🚀 v2.10.1 现在支持 Gemini Pro 模型。 +- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 +- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。 +- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。 - 🚀 v2.0 已经发布,现在你可以使用面具功能快速创建预制对话了! 了解更多: [ChatGPT 提示词高阶技能:零次、一次和少样本提示](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)。 - 💡 想要更方便地随时随地使用本项目?可以试下这款桌面插件:https://github.com/mushan0x0/AI0x0.com -- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。 -- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。 -- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 ## Get Started @@ -271,6 +280,18 @@ Alibaba Cloud Api Key. Alibaba Cloud Api Url. +### `IFLYTEK_URL` (Optional) + +iflytek Api Url. + +### `IFLYTEK_API_KEY` (Optional) + +iflytek Api Key. + +### `IFLYTEK_API_SECRET` (Optional) + +iflytek Api Secret. + ### `HIDE_USER_API_KEY` (optional) > Default: Empty diff --git a/README_CN.md b/README_CN.md index 8c464dc09..beed396c5 100644 --- a/README_CN.md +++ b/README_CN.md @@ -172,6 +172,20 @@ ByteDance Api Url. 阿里云(千问)Api Url. +### `IFLYTEK_URL` (可选) + +讯飞星火Api Url. 
+ +### `IFLYTEK_API_KEY` (可选) + +讯飞星火Api Key. + +### `IFLYTEK_API_SECRET` (可选) + +讯飞星火Api Secret. + + + ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts new file mode 100644 index 000000000..06e3e5160 --- /dev/null +++ b/app/api/[provider]/[...path]/route.ts @@ -0,0 +1,66 @@ +import { ApiPath } from "@/app/constant"; +import { NextRequest, NextResponse } from "next/server"; +import { handle as openaiHandler } from "../../openai"; +import { handle as azureHandler } from "../../azure"; +import { handle as googleHandler } from "../../google"; +import { handle as anthropicHandler } from "../../anthropic"; +import { handle as baiduHandler } from "../../baidu"; +import { handle as bytedanceHandler } from "../../bytedance"; +import { handle as alibabaHandler } from "../../alibaba"; +import { handle as moonshotHandler } from "../../moonshot"; +import { handle as stabilityHandler } from "../../stability"; +import { handle as iflytekHandler } from "../../iflytek"; +async function handle( + req: NextRequest, + { params }: { params: { provider: string; path: string[] } }, +) { + const apiPath = `/api/${params.provider}`; + console.log(`[${params.provider} Route] params `, params); + switch (apiPath) { + case ApiPath.Azure: + return azureHandler(req, { params }); + case ApiPath.Google: + return googleHandler(req, { params }); + case ApiPath.Anthropic: + return anthropicHandler(req, { params }); + case ApiPath.Baidu: + return baiduHandler(req, { params }); + case ApiPath.ByteDance: + return bytedanceHandler(req, { params }); + case ApiPath.Alibaba: + return alibabaHandler(req, { params }); + // case ApiPath.Tencent: using "/api/tencent" + case ApiPath.Moonshot: + return moonshotHandler(req, { params }); + case ApiPath.Stability: + return stabilityHandler(req, { params }); + case ApiPath.Iflytek: + return iflytekHandler(req, { params }); + default: + return 
openaiHandler(req, { params }); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/alibaba/[...path]/route.ts b/app/api/alibaba.ts similarity index 91% rename from app/api/alibaba/[...path]/route.ts rename to app/api/alibaba.ts index c97ce5934..675d9f301 100644 --- a/app/api/alibaba/[...path]/route.ts +++ b/app/api/alibaba.ts @@ -14,7 +14,7 @@ import type { RequestPayload } from "@/app/client/platforms/openai"; const serverConfig = getServerSideConfig(); -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -40,30 +40,6 @@ async function handle( } } -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; - async function request(req: NextRequest) { const controller = new AbortController(); diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic.ts similarity index 92% rename from app/api/anthropic/[...path]/route.ts rename to app/api/anthropic.ts index 20f8d52e0..3d49f4c88 100644 --- a/app/api/anthropic/[...path]/route.ts +++ b/app/api/anthropic.ts @@ -9,13 +9,13 @@ import { } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; +import { auth } from "./auth"; import { isModelAvailableInServer } from "@/app/utils/model"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]); 
-async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -56,30 +56,6 @@ async function handle( } } -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; - const serverConfig = getServerSideConfig(); async function request(req: NextRequest) { diff --git a/app/api/auth.ts b/app/api/auth.ts index 2913a1477..95965ceec 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -85,6 +85,13 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { case ModelProvider.Qwen: systemApiKey = serverConfig.alibabaApiKey; break; + case ModelProvider.Moonshot: + systemApiKey = serverConfig.moonshotApiKey; + break; + case ModelProvider.Iflytek: + systemApiKey = + serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret; + break; case ModelProvider.GPT: default: if (req.nextUrl.pathname.includes("azure/deployments")) { diff --git a/app/api/azure/[...path]/route.ts b/app/api/azure.ts similarity index 66% rename from app/api/azure/[...path]/route.ts rename to app/api/azure.ts index 4a17de0c8..e2cb0c7e6 100644 --- a/app/api/azure/[...path]/route.ts +++ b/app/api/azure.ts @@ -2,10 +2,10 @@ import { getServerSideConfig } from "@/app/config/server"; import { ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; -import { requestOpenai } from "../../common"; +import { auth } from "./auth"; +import { requestOpenai } from "./common"; -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -31,27 +31,3 @@ async function handle( return NextResponse.json(prettyObject(e)); } 
} - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; diff --git a/app/api/baidu/[...path]/route.ts b/app/api/baidu.ts similarity index 91% rename from app/api/baidu/[...path]/route.ts rename to app/api/baidu.ts index 94c9963c7..f4315d186 100644 --- a/app/api/baidu/[...path]/route.ts +++ b/app/api/baidu.ts @@ -14,7 +14,7 @@ import { getAccessToken } from "@/app/utils/baidu"; const serverConfig = getServerSideConfig(); -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -52,30 +52,6 @@ async function handle( } } -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; - async function request(req: NextRequest) { const controller = new AbortController(); diff --git a/app/api/bytedance/[...path]/route.ts b/app/api/bytedance.ts similarity index 90% rename from app/api/bytedance/[...path]/route.ts rename to app/api/bytedance.ts index 336c837f0..cb65b1061 100644 --- a/app/api/bytedance/[...path]/route.ts +++ b/app/api/bytedance.ts @@ -12,7 +12,7 @@ import { isModelAvailableInServer } from "@/app/utils/model"; const serverConfig = getServerSideConfig(); -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -38,30 +38,6 @@ async function handle( } } -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - 
"hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; - async function request(req: NextRequest) { const controller = new AbortController(); diff --git a/app/api/google/[...path]/route.ts b/app/api/google.ts similarity index 96% rename from app/api/google/[...path]/route.ts rename to app/api/google.ts index 83a7ce794..98fe469bf 100644 --- a/app/api/google/[...path]/route.ts +++ b/app/api/google.ts @@ -1,5 +1,5 @@ import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; +import { auth } from "./auth"; import { getServerSideConfig } from "@/app/config/server"; import { ApiPath, @@ -11,9 +11,9 @@ import { prettyObject } from "@/app/utils/format"; const serverConfig = getServerSideConfig(); -async function handle( +export async function handle( req: NextRequest, - { params }: { params: { path: string[] } }, + { params }: { params: { provider: string; path: string[] } }, ) { console.log("[Google Route] params ", params); diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts new file mode 100644 index 000000000..eabdd9f4c --- /dev/null +++ b/app/api/iflytek.ts @@ -0,0 +1,131 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + Iflytek, + IFLYTEK_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import type { RequestPayload } from "@/app/client/platforms/openai"; +// iflytek + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Iflytek Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Iflytek); + if 
(authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Iflytek] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // iflytek use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Iflytek, ""); + + let baseUrl = serverConfig.iflytekUrl || IFLYTEK_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Iflytek as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Iflytek] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts new file mode 100644 index 000000000..247dd6183 --- /dev/null +++ b/app/api/moonshot.ts @@ -0,0 +1,130 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + Moonshot, + MOONSHOT_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import type { RequestPayload } from "@/app/client/platforms/openai"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + 
console.log("[Moonshot Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Moonshot); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Moonshot] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Moonshot, ""); + + let baseUrl = serverConfig.moonshotUrl || MOONSHOT_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Moonshot as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Moonshot] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai.ts similarity index 80% rename from app/api/openai/[...path]/route.ts rename to app/api/openai.ts index 77059c151..0059ff8b4 100644 --- a/app/api/openai/[...path]/route.ts +++ b/app/api/openai.ts @@ -3,8 +3,8 @@ import { getServerSideConfig } from "@/app/config/server"; import { ModelProvider, OpenaiPath } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; -import { auth } from "../../auth"; -import { requestOpenai } from "../../common"; +import { auth } from "./auth"; +import { requestOpenai } from "./common"; const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); @@ -13,14 +13,14 @@ function getModels(remoteModelRes: 
OpenAIListModelResponse) { if (config.disableGPT4) { remoteModelRes.data = remoteModelRes.data.filter( - (m) => !m.id.startsWith("gpt-4"), + (m) => !m.id.startsWith("gpt-4") || m.id.startsWith("gpt-4o-mini"), ); } return remoteModelRes; } -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -70,27 +70,3 @@ async function handle( return NextResponse.json(prettyObject(e)); } } - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; -export const preferredRegion = [ - "arn1", - "bom1", - "cdg1", - "cle1", - "cpt1", - "dub1", - "fra1", - "gru1", - "hnd1", - "iad1", - "icn1", - "kix1", - "lhr1", - "pdx1", - "sfo1", - "sin1", - "syd1", -]; diff --git a/app/api/stability/[...path]/route.ts b/app/api/stability.ts similarity index 95% rename from app/api/stability/[...path]/route.ts rename to app/api/stability.ts index 4b2bcc305..2646ace85 100644 --- a/app/api/stability/[...path]/route.ts +++ b/app/api/stability.ts @@ -3,7 +3,7 @@ import { getServerSideConfig } from "@/app/config/server"; import { ModelProvider, STABILITY_BASE_URL } from "@/app/constant"; import { auth } from "@/app/api/auth"; -async function handle( +export async function handle( req: NextRequest, { params }: { params: { path: string[] } }, ) { @@ -97,8 +97,3 @@ async function handle( clearTimeout(timeoutId); } } - -export const GET = handle; -export const POST = handle; - -export const runtime = "edge"; diff --git a/app/api/tencent/route.ts b/app/api/tencent/route.ts new file mode 100644 index 000000000..885909e7a --- /dev/null +++ b/app/api/tencent/route.ts @@ -0,0 +1,124 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + TENCENT_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, + Tencent, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; 
+import { isModelAvailableInServer } from "@/app/utils/model"; +import { getHeader } from "@/app/utils/tencent"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Tencent Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Hunyuan); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Tencent] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let baseUrl = serverConfig.tencentUrl || TENCENT_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = baseUrl; + + const body = await req.text(); + const headers = await getHeader( + body, + serverConfig.tencentSecretId as string, + serverConfig.tencentSecretKey as string, + ); + const fetchOptions: RequestInit = { + headers, + method: req.method, + body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + 
newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 1f58a884f..9f96cbfcf 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -29,6 +29,7 @@ async function handle( const requestUrl = new URL(req.url); let endpoint = requestUrl.searchParams.get("endpoint"); + let proxy_method = requestUrl.searchParams.get("proxy_method") || req.method; // Validate the endpoint to prevent potential SSRF attacks if ( @@ -65,7 +66,11 @@ async function handle( const targetPath = `${endpoint}${endpointPath}`; // only allow MKCOL, GET, PUT - if (req.method !== "MKCOL" && req.method !== "GET" && req.method !== "PUT") { + if ( + proxy_method !== "MKCOL" && + proxy_method !== "GET" && + proxy_method !== "PUT" + ) { return NextResponse.json( { error: true, @@ -78,7 +83,7 @@ async function handle( } // for MKCOL request, only allow request ${folder} - if (req.method === "MKCOL" && !targetPath.endsWith(folder)) { + if (proxy_method === "MKCOL" && !targetPath.endsWith(folder)) { return NextResponse.json( { error: true, @@ -91,7 +96,7 @@ async function handle( } // for GET request, only allow request ending with fileName - if (req.method === "GET" && !targetPath.endsWith(fileName)) { + if (proxy_method === "GET" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -104,7 +109,7 @@ async function handle( } // for PUT request, only allow request ending with fileName - if (req.method === "PUT" && !targetPath.endsWith(fileName)) { + if (proxy_method === "PUT" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -118,7 +123,7 @@ async function handle( const targetUrl = targetPath; - const method = 
req.method; + const method = proxy_method || req.method; const shouldNotHaveBody = ["get", "head"].includes( method?.toLowerCase() ?? "", ); @@ -143,7 +148,7 @@ async function handle( "[Any Proxy]", targetUrl, { - method: req.method, + method: method, }, { status: fetchResult?.status, diff --git a/app/client/api.ts b/app/client/api.ts index 102a4220f..98202c4db 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -6,12 +6,15 @@ import { ServiceProvider, } from "../constant"; import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; -import { ChatGPTApi } from "./platforms/openai"; +import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai"; import { GeminiProApi } from "./platforms/google"; import { ClaudeApi } from "./platforms/anthropic"; import { ErnieApi } from "./platforms/baidu"; import { DoubaoApi } from "./platforms/bytedance"; import { QwenApi } from "./platforms/alibaba"; +import { HunyuanApi } from "./platforms/tencent"; +import { MoonshotApi } from "./platforms/moonshot"; +import { SparkApi } from "./platforms/iflytek"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -40,6 +43,7 @@ export interface LLMConfig { stream?: boolean; presence_penalty?: number; frequency_penalty?: number; + size?: DalleRequestPayload["size"]; } export interface ChatOptions { @@ -62,12 +66,14 @@ export interface LLMModel { displayName?: string; available: boolean; provider: LLMModelProvider; + sorted: number; } export interface LLMModelProvider { id: string; providerName: string; providerType: string; + sorted: number; } export abstract class LLMApi { @@ -117,6 +123,15 @@ export class ClientApi { case ModelProvider.Qwen: this.llm = new QwenApi(); break; + case ModelProvider.Hunyuan: + this.llm = new HunyuanApi(); + break; + case ModelProvider.Moonshot: + this.llm = new MoonshotApi(); + break; + case ModelProvider.Iflytek: + this.llm = new SparkApi(); + break; default: 
this.llm = new ChatGPTApi(); } @@ -199,6 +214,8 @@ export function getHeaders() { const isBaidu = modelConfig.providerName == ServiceProvider.Baidu; const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance; const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba; + const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot; + const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek; const isEnabledAccessControl = accessStore.enabledAccessControl(); const apiKey = isGoogle ? accessStore.googleApiKey @@ -210,6 +227,12 @@ export function getHeaders() { ? accessStore.bytedanceApiKey : isAlibaba ? accessStore.alibabaApiKey + : isMoonshot + ? accessStore.moonshotApiKey + : isIflytek + ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret + ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret + : "" : accessStore.openaiApiKey; return { isGoogle, @@ -218,6 +241,8 @@ export function getHeaders() { isBaidu, isByteDance, isAlibaba, + isMoonshot, + isIflytek, apiKey, isEnabledAccessControl, }; @@ -267,6 +292,12 @@ export function getClientApi(provider: ServiceProvider): ClientApi { return new ClientApi(ModelProvider.Doubao); case ServiceProvider.Alibaba: return new ClientApi(ModelProvider.Qwen); + case ServiceProvider.Tencent: + return new ClientApi(ModelProvider.Hunyuan); + case ServiceProvider.Moonshot: + return new ClientApi(ModelProvider.Moonshot); + case ServiceProvider.Iflytek: + return new ClientApi(ModelProvider.Iflytek); default: return new ClientApi(ModelProvider.GPT); } diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 188b78bf9..3be147f49 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -77,16 +77,24 @@ export class ErnieApi implements LLMApi { async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ - role: v.role, + // "error_code": 336006, "error_msg": "the role of message with even index in the 
messages must be user or function", + role: v.role === "system" ? "user" : v.role, content: getMessageTextContent(v), })); // "error_code": 336006, "error_msg": "the length of messages must be an odd number", if (messages.length % 2 === 0) { - messages.unshift({ - role: "user", - content: " ", - }); + if (messages.at(0)?.role === "user") { + messages.splice(1, 0, { + role: "assistant", + content: " ", + }); + } else { + messages.unshift({ + role: "user", + content: " ", + }); + } } const modelConfig = { diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 1f55beebc..12d884635 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -25,11 +25,9 @@ export class GeminiProApi implements LLMApi { baseUrl = accessStore.googleUrl; } + const isApp = !!getClientConfig()?.isApp; if (baseUrl.length === 0) { - const isApp = !!getClientConfig()?.isApp; - baseUrl = isApp - ? DEFAULT_API_HOST + `/api/proxy/google?key=${accessStore.googleApiKey}` - : ApiPath.Google; + baseUrl = isApp ? DEFAULT_API_HOST + `/api/proxy/google` : ApiPath.Google; } if (baseUrl.endsWith("/")) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); @@ -43,6 +41,10 @@ export class GeminiProApi implements LLMApi { let chatPath = [baseUrl, path].join("/"); chatPath += chatPath.includes("?") ? 
"&alt=sse" : "?alt=sse"; + // if chatPath.startsWith('http') then add key in query string + if (chatPath.startsWith("http") && accessStore.googleApiKey) { + chatPath += `&key=${accessStore.googleApiKey}`; + } return chatPath; } extractMessage(res: any) { diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts new file mode 100644 index 000000000..73cea5ba0 --- /dev/null +++ b/app/client/platforms/iflytek.ts @@ -0,0 +1,240 @@ +"use client"; +import { + ApiPath, + DEFAULT_API_HOST, + Iflytek, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +import { OpenAIListModelResponse, RequestPayload } from "./openai"; + +export class SparkApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.iflytekUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.Iflytek; + baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Iflytek)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + }; + + console.log("[Request] Spark payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(Iflytek.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // Make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // Animate response text to make it look smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + 
options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // Start animation + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log("[Spark] request response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + // Handle different error scenarios + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + extraInfo = Locale.Error.Unauthorized; + } + + options.onError?.( + new Error( + `Request failed with status ${res.status}: ${extraInfo}`, + ), + ); + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text); + options.onError?.(new Error(`Failed to parse response: ${text}`)); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + if (!res.ok) { + const errorText = await res.text(); + options.onError?.( + new Error(`Request failed with status ${res.status}: ${errorText}`), 
+ ); + return; + } + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts new file mode 100644 index 000000000..7d257ccb2 --- /dev/null +++ b/app/client/platforms/moonshot.ts @@ -0,0 +1,251 @@ +"use client"; +// azure and openai, using same models. so using same LLMApi. +import { + ApiPath, + DEFAULT_API_HOST, + DEFAULT_MODELS, + Moonshot, + REQUEST_TIMEOUT_MS, + ServiceProvider, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { collectModelsWithDefaultModel } from "@/app/utils/model"; +import { preProcessImageContent } from "@/app/utils/chat"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + LLMUsage, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +import { OpenAIListModelResponse, RequestPayload } from "./openai"; + +export class MoonshotApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.moonshotUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.Moonshot; + baseUrl = isApp ? 
DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? ""; + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. 
+ }; + + console.log("[Request] openai payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(Moonshot.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[OpenAI] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = 
[responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + const textmoderation = json?.prompt_filter_results; + + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 680125fe6..98c6f406a 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -11,8 +11,13 @@ import { } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { collectModelsWithDefaultModel } from "@/app/utils/model"; -import { preProcessImageContent } from "@/app/utils/chat"; +import { + preProcessImageContent, + uploadImage, + base64Image2Blob, +} from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; +import 
{ DalleSize } from "@/app/typing"; import { ChatOptions, @@ -33,6 +38,7 @@ import { getMessageTextContent, getMessageImages, isVisionModel, + isDalle3 as _isDalle3, } from "@/app/utils"; export interface OpenAIListModelResponse { @@ -58,6 +64,14 @@ export interface RequestPayload { max_tokens?: number; } +export interface DalleRequestPayload { + model: string; + prompt: string; + response_format: "url" | "b64_json"; + n: number; + size: DalleSize; +} + export class ChatGPTApi implements LLMApi { private disableListModels = true; @@ -100,20 +114,31 @@ export class ChatGPTApi implements LLMApi { return cloudflareAIGatewayUrl([baseUrl, path].join("/")); } - extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? ""; + async extractMessage(res: any) { + if (res.error) { + return "```\n" + JSON.stringify(res, null, 4) + "\n```"; + } + // dalle3 model return url, using url create image message + if (res.data) { + let url = res.data?.at(0)?.url ?? ""; + const b64_json = res.data?.at(0)?.b64_json ?? ""; + if (!url && b64_json) { + // uploadImage + url = await uploadImage(base64Image2Blob(b64_json, "image/png")); + } + return [ + { + type: "image_url", + image_url: { + url, + }, + }, + ]; + } + return res.choices?.at(0)?.message?.content ?? res; } async chat(options: ChatOptions) { - const visionModel = isVisionModel(options.config.model); - const messages: ChatOptions["messages"] = []; - for (const v of options.messages) { - const content = visionModel - ? 
await preProcessImageContent(v.content) - : getMessageTextContent(v); - messages.push({ role: v.role, content }); - } - const modelConfig = { ...useAppConfig.getState().modelConfig, ...useChatStore.getState().currentSession().mask.modelConfig, @@ -123,26 +148,52 @@ export class ChatGPTApi implements LLMApi { }, }; - const requestPayload: RequestPayload = { - messages, - stream: options.config.stream, - model: modelConfig.model, - temperature: modelConfig.temperature, - presence_penalty: modelConfig.presence_penalty, - frequency_penalty: modelConfig.frequency_penalty, - top_p: modelConfig.top_p, - // max_tokens: Math.max(modelConfig.max_tokens, 1024), - // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. - }; + let requestPayload: RequestPayload | DalleRequestPayload; - // add max_tokens to vision model - if (visionModel && modelConfig.model.includes("preview")) { - requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); + const isDalle3 = _isDalle3(options.config.model); + if (isDalle3) { + const prompt = getMessageTextContent( + options.messages.slice(-1)?.pop() as any, + ); + requestPayload = { + model: options.config.model, + prompt, + // URLs are only valid for 60 minutes after the image has been generated. + response_format: "b64_json", // using b64_json, and save image in CacheStorage + n: 1, + size: options.config?.size ?? "1024x1024", + }; + } else { + const visionModel = isVisionModel(options.config.model); + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = visionModel + ? 
await preProcessImageContent(v.content) + : getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + requestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + // max_tokens: Math.max(modelConfig.max_tokens, 1024), + // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + }; + + // add max_tokens to vision model + if (visionModel && modelConfig.model.includes("preview")) { + requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); + } } console.log("[Request] openai payload: ", requestPayload); - const shouldStream = !!options.config.stream; + const shouldStream = !isDalle3 && !!options.config.stream; const controller = new AbortController(); options.onController?.(controller); @@ -168,13 +219,15 @@ export class ChatGPTApi implements LLMApi { model?.provider?.providerName === ServiceProvider.Azure, ); chatPath = this.path( - Azure.ChatPath( + (isDalle3 ? Azure.ImagePath : Azure.ChatPath)( (model?.displayName ?? model?.name) as string, useCustomConfig ? useAccessStore.getState().azureApiVersion : "", ), ); } else { - chatPath = this.path(OpenaiPath.ChatPath); + chatPath = this.path( + isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath, + ); } const chatPayload = { method: "POST", @@ -186,7 +239,7 @@ export class ChatGPTApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - REQUEST_TIMEOUT_MS, + isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. 
); if (shouldStream) { @@ -317,7 +370,7 @@ export class ChatGPTApi implements LLMApi { clearTimeout(requestTimeoutId); const resJson = await res.json(); - const message = this.extractMessage(resJson); + const message = await this.extractMessage(resJson); options.onFinish(message); } } catch (e) { @@ -411,13 +464,17 @@ export class ChatGPTApi implements LLMApi { return []; } + //由于目前 OpenAI 的 disableListModels 默认为 true,所以当前实际不会运行到这场 + let seq = 1000; //同 Constant.ts 中的排序保持一致 return chatModels.map((m) => ({ name: m.id, available: true, + sorted: seq++, provider: { id: "openai", providerName: "OpenAI", providerType: "openai", + sorted: 1, }, })); } diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts new file mode 100644 index 000000000..579008a9b --- /dev/null +++ b/app/client/platforms/tencent.ts @@ -0,0 +1,268 @@ +"use client"; +import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent, isVisionModel } from "@/app/utils"; +import mapKeys from "lodash-es/mapKeys"; +import mapValues from "lodash-es/mapValues"; +import isArray from "lodash-es/isArray"; +import isObject from "lodash-es/isObject"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestPayload { + Messages: { + Role: "system" | "user" | "assistant"; + Content: string | MultimodalContent[]; + }[]; + Stream?: boolean; + Model: string; + Temperature: number; + TopP: number; +} + +function capitalizeKeys(obj: any): any 
{ + if (isArray(obj)) { + return obj.map(capitalizeKeys); + } else if (isObject(obj)) { + return mapValues( + mapKeys(obj, (value: any, key: string) => + key.replace(/(^|_)(\w)/g, (m, $1, $2) => $2.toUpperCase()), + ), + capitalizeKeys, + ); + } else { + return obj; + } +} + +export class HunyuanApi implements LLMApi { + path(): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.tencentUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + baseUrl = isApp + ? DEFAULT_API_HOST + "/api/proxy/tencent" + : ApiPath.Tencent; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Tencent)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl); + return baseUrl; + } + + extractMessage(res: any) { + return res.Choices?.at(0)?.Message?.Content ?? ""; + } + + async chat(options: ChatOptions) { + const visionModel = isVisionModel(options.config.model); + const messages = options.messages.map((v, index) => ({ + // "Messages 中 system 角色必须位于列表的最开始" + role: index !== 0 && v.role === "system" ? "user" : v.role, + content: visionModel ? 
v.content : getMessageTextContent(v), + })); + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const requestPayload: RequestPayload = capitalizeKeys({ + model: modelConfig.model, + messages, + temperature: modelConfig.temperature, + top_p: modelConfig.top_p, + stream: options.config.stream, + }); + + console.log("[Request] Tencent payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + 
clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[Tencent] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.Choices as Array<{ + Delta: { Content: string }; + }>; + const delta = choices[0]?.Delta?.Content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/command.ts b/app/command.ts index e515e5f0b..bea4e06f3 100644 --- a/app/command.ts +++ b/app/command.ts @@ -41,13 +41,16 @@ interface ChatCommands { del?: Command; } -export const ChatCommandPrefix = ":"; +// 
Compatible with Chinese colon character ":" +export const ChatCommandPrefix = /^[::]/; export function useChatCommand(commands: ChatCommands = {}) { function extract(userInput: string) { - return ( - userInput.startsWith(ChatCommandPrefix) ? userInput.slice(1) : userInput - ) as keyof ChatCommands; + const match = userInput.match(ChatCommandPrefix); + if (match) { + return userInput.slice(1) as keyof ChatCommands; + } + return userInput as keyof ChatCommands; } function search(userInput: string) { @@ -57,7 +60,7 @@ export function useChatCommand(commands: ChatCommands = {}) { .filter((c) => c.startsWith(input)) .map((c) => ({ title: desc[c as keyof ChatCommands], - content: ChatCommandPrefix + c, + content: ":" + c, })); } diff --git a/app/components/chat.tsx b/app/components/chat.tsx index bb4b611ad..e4ba869c3 100644 --- a/app/components/chat.tsx +++ b/app/components/chat.tsx @@ -37,6 +37,7 @@ import AutoIcon from "../icons/auto.svg"; import BottomIcon from "../icons/bottom.svg"; import StopIcon from "../icons/pause.svg"; import RobotIcon from "../icons/robot.svg"; +import SizeIcon from "../icons/size.svg"; import PluginIcon from "../icons/plugin.svg"; import { @@ -60,6 +61,7 @@ import { getMessageTextContent, getMessageImages, isVisionModel, + isDalle3, } from "../utils"; import { uploadImage as uploadImageRemote } from "@/app/utils/chat"; @@ -67,6 +69,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat"; import dynamic from "next/dynamic"; import { ChatControllerPool } from "../client/controller"; +import { DalleSize } from "../typing"; import { Prompt, usePromptStore } from "../store/prompt"; import Locale from "../locales"; @@ -481,6 +484,11 @@ export function ChatActions(props: { const [showPluginSelector, setShowPluginSelector] = useState(false); const [showUploadImage, setShowUploadImage] = useState(false); + const [showSizeSelector, setShowSizeSelector] = useState(false); + const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", 
"1024x1792"]; + const currentSize = + chatStore.currentSession().mask.modelConfig?.size ?? "1024x1024"; + useEffect(() => { const show = isVisionModel(currentModel); setShowUploadImage(show); @@ -624,6 +632,33 @@ export function ChatActions(props: { /> )} + {isDalle3(currentModel) && ( + setShowSizeSelector(true)} + text={currentSize} + icon={} + /> + )} + + {showSizeSelector && ( + ({ + title: m, + value: m, + }))} + onClose={() => setShowSizeSelector(false)} + onSelection={(s) => { + if (s.length === 0) return; + const size = s[0]; + chatStore.updateCurrentSession((session) => { + session.mask.modelConfig.size = size; + }); + showToast(size); + }} + /> + )} + setShowPluginSelector(true)} text={Locale.Plugin.Name} @@ -732,6 +767,7 @@ function _Chat() { const session = chatStore.currentSession(); const config = useAppConfig(); const fontSize = config.fontSize; + const fontFamily = config.fontFamily; const [showExport, setShowExport] = useState(false); @@ -811,7 +847,7 @@ function _Chat() { // clear search results if (n === 0) { setPromptHints([]); - } else if (text.startsWith(ChatCommandPrefix)) { + } else if (text.match(ChatCommandPrefix)) { setPromptHints(chatCommands.search(text)); } else if (!config.disablePromptHint && n < SEARCH_TEXT_LIMIT) { // check if need to trigger auto completion @@ -1470,6 +1506,7 @@ function _Chat() { )}
= messages.length - 6} /> @@ -1576,6 +1614,7 @@ function _Chat() { autoFocus={autoFocus} style={{ fontSize: config.fontSize, + fontFamily: config.fontFamily, }} /> {attachImages.length != 0 && ( diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx index 8210f61fb..1771cc9b0 100644 --- a/app/components/exporter.tsx +++ b/app/components/exporter.tsx @@ -583,6 +583,7 @@ export function ImagePreviewer(props: { {getMessageImages(m).length == 1 && ( diff --git a/app/components/home.module.scss b/app/components/home.module.scss index b836d2bec..b31334568 100644 --- a/app/components/home.module.scss +++ b/app/components/home.module.scss @@ -137,12 +137,18 @@ position: relative; padding-top: 20px; padding-bottom: 20px; + display: flex; + justify-content: space-between; + align-items: center; } .sidebar-logo { - position: absolute; - right: 0; - bottom: 18px; + display: inline-flex; +} + +.sidebar-title-container { + display: inline-flex; + flex-direction: column; } .sidebar-title { diff --git a/app/components/markdown.tsx b/app/components/markdown.tsx index fc8df6fe3..1531d2ff0 100644 --- a/app/components/markdown.tsx +++ b/app/components/markdown.tsx @@ -96,6 +96,32 @@ export function PreCode(props: { children: any }) { [plugins], ); + //Wrap the paragraph for plain-text + useEffect(() => { + if (ref.current) { + const codeElements = ref.current.querySelectorAll( + "code", + ) as NodeListOf; + const wrapLanguages = [ + "", + "md", + "markdown", + "text", + "txt", + "plaintext", + "tex", + "latex", + ]; + codeElements.forEach((codeElement) => { + let languageClass = codeElement.className.match(/language-(\w+)/); + let name = languageClass ? languageClass[1] : ""; + if (wrapLanguages.includes(name)) { + codeElement.style.whiteSpace = "pre-wrap"; + } + }); + } + }, []); + return ( <>
@@ -206,6 +232,7 @@ export function Markdown(
     content: string;
     loading?: boolean;
     fontSize?: number;
+    fontFamily?: string;
     parentRef?: RefObject;
     defaultShow?: boolean;
   } & React.DOMAttributes,
@@ -217,6 +244,7 @@ export function Markdown(
       className="markdown-body"
       style={{
         fontSize: `${props.fontSize ?? 14}px`,
+        fontFamily: props.fontFamily || "inherit",
       }}
       ref={mdRef}
       onContextMenu={props.onContextMenu}
diff --git a/app/components/sd/sd.tsx b/app/components/sd/sd.tsx
index f35d0e0cd..4f5f86960 100644
--- a/app/components/sd/sd.tsx
+++ b/app/components/sd/sd.tsx
@@ -23,7 +23,6 @@ import CopyIcon from "@/app/icons/copy.svg";
 import PromptIcon from "@/app/icons/prompt.svg";
 import ResetIcon from "@/app/icons/reload.svg";
 import { useSdStore } from "@/app/store/sd";
-import locales from "@/app/locales";
 import LoadingIcon from "@/app/icons/three-dots.svg";
 import ErrorIcon from "@/app/icons/delete.svg";
 import SDIcon from "@/app/icons/sd.svg";
@@ -64,14 +63,14 @@ function getSdTaskStatus(item: any) {
   return (
     

- {locales.Sd.Status.Name}: {s} + {Locale.Sd.Status.Name}: {s} {item.status === "error" && ( { showModal({ - title: locales.Sd.Detail, + title: Locale.Sd.Detail, children: (

{item.error} @@ -189,13 +188,13 @@ export function Sd() { className={styles["sd-img-item-info"]} >

- {locales.SdPanel.Prompt}:{" "} + {Locale.SdPanel.Prompt}:{" "} { showModal({ - title: locales.Sd.Detail, + title: Locale.Sd.Detail, children: (

{item.params.prompt} @@ -208,7 +207,7 @@ export function Sd() {

- {locales.SdPanel.AIModel}: {item.model_name} + {Locale.SdPanel.AIModel}: {item.model_name}

{getSdTaskStatus(item)}

{item.created_at}

@@ -219,7 +218,7 @@ export function Sd() { icon={} onClick={() => { showModal({ - title: locales.Sd.GenerateParams, + title: Locale.Sd.GenerateParams, children: (
{Object.keys(item.params).map((key) => { @@ -325,7 +324,7 @@ export function Sd() { ); }) ) : ( -
{locales.Sd.EmptyRecord}
+
{Locale.Sd.EmptyRecord}
)}
diff --git a/app/components/settings.tsx b/app/components/settings.tsx index bde3a792a..71fd2d839 100644 --- a/app/components/settings.tsx +++ b/app/components/settings.tsx @@ -54,8 +54,10 @@ import { Anthropic, Azure, Baidu, + Tencent, ByteDance, Alibaba, + Moonshot, Google, GoogleSafetySettingsThreshold, OPENAI_BASE_URL, @@ -66,6 +68,7 @@ import { SlotID, UPDATE_URL, Stability, + Iflytek, } from "../constant"; import { Prompt, SearchService, usePromptStore } from "../store/prompt"; import { ErrorBoundary } from "./error"; @@ -964,6 +967,57 @@ export function Settings() { ); + const tencentConfigComponent = accessStore.provider === + ServiceProvider.Tencent && ( + <> + + + accessStore.update( + (access) => (access.tencentUrl = e.currentTarget.value), + ) + } + > + + + { + accessStore.update( + (access) => (access.tencentSecretId = e.currentTarget.value), + ); + }} + /> + + + { + accessStore.update( + (access) => (access.tencentSecretKey = e.currentTarget.value), + ); + }} + /> + + + ); + const byteDanceConfigComponent = accessStore.provider === ServiceProvider.ByteDance && ( <> @@ -1042,6 +1096,45 @@ export function Settings() { ); + const moonshotConfigComponent = accessStore.provider === + ServiceProvider.Moonshot && ( + <> + + + accessStore.update( + (access) => (access.moonshotUrl = e.currentTarget.value), + ) + } + > + + + { + accessStore.update( + (access) => (access.moonshotApiKey = e.currentTarget.value), + ); + }} + /> + + + ); + const stabilityConfigComponent = accessStore.provider === ServiceProvider.Stability && ( <> @@ -1080,6 +1173,60 @@ export function Settings() { ); + const lflytekConfigComponent = accessStore.provider === + ServiceProvider.Iflytek && ( + <> + + + accessStore.update( + (access) => (access.iflytekUrl = e.currentTarget.value), + ) + } + > + + + { + accessStore.update( + (access) => (access.iflytekApiKey = e.currentTarget.value), + ); + }} + /> + + + + { + accessStore.update( + (access) => (access.iflytekApiSecret = 
e.currentTarget.value), + ); + }} + /> + + + ); return ( @@ -1224,6 +1371,22 @@ export function Settings() { > + + + updateConfig( + (config) => (config.fontFamily = e.currentTarget.value), + ) + } + > + + )} diff --git a/app/components/sidebar.tsx b/app/components/sidebar.tsx index dbcf7c5db..d8def056e 100644 --- a/app/components/sidebar.tsx +++ b/app/components/sidebar.tsx @@ -171,10 +171,12 @@ export function SideBarHeader(props: { return (
-
- {title} +
+
+ {title} +
+
{subTitle}
-
{subTitle}
{logo}
{children} diff --git a/app/config/server.ts b/app/config/server.ts index 59bc30ee2..5bfa2c2df 100644 --- a/app/config/server.ts +++ b/app/config/server.ts @@ -57,6 +57,20 @@ declare global { ALIBABA_URL?: string; ALIBABA_API_KEY?: string; + // tencent only + TENCENT_URL?: string; + TENCENT_SECRET_KEY?: string; + TENCENT_SECRET_ID?: string; + + // moonshot only + MOONSHOT_URL?: string; + MOONSHOT_API_KEY?: string; + + // iflytek only + IFLYTEK_URL?: string; + IFLYTEK_API_KEY?: string; + IFLYTEK_API_SECRET?: string; + // custom template for preprocessing user input DEFAULT_INPUT_TEMPLATE?: string; } @@ -105,10 +119,16 @@ export const getServerSideConfig = () => { if (disableGPT4) { if (customModels) customModels += ","; - customModels += DEFAULT_MODELS.filter((m) => m.name.startsWith("gpt-4")) + customModels += DEFAULT_MODELS.filter( + (m) => m.name.startsWith("gpt-4") && !m.name.startsWith("gpt-4o-mini"), + ) .map((m) => "-" + m.name) .join(","); - if (defaultModel.startsWith("gpt-4")) defaultModel = ""; + if ( + defaultModel.startsWith("gpt-4") && + !defaultModel.startsWith("gpt-4o-mini") + ) + defaultModel = ""; } const isStability = !!process.env.STABILITY_API_KEY; @@ -116,10 +136,13 @@ const isAzure = !!process.env.AZURE_URL; const isGoogle = !!process.env.GOOGLE_API_KEY; const isAnthropic = !!process.env.ANTHROPIC_API_KEY; + const isTencent = !!process.env.TENCENT_SECRET_KEY; const isBaidu = !!process.env.BAIDU_API_KEY; const isBytedance = !!process.env.BYTEDANCE_API_KEY; const isAlibaba = !!process.env.ALIBABA_API_KEY; + const isMoonshot = !!process.env.MOONSHOT_API_KEY; + const isIflytek = !!process.env.IFLYTEK_API_KEY; // const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? 
""; // const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim()); // const randomIndex = Math.floor(Math.random() * apiKeys.length); @@ -168,6 +191,20 @@ export const getServerSideConfig = () => { alibabaUrl: process.env.ALIBABA_URL, alibabaApiKey: getApiKey(process.env.ALIBABA_API_KEY), + isTencent, + tencentUrl: process.env.TENCENT_URL, + tencentSecretKey: getApiKey(process.env.TENCENT_SECRET_KEY), + tencentSecretId: process.env.TENCENT_SECRET_ID, + + isMoonshot, + moonshotUrl: process.env.MOONSHOT_URL, + moonshotApiKey: getApiKey(process.env.MOONSHOT_API_KEY), + + isIflytek, + iflytekUrl: process.env.IFLYTEK_URL, + iflytekApiKey: process.env.IFLYTEK_API_KEY, + iflytekApiSecret: process.env.IFLYTEK_API_SECRET, + cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID, cloudflareKVNamespaceId: process.env.CLOUDFLARE_KV_NAMESPACE_ID, cloudflareKVApiKey: getApiKey(process.env.CLOUDFLARE_KV_API_KEY), diff --git a/app/constant.ts b/app/constant.ts index 36c3f397b..aa207718c 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -23,6 +23,11 @@ export const BYTEDANCE_BASE_URL = "https://ark.cn-beijing.volces.com"; export const ALIBABA_BASE_URL = "https://dashscope.aliyuncs.com/api/"; +export const TENCENT_BASE_URL = "https://hunyuan.tencentcloudapi.com"; + +export const MOONSHOT_BASE_URL = "https://api.moonshot.cn"; +export const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com"; + export const CACHE_URL_PREFIX = "/api/cache"; export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`; @@ -47,6 +52,9 @@ export enum ApiPath { Baidu = "/api/baidu", ByteDance = "/api/bytedance", Alibaba = "/api/alibaba", + Tencent = "/api/tencent", + Moonshot = "/api/moonshot", + Iflytek = "/api/iflytek", Stability = "/api/stability", Artifacts = "/api/artifacts", } @@ -100,7 +108,10 @@ export enum ServiceProvider { Baidu = "Baidu", ByteDance = "ByteDance", Alibaba = "Alibaba", + Tencent = "Tencent", + Moonshot = "Moonshot", Stability = "Stability", + Iflytek = "Iflytek", } // Google 
API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings @@ -120,6 +131,9 @@ export enum ModelProvider { Ernie = "Ernie", Doubao = "Doubao", Qwen = "Qwen", + Hunyuan = "Hunyuan", + Moonshot = "Moonshot", + Iflytek = "Iflytek", } export const Stability = { @@ -136,6 +150,7 @@ export const Anthropic = { export const OpenaiPath = { ChatPath: "v1/chat/completions", + ImagePath: "v1/images/generations", UsagePath: "dashboard/billing/usage", SubsPath: "dashboard/billing/subscription", ListModelPath: "v1/models", @@ -144,7 +159,10 @@ export const OpenaiPath = { export const Azure = { ChatPath: (deployName: string, apiVersion: string) => `deployments/${deployName}/chat/completions?api-version=${apiVersion}`, - ExampleEndpoint: "https://{resource-url}/openai/deployments/{deploy-id}", + // https://.openai.azure.com/openai/deployments//images/generations?api-version= + ImagePath: (deployName: string, apiVersion: string) => + `deployments/${deployName}/images/generations?api-version=${apiVersion}`, + ExampleEndpoint: "https://{resource-url}/openai", }; export const Google = { @@ -183,6 +201,20 @@ export const Alibaba = { ChatPath: "v1/services/aigc/text-generation/generation", }; +export const Tencent = { + ExampleEndpoint: TENCENT_BASE_URL, +}; + +export const Moonshot = { + ExampleEndpoint: MOONSHOT_BASE_URL, + ChatPath: "v1/chat/completions", +}; + +export const Iflytek = { + ExampleEndpoint: IFLYTEK_BASE_URL, + ChatPath: "v1/chat/completions", +}; + export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang // export const DEFAULT_SYSTEM_TEMPLATE = ` // You are ChatGPT, a large language model trained by {{ServiceProvider}}. 
@@ -237,6 +269,7 @@ const openaiModels = [ "gpt-4-vision-preview", "gpt-4-turbo-2024-04-09", "gpt-4-1106-preview", + "dall-e-3", ]; const googleModels = [ @@ -289,68 +322,136 @@ const alibabaModes = [ "qwen-max-longcontext", ]; +const tencentModels = [ + "hunyuan-pro", + "hunyuan-standard", + "hunyuan-lite", + "hunyuan-role", + "hunyuan-functioncall", + "hunyuan-code", + "hunyuan-vision", +]; + +const moonshotModes = ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]; + +const iflytekModels = [ + "general", + "generalv3", + "pro-128k", + "generalv3.5", + "4.0Ultra", +]; + +let seq = 1000; // 内置的模型序号生成器从1000开始 export const DEFAULT_MODELS = [ ...openaiModels.map((name) => ({ name, available: true, + sorted: seq++, // Global sequence sort(index) provider: { id: "openai", providerName: "OpenAI", providerType: "openai", + sorted: 1, // 这里是固定的,确保顺序与之前内置的版本一致 }, })), ...openaiModels.map((name) => ({ name, available: true, + sorted: seq++, provider: { id: "azure", providerName: "Azure", providerType: "azure", + sorted: 2, }, })), ...googleModels.map((name) => ({ name, available: true, + sorted: seq++, provider: { id: "google", providerName: "Google", providerType: "google", + sorted: 3, }, })), ...anthropicModels.map((name) => ({ name, available: true, + sorted: seq++, provider: { id: "anthropic", providerName: "Anthropic", providerType: "anthropic", + sorted: 4, }, })), ...baiduModels.map((name) => ({ name, available: true, + sorted: seq++, provider: { id: "baidu", providerName: "Baidu", providerType: "baidu", + sorted: 5, }, })), ...bytedanceModels.map((name) => ({ name, available: true, + sorted: seq++, provider: { id: "bytedance", providerName: "ByteDance", providerType: "bytedance", + sorted: 6, }, })), ...alibabaModes.map((name) => ({ name, available: true, + sorted: seq++, provider: { id: "alibaba", providerName: "Alibaba", providerType: "alibaba", + sorted: 7, + }, + })), + ...tencentModels.map((name) => ({ + name, + available: true, + sorted: seq++, + 
provider: { + id: "tencent", + providerName: "Tencent", + providerType: "tencent", + sorted: 8, + }, + })), + ...moonshotModes.map((name) => ({ + name, + available: true, + sorted: seq++, + provider: { + id: "moonshot", + providerName: "Moonshot", + providerType: "moonshot", + sorted: 9, + }, + })), + ...iflytekModels.map((name) => ({ + name, + available: true, + sorted: seq++, + provider: { + id: "iflytek", + providerName: "Iflytek", + providerType: "iflytek", + sorted: 10, }, })), ] as const; diff --git a/app/icons/size.svg b/app/icons/size.svg new file mode 100644 index 000000000..3da4fadfe --- /dev/null +++ b/app/icons/size.svg @@ -0,0 +1 @@ + diff --git a/app/locales/ar.ts b/app/locales/ar.ts index b58c3a2e8..0a35ba603 100644 --- a/app/locales/ar.ts +++ b/app/locales/ar.ts @@ -111,6 +111,11 @@ const ar: PartialLocaleType = { Title: "حجم الخط", SubTitle: "ضبط حجم الخط لمحتوى الدردشة", }, + FontFamily: { + Title: "خط الدردشة", + SubTitle: "خط محتوى الدردشة، اتركه فارغًا لتطبيق الخط الافتراضي العالمي", + Placeholder: "اسم الخط", + }, InjectSystemPrompts: { Title: "حقن تلميحات النظام", SubTitle: diff --git a/app/locales/bn.ts b/app/locales/bn.ts index 6dfb0da9b..cc650a3fe 100644 --- a/app/locales/bn.ts +++ b/app/locales/bn.ts @@ -136,6 +136,12 @@ const bn: PartialLocaleType = { Title: "ফন্ট সাইজ", SubTitle: "চ্যাট সামগ্রীর ফন্ট সাইজ সংশোধন করুন", }, + FontFamily: { + Title: "চ্যাট ফন্ট", + SubTitle: + "চ্যাট সামগ্রীর ফন্ট, বিশ্বব্যাপী ডিফল্ট ফন্ট প্রয়োগ করতে খালি রাখুন", + Placeholder: "ফন্টের নাম", + }, InjectSystemPrompts: { Title: "حقن تلميحات النظام", SubTitle: diff --git a/app/locales/cn.ts b/app/locales/cn.ts index cae41bfee..4f47403ab 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -156,6 +156,11 @@ const cn = { Title: "字体大小", SubTitle: "聊天内容的字体大小", }, + FontFamily: { + Title: "聊天字体", + SubTitle: "聊天内容的字体,若置空则应用全局默认字体", + Placeholder: "字体名称", + }, InjectSystemPrompts: { Title: "注入系统级提示信息", SubTitle: "强制给每次请求的消息列表开头添加一个模拟 ChatGPT 的系统提示", @@ -371,6 
+376,22 @@ const cn = { SubTitle: "不支持自定义前往.env配置", }, }, + Tencent: { + ApiKey: { + Title: "API Key", + SubTitle: "使用自定义腾讯云API Key", + Placeholder: "Tencent API Key", + }, + SecretKey: { + Title: "Secret Key", + SubTitle: "使用自定义腾讯云Secret Key", + Placeholder: "Tencent Secret Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "不支持自定义前往.env配置", + }, + }, ByteDance: { ApiKey: { Title: "接口密钥", @@ -393,6 +414,17 @@ const cn = { SubTitle: "样例:", }, }, + Moonshot: { + ApiKey: { + Title: "接口密钥", + SubTitle: "使用自定义月之暗面API Key", + Placeholder: "Moonshot API Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + }, Stability: { ApiKey: { Title: "接口密钥", @@ -404,6 +436,22 @@ const cn = { SubTitle: "样例:", }, }, + Iflytek: { + ApiKey: { + Title: "ApiKey", + SubTitle: "从讯飞星火控制台获取的 APIKey", + Placeholder: "APIKey", + }, + ApiSecret: { + Title: "ApiSecret", + SubTitle: "从讯飞星火控制台获取的 APISecret", + Placeholder: "APISecret", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + }, CustomModel: { Title: "自定义模型名", SubTitle: "增加自定义模型可选项,使用英文逗号隔开", diff --git a/app/locales/cs.ts b/app/locales/cs.ts index c1a84430f..e70a0e839 100644 --- a/app/locales/cs.ts +++ b/app/locales/cs.ts @@ -71,6 +71,12 @@ const cs: PartialLocaleType = { Title: "Velikost písma", SubTitle: "Nastavení velikosti písma obsahu chatu", }, + FontFamily: { + Title: "Chatové Písmo", + SubTitle: + "Písmo obsahu chatu, ponechejte prázdné pro použití globálního výchozího písma", + Placeholder: "Název Písma", + }, InjectSystemPrompts: { Title: "Vložit systémové prompty", SubTitle: diff --git a/app/locales/de.ts b/app/locales/de.ts index 2fe871bc9..e50760d88 100644 --- a/app/locales/de.ts +++ b/app/locales/de.ts @@ -71,6 +71,12 @@ const de: PartialLocaleType = { Title: "Schriftgröße", SubTitle: "Schriftgröße des Chat-Inhalts anpassen", }, + FontFamily: { + Title: "Chat-Schriftart", + SubTitle: + "Schriftart des Chat-Inhalts, leer lassen, um die globale Standardschriftart anzuwenden", + Placeholder: 
"Schriftartname", + }, InjectSystemPrompts: { Title: "System-Prompts einfügen", SubTitle: diff --git a/app/locales/en.ts b/app/locales/en.ts index bfb383e8f..ac7880329 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -158,6 +158,12 @@ const en: LocaleType = { Title: "Font Size", SubTitle: "Adjust font size of chat content", }, + FontFamily: { + Title: "Chat Font Family", + SubTitle: + "Font Family of the chat content, leave empty to apply global default font", + Placeholder: "Font Family Name", + }, InjectSystemPrompts: { Title: "Inject System Prompts", SubTitle: "Inject a global system prompt for every request", @@ -354,6 +360,22 @@ const en: LocaleType = { SubTitle: "not supported, configure in .env", }, }, + Tencent: { + ApiKey: { + Title: "Tencent API Key", + SubTitle: "Use a custom Tencent API Key", + Placeholder: "Tencent API Key", + }, + SecretKey: { + Title: "Tencent Secret Key", + SubTitle: "Use a custom Tencent Secret Key", + Placeholder: "Tencent Secret Key", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "not supported, configure in .env", + }, + }, ByteDance: { ApiKey: { Title: "ByteDance API Key", @@ -376,6 +398,17 @@ const en: LocaleType = { SubTitle: "Example: ", }, }, + Moonshot: { + ApiKey: { + Title: "Moonshot API Key", + SubTitle: "Use a custom Moonshot API Key", + Placeholder: "Moonshot API Key", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "Example: ", + }, + }, Stability: { ApiKey: { Title: "Stability API Key", @@ -387,6 +420,22 @@ const en: LocaleType = { SubTitle: "Example: ", }, }, + Iflytek: { + ApiKey: { + Title: "Iflytek API Key", + SubTitle: "Use a Iflytek API Key", + Placeholder: "Iflytek API Key", + }, + ApiSecret: { + Title: "Iflytek API Secret", + SubTitle: "Use a Iflytek API Secret", + Placeholder: "Iflytek API Secret", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "Example: ", + }, + }, CustomModel: { Title: "Custom Models", SubTitle: "Custom model options, seperated by comma", 
diff --git a/app/locales/es.ts b/app/locales/es.ts index 7d742d536..f595fa3fc 100644 --- a/app/locales/es.ts +++ b/app/locales/es.ts @@ -71,6 +71,12 @@ const es: PartialLocaleType = { Title: "Tamaño de fuente", SubTitle: "Ajustar el tamaño de fuente del contenido del chat", }, + FontFamily: { + Title: "Fuente del Chat", + SubTitle: + "Fuente del contenido del chat, dejar vacío para aplicar la fuente predeterminada global", + Placeholder: "Nombre de la Fuente", + }, InjectSystemPrompts: { Title: "Inyectar Prompts del Sistema", SubTitle: diff --git a/app/locales/fr.ts b/app/locales/fr.ts index 944754d62..0fac154ed 100644 --- a/app/locales/fr.ts +++ b/app/locales/fr.ts @@ -111,6 +111,12 @@ const fr: PartialLocaleType = { Title: "Taille des polices", SubTitle: "Ajuste la taille de police du contenu de la conversation", }, + FontFamily: { + Title: "Police de Chat", + SubTitle: + "Police du contenu du chat, laissez vide pour appliquer la police par défaut globale", + Placeholder: "Nom de la Police", + }, InjectSystemPrompts: { Title: "Injecter des invites système", SubTitle: diff --git a/app/locales/id.ts b/app/locales/id.ts index 571156a57..0353433f9 100644 --- a/app/locales/id.ts +++ b/app/locales/id.ts @@ -140,6 +140,12 @@ const id: PartialLocaleType = { Title: "Ukuran Font", SubTitle: "Ubah ukuran font konten chat", }, + FontFamily: { + Title: "Font Obrolan", + SubTitle: + "Font dari konten obrolan, biarkan kosong untuk menerapkan font default global", + Placeholder: "Nama Font", + }, InjectSystemPrompts: { Title: "Suntikkan Petunjuk Sistem", SubTitle: @@ -369,8 +375,8 @@ const id: PartialLocaleType = { }, Exporter: { Description: { - Title: "Hanya pesan setelah menghapus konteks yang akan ditampilkan" - }, + Title: "Hanya pesan setelah menghapus konteks yang akan ditampilkan", + }, Model: "Model", Messages: "Pesan", Topic: "Topik", diff --git a/app/locales/it.ts b/app/locales/it.ts index 7f0a95846..5981c8d3d 100644 --- a/app/locales/it.ts +++ b/app/locales/it.ts @@ 
-71,6 +71,12 @@ const it: PartialLocaleType = { Title: "Dimensione carattere", SubTitle: "Regolare la dimensione dei caratteri del contenuto della chat", }, + FontFamily: { + Title: "Font della Chat", + SubTitle: + "Carattere del contenuto della chat, lascia vuoto per applicare il carattere predefinito globale", + Placeholder: "Nome del Font", + }, InjectSystemPrompts: { Title: "Inserisci Prompts di Sistema", SubTitle: diff --git a/app/locales/jp.ts b/app/locales/jp.ts index dcbd0f282..94ed8f8ac 100644 --- a/app/locales/jp.ts +++ b/app/locales/jp.ts @@ -118,6 +118,12 @@ const jp: PartialLocaleType = { Title: "フォントサイズ", SubTitle: "チャット内容のフォントサイズ", }, + FontFamily: { + Title: "チャットフォント", + SubTitle: + "チャットコンテンツのフォント、空白の場合はグローバルデフォルトフォントを適用します", + Placeholder: "フォント名", + }, InjectSystemPrompts: { Title: "システムプロンプトの挿入", SubTitle: diff --git a/app/locales/ko.ts b/app/locales/ko.ts index 844459fc4..b392e1c77 100644 --- a/app/locales/ko.ts +++ b/app/locales/ko.ts @@ -72,6 +72,11 @@ const ko: PartialLocaleType = { Title: "글꼴 크기", SubTitle: "채팅 내용의 글꼴 크기 조정", }, + FontFamily: { + Title: "채팅 폰트", + SubTitle: "채팅 내용의 폰트, 비워 두면 글로벌 기본 폰트를 적용", + Placeholder: "폰트 이름", + }, InjectSystemPrompts: { Title: "시스템 프롬프트 주입", SubTitle: diff --git a/app/locales/no.ts b/app/locales/no.ts index 3a0e61107..5e9dc252d 100644 --- a/app/locales/no.ts +++ b/app/locales/no.ts @@ -66,6 +66,12 @@ const no: PartialLocaleType = { Title: "Fontstørrelsen", SubTitle: "Juster fontstørrelsen for samtaleinnholdet.", }, + FontFamily: { + Title: "Chat-skrifttype", + SubTitle: + "Skrifttypen for chatinnhold, la stå tom for å bruke global standardskrifttype", + Placeholder: "Skriftnavn", + }, InjectSystemPrompts: { Title: "Sett inn systemprompter", SubTitle: diff --git a/app/locales/pt.ts b/app/locales/pt.ts index 8151b7aa4..10e915e92 100644 --- a/app/locales/pt.ts +++ b/app/locales/pt.ts @@ -153,6 +153,12 @@ const pt: PartialLocaleType = { Title: "Tamanho da Fonte", SubTitle: "Ajustar o tamanho da fonte do 
conteúdo do chat", }, + FontFamily: { + Title: "Fonte do Chat", + SubTitle: + "Fonte do conteúdo do chat, deixe vazio para aplicar a fonte padrão global", + Placeholder: "Nome da Fonte", + }, InjectSystemPrompts: { Title: "Inserir Prompts de Sistema", SubTitle: "Inserir um prompt de sistema global para cada requisição", diff --git a/app/locales/ru.ts b/app/locales/ru.ts index d12cf3e42..b547402ea 100644 --- a/app/locales/ru.ts +++ b/app/locales/ru.ts @@ -71,6 +71,12 @@ const ru: PartialLocaleType = { Title: "Размер шрифта", SubTitle: "Настроить размер шрифта контента чата", }, + FontFamily: { + Title: "Шрифт чата", + SubTitle: + "Шрифт содержимого чата, оставьте пустым для применения глобального шрифта по умолчанию", + Placeholder: "Название шрифта", + }, InjectSystemPrompts: { Title: "Вставить системные подсказки", SubTitle: diff --git a/app/locales/sk.ts b/app/locales/sk.ts index a97b7175c..9014f4f0c 100644 --- a/app/locales/sk.ts +++ b/app/locales/sk.ts @@ -155,6 +155,12 @@ const sk: PartialLocaleType = { Title: "Veľkosť písma", SubTitle: "Nastaviť veľkosť písma obsahu chatu", }, + FontFamily: { + Title: "Chatové Písmo", + SubTitle: + "Písmo obsahu chatu, ponechajte prázdne pre použitie globálneho predvoleného písma", + Placeholder: "Názov Písma", + }, InjectSystemPrompts: { Title: "Vložiť systémové výzvy", SubTitle: "Vložiť globálnu systémovú výzvu pre každú požiadavku", diff --git a/app/locales/tr.ts b/app/locales/tr.ts index 524c1b2c5..551e5e342 100644 --- a/app/locales/tr.ts +++ b/app/locales/tr.ts @@ -71,6 +71,12 @@ const tr: PartialLocaleType = { Title: "Yazı Boyutu", SubTitle: "Sohbet içeriğinin yazı boyutunu ayarlayın", }, + FontFamily: { + Title: "Sohbet Yazı Tipi", + SubTitle: + "Sohbet içeriğinin yazı tipi, boş bırakıldığında küresel varsayılan yazı tipi uygulanır", + Placeholder: "Yazı Tipi Adı", + }, InjectSystemPrompts: { Title: "Sistem İpucu Ekleyin", SubTitle: diff --git a/app/locales/tw.ts b/app/locales/tw.ts index 8011de13f..03afd432c 100644 
--- a/app/locales/tw.ts +++ b/app/locales/tw.ts @@ -153,6 +153,11 @@ const tw = { Title: "字型大小", SubTitle: "聊天內容的字型大小", }, + FontFamily: { + Title: "聊天字體", + SubTitle: "聊天內容的字體,若置空則應用全局默認字體", + Placeholder: "字體名稱", + }, InjectSystemPrompts: { Title: "匯入系統提示", SubTitle: "強制在每個請求的訊息列表開頭新增一個模擬 ChatGPT 的系統提示", diff --git a/app/locales/vi.ts b/app/locales/vi.ts index 3d95b5664..1f5c4fc68 100644 --- a/app/locales/vi.ts +++ b/app/locales/vi.ts @@ -71,6 +71,12 @@ const vi: PartialLocaleType = { Title: "Font chữ", SubTitle: "Thay đổi font chữ của nội dung trò chuyện", }, + FontFamily: { + Title: "Phông Chữ Trò Chuyện", + SubTitle: + "Phông chữ của nội dung trò chuyện, để trống để áp dụng phông chữ mặc định toàn cầu", + Placeholder: "Tên Phông Chữ", + }, InjectSystemPrompts: { Title: "Tiêm Prompt Hệ thống", SubTitle: diff --git a/app/store/access.ts b/app/store/access.ts index a204d35c9..b89b080d8 100644 --- a/app/store/access.ts +++ b/app/store/access.ts @@ -39,10 +39,22 @@ const DEFAULT_ALIBABA_URL = isApp ? DEFAULT_API_HOST + "/api/proxy/alibaba" : ApiPath.Alibaba; +const DEFAULT_TENCENT_URL = isApp + ? DEFAULT_API_HOST + "/api/proxy/tencent" + : ApiPath.Tencent; + +const DEFAULT_MOONSHOT_URL = isApp + ? DEFAULT_API_HOST + "/api/proxy/moonshot" + : ApiPath.Moonshot; + const DEFAULT_STABILITY_URL = isApp ? DEFAULT_API_HOST + "/api/proxy/stability" : ApiPath.Stability; +const DEFAULT_IFLYTEK_URL = isApp + ? 
DEFAULT_API_HOST + "/api/proxy/iflytek" + : ApiPath.Iflytek; + const DEFAULT_ACCESS_STATE = { accessCode: "", useCustomConfig: false, @@ -82,10 +94,24 @@ const DEFAULT_ACCESS_STATE = { alibabaUrl: DEFAULT_ALIBABA_URL, alibabaApiKey: "", + // moonshot + moonshotUrl: DEFAULT_MOONSHOT_URL, + moonshotApiKey: "", + //stability stabilityUrl: DEFAULT_STABILITY_URL, stabilityApiKey: "", + // tencent + tencentUrl: DEFAULT_TENCENT_URL, + tencentSecretKey: "", + tencentSecretId: "", + + // iflytek + iflytekUrl: DEFAULT_IFLYTEK_URL, + iflytekApiKey: "", + iflytekApiSecret: "", + // server config needCode: true, hideUserApiKey: false, @@ -134,6 +160,17 @@ export const useAccessStore = createPersistStore( return ensure(get(), ["alibabaApiKey"]); }, + isValidTencent() { + return ensure(get(), ["tencentSecretKey", "tencentSecretId"]); + }, + + isValidMoonshot() { + return ensure(get(), ["moonshotApiKey"]); + }, + isValidIflytek() { + return ensure(get(), ["iflytekApiKey"]); + }, + isAuthorized() { this.fetch(); @@ -146,6 +183,9 @@ this.isValidBaidu() || this.isValidByteDance() || this.isValidAlibaba() || + this.isValidTencent() || + this.isValidMoonshot() || + this.isValidIflytek() || !this.enabledAccessControl() || (this.enabledAccessControl() && ensure(get(), ["accessCode"])) ); diff --git a/app/store/chat.ts b/app/store/chat.ts index 5892ef0c8..653926d1b 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -26,6 +26,7 @@ import { nanoid } from "nanoid"; import { createPersistStore } from "../utils/store"; import { collectModelsWithDefaultModel } from "../utils/model"; import { useAccessStore } from "./access"; +import { isDalle3 } from "../utils"; export type ChatMessage = RequestMessage & { date: string; @@ -541,8 +542,13 @@ export const useChatStore = createPersistStore( const config = useAppConfig.getState(); const session = get().currentSession(); const modelConfig = session.mask.modelConfig; + // skip summarize when 
using dalle3? + if (isDalle3(modelConfig.model)) { + return; + } - const api: ClientApi = getClientApi(modelConfig.providerName); + const providerName = modelConfig.providerName; + const api: ClientApi = getClientApi(providerName); // remove error messages if any const messages = session.messages; @@ -565,6 +571,7 @@ export const useChatStore = createPersistStore( config: { model: getSummarizeModel(session.mask.modelConfig.model), stream: false, + providerName, }, onFinish(message) { get().updateCurrentSession( diff --git a/app/store/config.ts b/app/store/config.ts index 1eaafe12b..b478858a9 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -1,4 +1,5 @@ import { LLMModel } from "../client/api"; +import { DalleSize } from "../typing"; import { getClientConfig } from "../config/client"; import { DEFAULT_INPUT_TEMPLATE, @@ -33,6 +34,7 @@ export const DEFAULT_CONFIG = { submitKey: SubmitKey.Enter, avatar: "1f603", fontSize: 14, + fontFamily: "", theme: Theme.Auto as Theme, tightBorder: !!config?.isApp, sendPreviewBubble: true, @@ -60,6 +62,7 @@ export const DEFAULT_CONFIG = { compressMessageLengthThreshold: 1000, enableInjectSystemPrompts: true, template: config?.template ?? DEFAULT_INPUT_TEMPLATE, + size: "1024x1024" as DalleSize, }, }; diff --git a/app/typing.ts b/app/typing.ts index b09722ab9..863203581 100644 --- a/app/typing.ts +++ b/app/typing.ts @@ -7,3 +7,5 @@ export interface RequestMessage { role: MessageRole; content: string; } + +export type DalleSize = "1024x1024" | "1792x1024" | "1024x1792"; diff --git a/app/utils.ts b/app/utils.ts index 2f2c8ae95..2a2922907 100644 --- a/app/utils.ts +++ b/app/utils.ts @@ -194,6 +194,7 @@ export function autoGrowTextArea(dom: HTMLTextAreaElement) { measureDom.style.width = width + "px"; measureDom.innerText = dom.value !== "" ? 
dom.value : "1"; measureDom.style.fontSize = dom.style.fontSize; + measureDom.style.fontFamily = dom.style.fontFamily; const endWithEmptyLine = dom.value.endsWith("\n"); const height = parseFloat(window.getComputedStyle(measureDom).height); const singleLineHeight = parseFloat( @@ -265,3 +266,7 @@ export function isVisionModel(model: string) { visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo ); } + +export function isDalle3(model: string) { + return "dall-e-3" === model; +} diff --git a/app/utils/cloud/webdav.ts b/app/utils/cloud/webdav.ts index 0ca781b75..aa42649ca 100644 --- a/app/utils/cloud/webdav.ts +++ b/app/utils/cloud/webdav.ts @@ -14,8 +14,8 @@ export function createWebDavClient(store: SyncStore) { return { async check() { try { - const res = await fetch(this.path(folder, proxyUrl), { - method: "MKCOL", + const res = await fetch(this.path(folder, proxyUrl, "MKCOL"), { + method: "GET", headers: this.headers(), }); const success = [201, 200, 404, 405, 301, 302, 307, 308].includes( @@ -42,6 +42,10 @@ export function createWebDavClient(store: SyncStore) { console.log("[WebDav] get key = ", key, res.status, res.statusText); + if (404 == res.status) { + return ""; + } + return await res.text(); }, @@ -62,7 +66,7 @@ export function createWebDavClient(store: SyncStore) { authorization: `Basic ${auth}`, }; }, - path(path: string, proxyUrl: string = "") { + path(path: string, proxyUrl: string = "", proxyMethod: string = "") { if (path.startsWith("/")) { path = path.slice(1); } @@ -78,9 +82,13 @@ export function createWebDavClient(store: SyncStore) { let u = new URL(proxyUrl + pathPrefix + path); // add query params u.searchParams.append("endpoint", config.endpoint); + proxyMethod && u.searchParams.append("proxy_method", proxyMethod); url = u.toString(); } catch (e) { url = pathPrefix + path + "?endpoint=" + config.endpoint; + if (proxyMethod) { + url += "&proxy_method=" + proxyMethod; + } } return url; diff --git a/app/utils/hmac.ts 
b/app/utils/hmac.ts new file mode 100644 index 000000000..96292dac3 --- /dev/null +++ b/app/utils/hmac.ts @@ -0,0 +1,246 @@ +// From https://gist.github.com/guillermodlpa/f6d955f838e9b10d1ef95b8e259b2c58 +// From https://gist.github.com/stevendesu/2d52f7b5e1f1184af3b667c0b5e054b8 + +// To ensure cross-browser support even without a proper SubtleCrypto +// impelmentation (or without access to the impelmentation, as is the case with +// Chrome loaded over HTTP instead of HTTPS), this library can create SHA-256 +// HMAC signatures using nothing but raw JavaScript + +/* eslint-disable no-magic-numbers, id-length, no-param-reassign, new-cap */ + +// By giving internal functions names that we can mangle, future calls to +// them are reduced to a single byte (minor space savings in minified file) +const uint8Array = Uint8Array; +const uint32Array = Uint32Array; +const pow = Math.pow; + +// Will be initialized below +// Using a Uint32Array instead of a simple array makes the minified code +// a bit bigger (we lose our `unshift()` hack), but comes with huge +// performance gains +const DEFAULT_STATE = new uint32Array(8); +const ROUND_CONSTANTS: number[] = []; + +// Reusable object for expanded message +// Using a Uint32Array instead of a simple array makes the minified code +// 7 bytes larger, but comes with huge performance gains +const M = new uint32Array(64); + +// After minification the code to compute the default state and round +// constants is smaller than the output. More importantly, this serves as a +// good educational aide for anyone wondering where the magic numbers come +// from. No magic numbers FTW! 
+function getFractionalBits(n: number) { + return ((n - (n | 0)) * pow(2, 32)) | 0; +} + +let n = 2; +let nPrime = 0; +while (nPrime < 64) { + // isPrime() was in-lined from its original function form to save + // a few bytes + let isPrime = true; + // Math.sqrt() was replaced with pow(n, 1/2) to save a few bytes + // var sqrtN = pow(n, 1 / 2); + // So technically to determine if a number is prime you only need to + // check numbers up to the square root. However this function only runs + // once and we're only computing the first 64 primes (up to 311), so on + // any modern CPU this whole function runs in a couple milliseconds. + // By going to n / 2 instead of sqrt(n) we net 8 byte savings and no + // scaling performance cost + for (let factor = 2; factor <= n / 2; factor++) { + if (n % factor === 0) { + isPrime = false; + } + } + if (isPrime) { + if (nPrime < 8) { + DEFAULT_STATE[nPrime] = getFractionalBits(pow(n, 1 / 2)); + } + ROUND_CONSTANTS[nPrime] = getFractionalBits(pow(n, 1 / 3)); + + nPrime++; + } + + n++; +} + +// For cross-platform support we need to ensure that all 32-bit words are +// in the same endianness. 
A UTF-8 TextEncoder will return BigEndian data, +// so upon reading or writing to our ArrayBuffer we'll only swap the bytes +// if our system is LittleEndian (which is about 99% of CPUs) +const LittleEndian = !!new uint8Array(new uint32Array([1]).buffer)[0]; + +function convertEndian(word: number) { + if (LittleEndian) { + return ( + // byte 1 -> byte 4 + (word >>> 24) | + // byte 2 -> byte 3 + (((word >>> 16) & 0xff) << 8) | + // byte 3 -> byte 2 + ((word & 0xff00) << 8) | + // byte 4 -> byte 1 + (word << 24) + ); + } else { + return word; + } +} + +function rightRotate(word: number, bits: number) { + return (word >>> bits) | (word << (32 - bits)); +} + +function sha256(data: Uint8Array) { + // Copy default state + const STATE = DEFAULT_STATE.slice(); + + // Caching this reduces occurrences of ".length" in minified JavaScript + // 3 more byte savings! :D + const legth = data.length; + + // Pad data + const bitLength = legth * 8; + const newBitLength = 512 - ((bitLength + 64) % 512) - 1 + bitLength + 65; + + // "bytes" and "words" are stored BigEndian + const bytes = new uint8Array(newBitLength / 8); + const words = new uint32Array(bytes.buffer); + + bytes.set(data, 0); + // Append a 1 + bytes[legth] = 0b10000000; + // Store length in BigEndian + words[words.length - 1] = convertEndian(bitLength); + + // Loop iterator (avoid two instances of "var") -- saves 2 bytes + let round; + + // Process blocks (512 bits / 64 bytes / 16 words at a time) + for (let block = 0; block < newBitLength / 32; block += 16) { + const workingState = STATE.slice(); + + // Rounds + for (round = 0; round < 64; round++) { + let MRound; + // Expand message + if (round < 16) { + // Convert to platform Endianness for later math + MRound = convertEndian(words[block + round]); + } else { + const gamma0x = M[round - 15]; + const gamma1x = M[round - 2]; + MRound = + M[round - 7] + + M[round - 16] + + (rightRotate(gamma0x, 7) ^ + rightRotate(gamma0x, 18) ^ + (gamma0x >>> 3)) + + 
(rightRotate(gamma1x, 17) ^ + rightRotate(gamma1x, 19) ^ + (gamma1x >>> 10)); + } + + // M array matches platform endianness + M[round] = MRound |= 0; + + // Computation + const t1 = + (rightRotate(workingState[4], 6) ^ + rightRotate(workingState[4], 11) ^ + rightRotate(workingState[4], 25)) + + ((workingState[4] & workingState[5]) ^ + (~workingState[4] & workingState[6])) + + workingState[7] + + MRound + + ROUND_CONSTANTS[round]; + const t2 = + (rightRotate(workingState[0], 2) ^ + rightRotate(workingState[0], 13) ^ + rightRotate(workingState[0], 22)) + + ((workingState[0] & workingState[1]) ^ + (workingState[2] & (workingState[0] ^ workingState[1]))); + for (let i = 7; i > 0; i--) { + workingState[i] = workingState[i - 1]; + } + workingState[0] = (t1 + t2) | 0; + workingState[4] = (workingState[4] + t1) | 0; + } + + // Update state + for (round = 0; round < 8; round++) { + STATE[round] = (STATE[round] + workingState[round]) | 0; + } + } + + // Finally the state needs to be converted to BigEndian for output + // And we want to return a Uint8Array, not a Uint32Array + return new uint8Array( + new uint32Array( + STATE.map(function (val) { + return convertEndian(val); + }), + ).buffer, + ); +} + +function hmac(key: Uint8Array, data: ArrayLike) { + if (key.length > 64) key = sha256(key); + + if (key.length < 64) { + const tmp = new Uint8Array(64); + tmp.set(key, 0); + key = tmp; + } + + // Generate inner and outer keys + const innerKey = new Uint8Array(64); + const outerKey = new Uint8Array(64); + for (let i = 0; i < 64; i++) { + innerKey[i] = 0x36 ^ key[i]; + outerKey[i] = 0x5c ^ key[i]; + } + + // Append the innerKey + const msg = new Uint8Array(data.length + 64); + msg.set(innerKey, 0); + msg.set(data, 64); + + // Has the previous message and append the outerKey + const result = new Uint8Array(64 + 32); + result.set(outerKey, 0); + result.set(sha256(msg), 64); + + // Hash the previous message + return sha256(result); +} + +// Convert a string to a Uint8Array, 
SHA-256 it, and convert back to string +const encoder = new TextEncoder(); + +export function sign( + inputKey: string | Uint8Array, + inputData: string | Uint8Array, +) { + const key = + typeof inputKey === "string" ? encoder.encode(inputKey) : inputKey; + const data = + typeof inputData === "string" ? encoder.encode(inputData) : inputData; + return hmac(key, data); +} + +export function hex(bin: Uint8Array) { + return bin.reduce((acc, val) => { + const hexVal = "00" + val.toString(16); + return acc + hexVal.substring(hexVal.length - 2); + }, ""); +} + +export function hash(str: string) { + return hex(sha256(encoder.encode(str))); +} + +export function hashWithSecret(str: string, secret: string) { + return hex(sign(secret, str)).toString(); +} diff --git a/app/utils/model.ts b/app/utils/model.ts index 4de0eb8d9..0b62b53be 100644 --- a/app/utils/model.ts +++ b/app/utils/model.ts @@ -1,12 +1,42 @@ import { DEFAULT_MODELS } from "../constant"; import { LLMModel } from "../client/api"; +const CustomSeq = { + val: -1000, //To ensure the custom model located at front, start from -1000, refer to constant.ts + cache: new Map(), + next: (id: string) => { + if (CustomSeq.cache.has(id)) { + return CustomSeq.cache.get(id) as number; + } else { + let seq = CustomSeq.val++; + CustomSeq.cache.set(id, seq); + return seq; + } + }, +}; + const customProvider = (providerName: string) => ({ id: providerName.toLowerCase(), providerName: providerName, providerType: "custom", + sorted: CustomSeq.next(providerName), }); +/** + * Sorts an array of models based on specified rules. + * + * First, sorted by provider; if the same, sorted by model + */ +const sortModelTable = (models: ReturnType) => + models.sort((a, b) => { + if (a.provider && b.provider) { + let cmp = a.provider.sorted - b.provider.sorted; + return cmp === 0 ? 
a.sorted - b.sorted : cmp; + } else { + return a.sorted - b.sorted; + } + }); + export function collectModelTable( models: readonly LLMModel[], customModels: string, @@ -17,6 +47,7 @@ export function collectModelTable( available: boolean; name: string; displayName: string; + sorted: number; provider?: LLMModel["provider"]; // Marked as optional isDefault?: boolean; } @@ -84,6 +115,7 @@ export function collectModelTable( displayName: displayName || customModelName, available, provider, // Use optional chaining + sorted: CustomSeq.next(`${customModelName}@${provider?.id}`), }; } } @@ -99,13 +131,16 @@ export function collectModelTableWithDefaultModel( ) { let modelTable = collectModelTable(models, customModels); if (defaultModel && defaultModel !== "") { - if (defaultModel.includes('@')) { + if (defaultModel.includes("@")) { if (defaultModel in modelTable) { modelTable[defaultModel].isDefault = true; } } else { for (const key of Object.keys(modelTable)) { - if (modelTable[key].available && key.split('@').shift() == defaultModel) { + if ( + modelTable[key].available && + key.split("@").shift() == defaultModel + ) { modelTable[key].isDefault = true; break; } @@ -123,7 +158,9 @@ export function collectModels( customModels: string, ) { const modelTable = collectModelTable(models, customModels); - const allModels = Object.values(modelTable); + let allModels = Object.values(modelTable); + + allModels = sortModelTable(allModels); return allModels; } @@ -138,7 +175,10 @@ export function collectModelsWithDefaultModel( customModels, defaultModel, ); - const allModels = Object.values(modelTable); + let allModels = Object.values(modelTable); + + allModels = sortModelTable(allModels); + return allModels; } diff --git a/app/utils/tencent.ts b/app/utils/tencent.ts new file mode 100644 index 000000000..92772703c --- /dev/null +++ b/app/utils/tencent.ts @@ -0,0 +1,102 @@ +import { sign, hash as getHash, hex } from "./hmac"; + +// 使用 SHA-256 和 secret 进行 HMAC 加密 +function 
sha256(message: any, secret: any, encoding?: string) { + const result = sign(secret, message); + return encoding == "hex" ? hex(result).toString() : result; +} + +function getDate(timestamp: number) { + const date = new Date(timestamp * 1000); + const year = date.getUTCFullYear(); + const month = ("0" + (date.getUTCMonth() + 1)).slice(-2); + const day = ("0" + date.getUTCDate()).slice(-2); + return `${year}-${month}-${day}`; +} + +export async function getHeader( + payload: any, + SECRET_ID: string, + SECRET_KEY: string, +) { + // https://cloud.tencent.com/document/api/1729/105701 + + const endpoint = "hunyuan.tencentcloudapi.com"; + const service = "hunyuan"; + const region = ""; // optional + const action = "ChatCompletions"; + const version = "2023-09-01"; + const timestamp = Math.floor(Date.now() / 1000); + //时间处理, 获取世界时间日期 + const date = getDate(timestamp); + + // ************* 步骤 1:拼接规范请求串 ************* + + const hashedRequestPayload = getHash(payload); + const httpRequestMethod = "POST"; + const contentType = "application/json"; + const canonicalUri = "/"; + const canonicalQueryString = ""; + const canonicalHeaders = + `content-type:${contentType}\n` + + "host:" + + endpoint + + "\n" + + "x-tc-action:" + + action.toLowerCase() + + "\n"; + const signedHeaders = "content-type;host;x-tc-action"; + + const canonicalRequest = [ + httpRequestMethod, + canonicalUri, + canonicalQueryString, + canonicalHeaders, + signedHeaders, + hashedRequestPayload, + ].join("\n"); + + // ************* 步骤 2:拼接待签名字符串 ************* + const algorithm = "TC3-HMAC-SHA256"; + const hashedCanonicalRequest = getHash(canonicalRequest); + const credentialScope = date + "/" + service + "/" + "tc3_request"; + const stringToSign = + algorithm + + "\n" + + timestamp + + "\n" + + credentialScope + + "\n" + + hashedCanonicalRequest; + + // ************* 步骤 3:计算签名 ************* + const kDate = sha256(date, "TC3" + SECRET_KEY); + const kService = sha256(service, kDate); + const kSigning = 
sha256("tc3_request", kService); + const signature = sha256(stringToSign, kSigning, "hex"); + + // ************* 步骤 4:拼接 Authorization ************* + const authorization = + algorithm + + " " + + "Credential=" + + SECRET_ID + + "/" + + credentialScope + + ", " + + "SignedHeaders=" + + signedHeaders + + ", " + + "Signature=" + + signature; + + return { + Authorization: authorization, + "Content-Type": contentType, + Host: endpoint, + "X-TC-Action": action, + "X-TC-Timestamp": timestamp.toString(), + "X-TC-Version": version, + "X-TC-Region": region, + }; +} diff --git a/package.json b/package.json index ed5edb043..eb0a5ef67 100644 --- a/package.json +++ b/package.json @@ -4,14 +4,14 @@ "license": "mit", "scripts": { "mask": "npx tsx app/masks/build.ts", - "mask:watch": "npx watch 'yarn mask' app/masks", - "dev": "yarn run mask:watch & next dev", + "mask:watch": "npx watch \"yarn mask\" app/masks", + "dev": "concurrently -r \"yarn run mask:watch\" \"next dev\"", "build": "yarn mask && cross-env BUILD_MODE=standalone next build", "start": "next start", "lint": "next lint", "export": "yarn mask && cross-env BUILD_MODE=export BUILD_APP=1 next build", - "export:dev": "yarn mask:watch & cross-env BUILD_MODE=export BUILD_APP=1 next dev", - "app:dev": "yarn mask:watch & yarn tauri dev", + "export:dev": "concurrently -r \"yarn mask:watch\" \"cross-env BUILD_MODE=export BUILD_APP=1 next dev\"", + "app:dev": "concurrently -r \"yarn mask:watch\" \"yarn tauri dev\"", "app:build": "yarn mask && yarn tauri build", "prompts": "node ./scripts/fetch-prompts.mjs", "prepare": "husky install", @@ -28,6 +28,7 @@ "fuse.js": "^7.0.0", "heic2any": "^0.0.4", "html-to-image": "^1.11.11", + "lodash-es": "^4.17.21", "mermaid": "^10.6.1", "nanoid": "^5.0.3", "next": "^14.1.1", @@ -48,11 +49,13 @@ }, "devDependencies": { "@tauri-apps/cli": "1.5.11", + "@types/lodash-es": "^4.17.12", "@types/node": "^20.11.30", "@types/react": "^18.2.70", "@types/react-dom": "^18.2.7", "@types/react-katex": 
"^3.0.0", "@types/spark-md5": "^3.0.4", + "concurrently": "^8.2.2", "cross-env": "^7.0.3", "eslint": "^8.49.0", "eslint-config-next": "13.4.19", diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index feef57d16..245254eff 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -9,7 +9,7 @@ }, "package": { "productName": "NextChat", - "version": "2.14.0" + "version": "2.14.1" }, "tauri": { "allowlist": { diff --git a/yarn.lock b/yarn.lock index c323a5c38..793c845d7 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1035,6 +1035,13 @@ dependencies: regenerator-runtime "^0.14.0" +"@babel/runtime@^7.21.0": + version "7.25.0" + resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.25.0.tgz#3af9a91c1b739c569d5d80cc917280919c544ecb" + integrity sha512-7dRy4DwXwtzBrPbZflqxnvfxLF8kdZXPkhymtDeFoFqE6ldzjQFgYTtYIFARcLEYDrqfBfYcZt1WqFxRoyC9Rw== + dependencies: + regenerator-runtime "^0.14.0" + "@babel/template@^7.18.10", "@babel/template@^7.20.7": version "7.20.7" resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.20.7.tgz#a15090c2839a83b02aa996c0b4994005841fd5a8" @@ -1697,6 +1704,18 @@ resolved "https://registry.yarnpkg.com/@types/katex/-/katex-0.14.0.tgz#b84c0afc3218069a5ad64fe2a95321881021b5fe" integrity sha512-+2FW2CcT0K3P+JMR8YG846bmDwplKUTsWgT2ENwdQ1UdVfRk3GQrh6Mi4sTopy30gI8Uau5CEqHTDZ6YvWIUPA== +"@types/lodash-es@^4.17.12": + version "4.17.12" + resolved "https://registry.npmmirror.com/@types/lodash-es/-/lodash-es-4.17.12.tgz#65f6d1e5f80539aa7cfbfc962de5def0cf4f341b" + integrity sha512-0NgftHUcV4v34VhXm8QBSftKVXtbkBG3ViCjs6+eJ5a6y6Mi/jiFGPc1sC7QK+9BFhWrURE3EOggmWaSxL9OzQ== + dependencies: + "@types/lodash" "*" + +"@types/lodash@*": + version "4.17.7" + resolved "https://registry.npmmirror.com/@types/lodash/-/lodash-4.17.7.tgz#2f776bcb53adc9e13b2c0dfd493dfcbd7de43612" + integrity sha512-8wTvZawATi/lsmNu10/j2hk1KEP0IvjubqPE3cu1Xz7xfXXt5oCq3SNUz4fMIP4XGF9Ky+Ue2tBA3hcS7LSBlA== + "@types/mdast@^3.0.0": version "3.0.11" 
resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-3.0.11.tgz#dc130f7e7d9306124286f6d6cee40cf4d14a3dc0" @@ -2269,7 +2288,7 @@ chalk@^2.0.0, chalk@^2.4.2: escape-string-regexp "^1.0.5" supports-color "^5.3.0" -chalk@^4.0.0: +chalk@^4.0.0, chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -2335,6 +2354,15 @@ client-only@0.0.1: resolved "https://registry.yarnpkg.com/client-only/-/client-only-0.0.1.tgz#38bba5d403c41ab150bff64a95c85013cf73bca1" integrity sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA== +cliui@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.1" + wrap-ansi "^7.0.0" + color-convert@^1.9.0: version "1.9.3" resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" @@ -2394,6 +2422,21 @@ concat-map@0.0.1: resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== +concurrently@^8.2.2: + version "8.2.2" + resolved "https://registry.yarnpkg.com/concurrently/-/concurrently-8.2.2.tgz#353141985c198cfa5e4a3ef90082c336b5851784" + integrity sha512-1dP4gpXFhei8IOtlXRE/T/4H88ElHgTiUzh71YUmtjTEHMSRS2Z/fgOxHSxxusGHogsRfxNq1vyAwxSC+EVyDg== + dependencies: + chalk "^4.1.2" + date-fns "^2.30.0" + lodash "^4.17.21" + rxjs "^7.8.1" + shell-quote "^1.8.1" + spawn-command "0.0.2" + supports-color "^8.1.1" + tree-kill "^1.2.2" + yargs "^17.7.2" + 
convert-source-map@^1.7.0: version "1.9.0" resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.9.0.tgz#7faae62353fb4213366d0ca98358d22e8368b05f" @@ -2801,6 +2844,13 @@ data-uri-to-buffer@^4.0.0: resolved "https://registry.yarnpkg.com/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz#d8feb2b2881e6a4f58c2e08acfd0e2834e26222e" integrity sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A== +date-fns@^2.30.0: + version "2.30.0" + resolved "https://registry.yarnpkg.com/date-fns/-/date-fns-2.30.0.tgz#f367e644839ff57894ec6ac480de40cae4b0f4d0" + integrity sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw== + dependencies: + "@babel/runtime" "^7.21.0" + dayjs@^1.11.7: version "1.11.7" resolved "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.7.tgz#4b296922642f70999544d1144a2c25730fce63e2" @@ -3562,6 +3612,11 @@ gensync@^1.0.0-beta.2: resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== +get-caller-file@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@^1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.0.tgz#7ad1dc0535f3a2904bba075772763e5051f6d05f" @@ -5480,6 +5535,11 @@ remark-rehype@^10.0.0: mdast-util-to-hast "^12.1.0" unified "^10.0.0" +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity 
sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q== + resolve-from@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" @@ -5557,6 +5617,13 @@ rxjs@^7.8.0: dependencies: tslib "^2.1.0" +rxjs@^7.8.1: + version "7.8.1" + resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.1.tgz#6f6f3d99ea8044291efd92e7c7fcf562c4057543" + integrity sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg== + dependencies: + tslib "^2.1.0" + sade@^1.7.3: version "1.8.1" resolved "https://registry.yarnpkg.com/sade/-/sade-1.8.1.tgz#0a78e81d658d394887be57d2a409bf703a3b2701" @@ -5639,6 +5706,11 @@ shebang-regex@^3.0.0: resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== +shell-quote@^1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.8.1.tgz#6dbf4db75515ad5bac63b4f1894c3a154c766680" + integrity sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA== + side-channel@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" @@ -5717,6 +5789,11 @@ spark-md5@^3.0.2: resolved "https://registry.yarnpkg.com/spark-md5/-/spark-md5-3.0.2.tgz#7952c4a30784347abcee73268e473b9c0167e3fc" integrity sha512-wcFzz9cDfbuqe0FZzfi2or1sgyIrsDwmPwfZC4hiNidPdPINjeUwNfv5kldczoEAcjl9Y1L3SM7Uz2PUEQzxQw== +spawn-command@0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/spawn-command/-/spawn-command-0.0.2.tgz#9544e1a43ca045f8531aac1a48cb29bdae62338e" + integrity sha512-zC8zGoGkmc8J9ndvml8Xksr1Amk9qBujgbF0JAIWO7kXr43w0h/0GJNM/Vustixu+YE8N/MTrQ7N31FvHUACxQ== + stable@^0.1.8: version "0.1.8" resolved 
"https://registry.yarnpkg.com/stable/-/stable-0.1.8.tgz#836eb3c8382fe2936feaf544631017ce7d47a3cf" @@ -5739,7 +5816,7 @@ string-argv@^0.3.1: resolved "https://registry.yarnpkg.com/string-argv/-/string-argv-0.3.1.tgz#95e2fbec0427ae19184935f816d74aaa4c5c19da" integrity sha512-a1uQGz7IyVy9YwhqjZIZu1c8JO8dNIe20xBmSS6qu9kv++k3JGzCVmprbNN5Kn+BgzD5E7YYwg1CcjuJMRNsvg== -string-width@^4.1.0, string-width@^4.2.0: +string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -5860,7 +5937,7 @@ supports-color@^7.1.0: dependencies: has-flag "^4.0.0" -supports-color@^8.0.0: +supports-color@^8.0.0, supports-color@^8.1.1: version "8.1.1" resolved "https://registry.npmmirror.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== @@ -5956,6 +6033,11 @@ to-regex-range@^5.0.1: dependencies: is-number "^7.0.0" +tree-kill@^1.2.2: + version "1.2.2" + resolved "https://registry.yarnpkg.com/tree-kill/-/tree-kill-1.2.2.tgz#4ca09a9092c88b73a7cdc5e8a01b507b0790a0cc" + integrity sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A== + trim-lines@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/trim-lines/-/trim-lines-3.0.1.tgz#d802e332a07df861c48802c04321017b1bd87338" @@ -6355,6 +6437,11 @@ wrappy@1: resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== +y18n@^5.0.5: + version "5.0.8" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + integrity 
sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== + yallist@^3.0.2: version "3.1.1" resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" @@ -6375,6 +6462,24 @@ yaml@^2.2.2: resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.3.1.tgz#02fe0975d23cd441242aa7204e09fc28ac2ac33b" integrity sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ== +yargs-parser@^21.1.1: + version "21.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + +yargs@^17.7.2: + version "17.7.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== + dependencies: + cliui "^8.0.1" + escalade "^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.3" + y18n "^5.0.5" + yargs-parser "^21.1.1" + yocto-queue@^0.1.0: version "0.1.0" resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b"