diff --git a/.github/ISSUE_TEMPLATE/1_bug_report.yml b/.github/ISSUE_TEMPLATE/1_bug_report.yml new file mode 100644 index 000000000..b576629e3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1_bug_report.yml @@ -0,0 +1,80 @@ +name: '🐛 Bug Report' +description: 'Report a bug' +title: '[Bug] ' +labels: ['bug'] +body: + - type: dropdown + attributes: + label: '📦 Deployment Method' + multiple: true + options: + - 'Official installation package' + - 'Vercel' + - 'Zeabur' + - 'Sealos' + - 'Netlify' + - 'Docker' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 Version' + validations: + required: true + + - type: dropdown + attributes: + label: '💻 Operating System' + multiple: true + options: + - 'Windows' + - 'macOS' + - 'Ubuntu' + - 'Other Linux' + - 'iOS' + - 'iPad OS' + - 'Android' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 System Version' + validations: + required: true + - type: dropdown + attributes: + label: '🌐 Browser' + multiple: true + options: + - 'Chrome' + - 'Edge' + - 'Safari' + - 'Firefox' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 Browser Version' + validations: + required: true + - type: textarea + attributes: + label: '🐛 Bug Description' + description: A clear and concise description of the bug. If you selected `Other` in any of the options above, please also explain in detail. + validations: + required: true + - type: textarea + attributes: + label: '📷 Reproduction Steps' + description: A clear and concise description of how to reproduce the bug. + - type: textarea + attributes: + label: '🚦 Expected Behavior' + description: A clear and concise description of what you expected to happen. + - type: textarea + attributes: + label: '📝 Additional Information' + description: If your problem needs further explanation, or if the issue you're seeing cannot be reproduced in a gist, please add more information here. 
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml b/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml new file mode 100644 index 000000000..1977237de --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1_bug_report_cn.yml @@ -0,0 +1,80 @@ +name: '🐛 反馈缺陷' +description: '反馈一个问题/缺陷' +title: '[Bug] ' +labels: ['bug'] +body: + - type: dropdown + attributes: + label: '📦 部署方式' + multiple: true + options: + - '官方安装包' + - 'Vercel' + - 'Zeabur' + - 'Sealos' + - 'Netlify' + - 'Docker' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 软件版本' + validations: + required: true + + - type: dropdown + attributes: + label: '💻 系统环境' + multiple: true + options: + - 'Windows' + - 'macOS' + - 'Ubuntu' + - 'Other Linux' + - 'iOS' + - 'iPad OS' + - 'Android' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 系统版本' + validations: + required: true + - type: dropdown + attributes: + label: '🌐 浏览器' + multiple: true + options: + - 'Chrome' + - 'Edge' + - 'Safari' + - 'Firefox' + - 'Other' + validations: + required: true + - type: input + attributes: + label: '📌 浏览器版本' + validations: + required: true + - type: textarea + attributes: + label: '🐛 问题描述' + description: 请提供一个清晰且简洁的问题描述,若上述选项为`Other`,也请详细说明。 + validations: + required: true + - type: textarea + attributes: + label: '📷 复现步骤' + description: 请提供一个清晰且简洁的描述,说明如何复现问题。 + - type: textarea + attributes: + label: '🚦 期望结果' + description: 请提供一个清晰且简洁的描述,说明您期望发生什么。 + - type: textarea + attributes: + label: '📝 补充信息' + description: 如果您的问题需要进一步说明,或者您遇到的问题无法在一个简单的示例中复现,请在这里添加更多信息。 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/2_feature_request.yml b/.github/ISSUE_TEMPLATE/2_feature_request.yml new file mode 100644 index 000000000..8576e8a83 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2_feature_request.yml @@ -0,0 +1,21 @@ +name: '🌠 Feature Request' +description: 'Suggest an idea' +title: '[Feature Request] ' +labels: ['enhancement'] +body: + - type: textarea + attributes: + label: '🥰 Feature Description' + description: Please add a clear and concise description of the problem you are seeking to solve with this feature request. + validations: + required: true + - type: textarea + attributes: + label: '🧐 Proposed Solution' + description: Describe the solution you'd like in a clear and concise manner. + validations: + required: true + - type: textarea + attributes: + label: '📝 Additional Information' + description: Add any other context about the problem here. 
\ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml b/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml new file mode 100644 index 000000000..c7a3cc370 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2_feature_request_cn.yml @@ -0,0 +1,21 @@ +name: '🌠 功能需求' +description: '提出需求或建议' +title: '[Feature Request] ' +labels: ['enhancement'] +body: + - type: textarea + attributes: + label: '🥰 需求描述' + description: 请添加一个清晰且简洁的问题描述,阐述您希望通过这个功能需求解决的问题。 + validations: + required: true + - type: textarea + attributes: + label: '🧐 解决方案' + description: 请清晰且简洁地描述您想要的解决方案。 + validations: + required: true + - type: textarea + attributes: + label: '📝 补充信息' + description: 在这里添加关于问题的任何其他背景信息。 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml deleted file mode 100644 index bdba257d2..000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ /dev/null @@ -1,146 +0,0 @@ -name: Bug report -description: Create a report to help us improve -title: "[Bug] " -labels: ["bug"] - -body: - - type: markdown - attributes: - value: "## Describe the bug" - - type: textarea - id: bug-description - attributes: - label: "Bug Description" - description: "A clear and concise description of what the bug is." - placeholder: "Explain the bug..." - validations: - required: true - - - type: markdown - attributes: - value: "## To Reproduce" - - type: textarea - id: steps-to-reproduce - attributes: - label: "Steps to Reproduce" - description: "Steps to reproduce the behavior:" - placeholder: | - 1. Go to '...' - 2. Click on '....' - 3. Scroll down to '....' - 4. See error - validations: - required: true - - - type: markdown - attributes: - value: "## Expected behavior" - - type: textarea - id: expected-behavior - attributes: - label: "Expected Behavior" - description: "A clear and concise description of what you expected to happen." - placeholder: "Describe what you expected to happen..." - validations: - required: true - - - type: markdown - attributes: - value: "## Screenshots" - - type: textarea - id: screenshots - attributes: - label: "Screenshots" - description: "If applicable, add screenshots to help explain your problem." - placeholder: "Paste your screenshots here or write 'N/A' if not applicable..." - validations: - required: false - - - type: markdown - attributes: - value: "## Deployment" - - type: checkboxes - id: deployment - attributes: - label: "Deployment Method" - description: "Please select the deployment method you are using." - options: - - label: "Docker" - - label: "Vercel" - - label: "Server" - - - type: markdown - attributes: - value: "## Desktop (please complete the following information):" - - type: input - id: desktop-os - attributes: - label: "Desktop OS" - description: "Your desktop operating system." - placeholder: "e.g., Windows 10" - validations: - required: false - - type: input - id: desktop-browser - attributes: - label: "Desktop Browser" - description: "Your desktop browser." - placeholder: "e.g., Chrome, Safari" - validations: - required: false - - type: input - id: desktop-version - attributes: - label: "Desktop Browser Version" - description: "Version of your desktop browser." - placeholder: "e.g., 89.0" - validations: - required: false - - - type: markdown - attributes: - value: "## Smartphone (please complete the following information):" - - type: input - id: smartphone-device - attributes: - label: "Smartphone Device" - description: "Your smartphone device." 
- placeholder: "e.g., iPhone X" - validations: - required: false - - type: input - id: smartphone-os - attributes: - label: "Smartphone OS" - description: "Your smartphone operating system." - placeholder: "e.g., iOS 14.4" - validations: - required: false - - type: input - id: smartphone-browser - attributes: - label: "Smartphone Browser" - description: "Your smartphone browser." - placeholder: "e.g., Safari" - validations: - required: false - - type: input - id: smartphone-version - attributes: - label: "Smartphone Browser Version" - description: "Version of your smartphone browser." - placeholder: "e.g., 14" - validations: - required: false - - - type: markdown - attributes: - value: "## Additional Logs" - - type: textarea - id: additional-logs - attributes: - label: "Additional Logs" - description: "Add any logs about the problem here." - placeholder: "Paste any relevant logs here..." - validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml deleted file mode 100644 index 499781330..000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Feature request -description: Suggest an idea for this project -title: "[Feature Request]: " -labels: ["enhancement"] - -body: - - type: markdown - attributes: - value: "## Is your feature request related to a problem? Please describe." - - type: textarea - id: problem-description - attributes: - label: Problem Description - description: "A clear and concise description of what the problem is. Example: I'm always frustrated when [...]" - placeholder: "Explain the problem you are facing..." - validations: - required: true - - - type: markdown - attributes: - value: "## Describe the solution you'd like" - - type: textarea - id: desired-solution - attributes: - label: Solution Description - description: A clear and concise description of what you want to happen. - placeholder: "Describe the solution you'd like..." - validations: - required: true - - - type: markdown - attributes: - value: "## Describe alternatives you've considered" - - type: textarea - id: alternatives-considered - attributes: - label: Alternatives Considered - description: A clear and concise description of any alternative solutions or features you've considered. - placeholder: "Describe any alternative solutions or features you've considered..." - validations: - required: false - - - type: markdown - attributes: - value: "## Additional context" - - type: textarea - id: additional-context - attributes: - label: Additional Context - description: Add any other context or screenshots about the feature request here. - placeholder: "Add any other context or screenshots about the feature request here..." 
- validations: - required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000..3c4c90803 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,28 @@ +#### 💻 变更类型 | Change Type + + + +- [ ] feat +- [ ] fix +- [ ] refactor +- [ ] perf +- [ ] style +- [ ] test +- [ ] docs +- [ ] ci +- [ ] chore +- [ ] build + +#### 🔀 变更说明 | Description of Change + + + +#### 📝 补充信息 | Additional Information + + diff --git a/.gitignore b/.gitignore index b00b0e325..a24c6e047 100644 --- a/.gitignore +++ b/.gitignore @@ -43,4 +43,4 @@ dev .env *.key -*.key.pub \ No newline at end of file +*.key.pub diff --git a/Dockerfile b/Dockerfile index 51a810fc5..ae9a17cdd 100644 --- a/Dockerfile +++ b/Dockerfile @@ -43,7 +43,7 @@ COPY --from=builder /app/.next/server ./.next/server EXPOSE 3000 CMD if [ -n "$PROXY_URL" ]; then \ - export HOSTNAME="127.0.0.1"; \ + export HOSTNAME="0.0.0.0"; \ protocol=$(echo $PROXY_URL | cut -d: -f1); \ host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ port=$(echo $PROXY_URL | cut -d: -f3); \ @@ -58,7 +58,7 @@ CMD if [ -n "$PROXY_URL" ]; then \ echo "[ProxyList]" >> $conf; \ echo "$protocol $host $port" >> $conf; \ cat /etc/proxychains.conf; \ - proxychains -f $conf node server.js --host 0.0.0.0; \ + proxychains -f $conf node server.js; \ else \ node server.js; \ fi diff --git a/LICENSE b/LICENSE index 542e91f4e..047f9431e 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2023 Zhang Yifei +Copyright (c) 2023-2024 Zhang Yifei Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index d496d68ed..24967c164 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) -[web-url]: https://chatgpt.nextweb.fun +[web-url]: https://app.nextchat.dev/ [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge [Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows @@ -181,6 +181,7 @@ Specify OpenAI organization ID. ### `AZURE_URL` (optional) > Example: https://{azure-resource-url}/openai/deployments/{deploy-name} +> If you configure the deployment name in `CUSTOM_MODELS`, you can remove `{deploy-name}` from `AZURE_URL`. Azure deploy url. @@ -212,6 +213,34 @@ anthropic claude Api version. anthropic claude Api Url. +### `BAIDU_API_KEY` (optional) + +Baidu Api Key. + +### `BAIDU_SECRET_KEY` (optional) + +Baidu Secret Key. + +### `BAIDU_URL` (optional) + +Baidu Api Url. + +### `BYTEDANCE_API_KEY` (optional) + +ByteDance Api Key. + +### `BYTEDANCE_URL` (optional) + +ByteDance Api Url. + +### `ALIBABA_API_KEY` (optional) + +Alibaba Cloud Api Key. + +### `ALIBABA_URL` (optional) + +Alibaba Cloud Api Url. + ### `HIDE_USER_API_KEY` (optional) > Default: Empty If you do not want users to input their own API key, set this value to 1. ### `CODE` (optional) Access password, separated by comma. ### `BASE_URL` (optional) > Default: `https://api.openai.com` > Examples: `http://your-openai-proxy.com` Override openai api request base url. ### `OPENAI_ORG_ID` (optional) Specify OpenAI organization ID. @@ -245,6 +274,16 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model Use `-all` to disable all default models, `+all` to enable all default models. +For Azure: use `modelName@azure=deploymentName` to customize model name and deployment name. +> Example: `+gpt-3.5-turbo@azure=gpt35` will show option `gpt35(Azure)` in model list. 
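As a rough sketch of what this entry syntax encodes — using a hypothetical helper `parseCustomModelEntry` that is not part of this PR (the actual handling goes through `collectModelTable` / `isModelAvailableInServer` in `app/utils/model.ts`, imported by the routes below, and may differ in detail) — a single `CUSTOM_MODELS` item could be broken down like this; the same `model@provider=deployment` shape is also used for ByteDance in the next section:

```ts
// Hypothetical sketch (not the project's parser): decompose one CUSTOM_MODELS entry.
// Supported shapes, per the README text above:
//   "+model"                  -> enable a model
//   "-model"                  -> hide a model
//   "model=displayName"       -> rename a model
//   "+model@azure=deployName" -> bind a model to a provider deployment
interface CustomModelEntry {
  name: string;          // model name, e.g. "gpt-3.5-turbo"
  available: boolean;    // false when the entry starts with "-"
  providerName?: string; // e.g. "azure" or "bytedance"
  displayName?: string;  // display name or deployment name after "="
}

function parseCustomModelEntry(entry: string): CustomModelEntry {
  const available = !entry.startsWith("-");
  const body = entry.replace(/^[+-]/, "");
  const [fullName, displayName] = body.split("=");
  const [name, providerName] = fullName.split("@");
  return { name, available, providerName, displayName };
}

// parseCustomModelEntry("+gpt-3.5-turbo@azure=gpt35")
// -> { name: "gpt-3.5-turbo", available: true, providerName: "azure", displayName: "gpt35" }
```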
+ +For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. +> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. + +### `DEFAULT_MODEL` (optional) + +Change default model + ### `WHITE_WEBDEV_ENDPOINTS` (optional) You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: diff --git a/README_CN.md b/README_CN.md index 6811102b6..5400bb276 100644 --- a/README_CN.md +++ b/README_CN.md @@ -95,6 +95,7 @@ OpenAI 接口代理 URL,如果你手动配置了 openai 接口代理,请填 ### `AZURE_URL` (可选) > 形如:https://{azure-resource-url}/openai/deployments/{deploy-name} +> 如果你已经在`CUSTOM_MODELS`中参考`displayName`的方式配置了{deploy-name},那么可以从`AZURE_URL`中移除`{deploy-name}` Azure 部署地址。 @@ -106,26 +107,54 @@ Azure 密钥。 Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。 -### `GOOGLE_API_KEY` (optional) +### `GOOGLE_API_KEY` (可选) Google Gemini Pro 密钥. -### `GOOGLE_URL` (optional) +### `GOOGLE_URL` (可选) Google Gemini Pro Api Url. -### `ANTHROPIC_API_KEY` (optional) +### `ANTHROPIC_API_KEY` (可选) anthropic claude Api Key. -### `ANTHROPIC_API_VERSION` (optional) +### `ANTHROPIC_API_VERSION` (可选) anthropic claude Api version. -### `ANTHROPIC_URL` (optional) +### `ANTHROPIC_URL` (可选) anthropic claude Api Url. +### `BAIDU_API_KEY` (可选) + +Baidu Api Key. + +### `BAIDU_SECRET_KEY` (可选) + +Baidu Secret Key. + +### `BAIDU_URL` (可选) + +Baidu Api Url. + +### `BYTEDANCE_API_KEY` (可选) + +ByteDance Api Key. + +### `BYTEDANCE_URL` (可选) + +ByteDance Api Url. + +### `ALIBABA_API_KEY` (可选) + +阿里云(千问)Api Key. + +### `ALIBABA_URL` (可选) + +阿里云(千问)Api Url. + ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 @@ -156,7 +185,19 @@ anthropic claude Api Url. 
用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 +在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项 + +在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 + + +### `DEFAULT_MODEL` (可选) + +更改默认模型 + ### `DEFAULT_INPUT_TEMPLATE` (可选) + 自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项 ## 开发 diff --git a/app/api/alibaba/[...path]/route.ts b/app/api/alibaba/[...path]/route.ts new file mode 100644 index 000000000..c97ce5934 --- /dev/null +++ b/app/api/alibaba/[...path]/route.ts @@ -0,0 +1,155 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + Alibaba, + ALIBABA_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import type { RequestPayload } from "@/app/client/platforms/openai"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Alibaba Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Qwen); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Alibaba] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // alibaba use base url or just remove the path + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Alibaba, ""); + + let baseUrl = serverConfig.alibabaUrl || ALIBABA_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? "", + "X-DashScope-SSE": req.headers.get("X-DashScope-SSE") ?? 
"disable", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Alibaba as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Alibaba] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic/[...path]/route.ts index 4264893d9..20f8d52e0 100644 --- a/app/api/anthropic/[...path]/route.ts +++ b/app/api/anthropic/[...path]/route.ts @@ -4,12 +4,14 @@ import { Anthropic, ApiPath, DEFAULT_MODELS, + ServiceProvider, ModelProvider, } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "../../auth"; -import { collectModelTable } from "@/app/utils/model"; +import { isModelAvailableInServer } from "@/app/utils/model"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]); @@ -113,7 +115,8 @@ async function request(req: NextRequest) { 10 * 60 * 1000, ); - const fetchUrl = `${baseUrl}${path}`; + // try rebuild url, when using cloudflare ai gateway in server + const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}${path}`); const fetchOptions: RequestInit = { headers: { @@ -136,17 +139,19 @@ async function request(req: NextRequest) { // #1815 try to refuse some request to some models if (serverConfig.customModels && req.body) { try { - const modelTable = collectModelTable( - DEFAULT_MODELS, - serverConfig.customModels, - ); const clonedBody = await req.text(); fetchOptions.body = clonedBody; const jsonBody = JSON.parse(clonedBody) as { model?: string }; // not undefined and is false - if (modelTable[jsonBody?.model ?? 
""].available === false) { + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Anthropic as string, + ) + ) { return NextResponse.json( { error: true, @@ -161,17 +166,17 @@ async function request(req: NextRequest) { console.error(`[Anthropic] filter`, e); } } - console.log("[Anthropic request]", fetchOptions.headers, req.method); + // console.log("[Anthropic request]", fetchOptions.headers, req.method); try { const res = await fetch(fetchUrl, fetchOptions); - console.log( - "[Anthropic response]", - res.status, - " ", - res.headers, - res.url, - ); + // console.log( + // "[Anthropic response]", + // res.status, + // " ", + // res.headers, + // res.url, + // ); // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); newHeaders.delete("www-authenticate"); diff --git a/app/api/auth.ts b/app/api/auth.ts index b750f2d17..e3b88702e 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -73,9 +73,18 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { case ModelProvider.Claude: systemApiKey = serverConfig.anthropicApiKey; break; + case ModelProvider.Doubao: + systemApiKey = serverConfig.bytedanceApiKey; + break; + case ModelProvider.Ernie: + systemApiKey = serverConfig.baiduApiKey; + break; + case ModelProvider.Qwen: + systemApiKey = serverConfig.alibabaApiKey; + break; case ModelProvider.GPT: default: - if (serverConfig.isAzure) { + if (req.nextUrl.pathname.includes("azure/deployments")) { systemApiKey = serverConfig.azureApiKey; } else { systemApiKey = serverConfig.apiKey; diff --git a/app/api/azure/[...path]/route.ts b/app/api/azure/[...path]/route.ts new file mode 100644 index 000000000..4a17de0c8 --- /dev/null +++ b/app/api/azure/[...path]/route.ts @@ -0,0 +1,57 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { ModelProvider } from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "../../auth"; +import { requestOpenai } from "../../common"; + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Azure Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const subpath = params.path.join("/"); + + const authResult = auth(req, ModelProvider.GPT); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + return await requestOpenai(req); + } catch (e) { + console.error("[Azure] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/baidu/[...path]/route.ts b/app/api/baidu/[...path]/route.ts new file mode 100644 index 000000000..94c9963c7 --- /dev/null +++ b/app/api/baidu/[...path]/route.ts @@ -0,0 +1,169 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + BAIDU_BASE_URL, + ApiPath, + ModelProvider, + BAIDU_OATUH_URL, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } 
from "@/app/utils/model"; +import { getAccessToken } from "@/app/utils/baidu"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Baidu Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Ernie); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + if (!serverConfig.baiduApiKey || !serverConfig.baiduSecretKey) { + return NextResponse.json( + { + error: true, + message: `missing BAIDU_API_KEY or BAIDU_SECRET_KEY in server env vars`, + }, + { + status: 401, + }, + ); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[Baidu] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Baidu, ""); + + let baseUrl = serverConfig.baiduUrl || BAIDU_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const { access_token } = await getAccessToken( + serverConfig.baiduApiKey as string, + serverConfig.baiduSecretKey as string, + ); + const fetchUrl = `${baseUrl}${path}?access_token=${access_token}`; + + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Baidu as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[Baidu] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/bytedance/[...path]/route.ts b/app/api/bytedance/[...path]/route.ts new file mode 100644 index 000000000..336c837f0 --- /dev/null +++ b/app/api/bytedance/[...path]/route.ts @@ -0,0 +1,153 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + 
BYTEDANCE_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, +} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[ByteDance Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.Doubao); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[ByteDance] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; + +async function request(req: NextRequest) { + const controller = new AbortController(); + + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ByteDance, ""); + + let baseUrl = serverConfig.bytedanceUrl || BYTEDANCE_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? 
"", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse some request to some models + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // not undefined and is false + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.ByteDance as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[ByteDance] filter`, e); + } + } + + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/api/common.ts b/app/api/common.ts index a75f2de5c..24453dd96 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,17 +1,24 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; -import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant"; -import { collectModelTable } from "../utils/model"; -import { makeAzurePath } from "../azure"; +import { + DEFAULT_MODELS, + OPENAI_BASE_URL, + GEMINI_BASE_URL, + ServiceProvider, +} from "../constant"; +import { isModelAvailableInServer } from "../utils/model"; +import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; const serverConfig = getServerSideConfig(); export async function requestOpenai(req: NextRequest) { const controller = new AbortController(); + const isAzure = req.nextUrl.pathname.includes("azure/deployments"); + var authValue, authHeaderName = ""; - if (serverConfig.isAzure) { + if (isAzure) { authValue = req.headers .get("Authorization") @@ -31,7 +38,7 @@ export async function requestOpenai(req: NextRequest) { ); let baseUrl = - serverConfig.azureUrl || serverConfig.baseUrl || OPENAI_BASE_URL; + (isAzure ? 
serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL; if (!baseUrl.startsWith("http")) { baseUrl = `https://${baseUrl}`; @@ -51,17 +58,46 @@ export async function requestOpenai(req: NextRequest) { 10 * 60 * 1000, ); - if (serverConfig.isAzure) { - if (!serverConfig.azureApiVersion) { - return NextResponse.json({ - error: true, - message: `missing AZURE_API_VERSION in server env vars`, - }); + if (isAzure) { + const azureApiVersion = + req?.nextUrl?.searchParams?.get("api-version") || + serverConfig.azureApiVersion; + baseUrl = baseUrl.split("/deployments").shift() as string; + path = `${req.nextUrl.pathname.replaceAll( + "/api/azure/", + "", + )}?api-version=${azureApiVersion}`; + + // Forward compatibility: + // if display_name(deployment_name) not set, and '{deploy-id}' in AZURE_URL + // then using default '{deploy-id}' + if (serverConfig.customModels && serverConfig.azureUrl) { + const modelName = path.split("/")[1]; + let realDeployName = ""; + serverConfig.customModels + .split(",") + .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName)) + .forEach((m) => { + const [fullName, displayName] = m.split("="); + const [_, providerName] = fullName.split("@"); + if (providerName === "azure" && !displayName) { + const [_, deployId] = (serverConfig?.azureUrl ?? "").split( + "deployments/", + ); + if (deployId) { + realDeployName = deployId; + } + } + }); + if (realDeployName) { + console.log("[Replace with DeployId", realDeployName); + path = path.replaceAll(modelName, realDeployName); + } } - path = makeAzurePath(path, serverConfig.azureApiVersion); } - const fetchUrl = `${baseUrl}/${path}`; + const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`); + console.log("fetchUrl", fetchUrl); const fetchOptions: RequestInit = { headers: { "Content-Type": "application/json", @@ -83,17 +119,24 @@ export async function requestOpenai(req: NextRequest) { // #1815 try to refuse gpt4 request if (serverConfig.customModels && req.body) { try { - const modelTable = collectModelTable( - DEFAULT_MODELS, - serverConfig.customModels, - ); const clonedBody = await req.text(); fetchOptions.body = clonedBody; const jsonBody = JSON.parse(clonedBody) as { model?: string }; // not undefined and is false - if (modelTable[jsonBody?.model ?? 
""].available === false) { + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.OpenAI as string, + ) || + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.Azure as string, + ) + ) { return NextResponse.json( { error: true, @@ -112,16 +155,16 @@ export async function requestOpenai(req: NextRequest) { try { const res = await fetch(fetchUrl, fetchOptions); - // Extract the OpenAI-Organization header from the response - const openaiOrganizationHeader = res.headers.get("OpenAI-Organization"); + // Extract the OpenAI-Organization header from the response + const openaiOrganizationHeader = res.headers.get("OpenAI-Organization"); - // Check if serverConfig.openaiOrgId is defined and not an empty string - if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") { - // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present - console.log("[Org ID]", openaiOrganizationHeader); - } else { - console.log("[Org ID] is not set up."); - } + // Check if serverConfig.openaiOrgId is defined and not an empty string + if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") { + // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present + console.log("[Org ID]", openaiOrganizationHeader); + } else { + console.log("[Org ID] is not set up."); + } // to prevent browser prompt for credentials const newHeaders = new Headers(res.headers); @@ -129,7 +172,6 @@ export async function requestOpenai(req: NextRequest) { // to disable nginx buffering newHeaders.set("X-Accel-Buffering", "no"); - // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV) // Also, this is to prevent the header from being sent to the client if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") { @@ -142,7 +184,6 @@ export async function requestOpenai(req: NextRequest) { // The browser will try to decode the response with brotli and fail newHeaders.delete("content-encoding"); - return new Response(res.body, { status: res.status, statusText: res.statusText, diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts index ebd192891..81e50538a 100644 --- a/app/api/google/[...path]/route.ts +++ b/app/api/google/[...path]/route.ts @@ -63,7 +63,9 @@ async function handle( ); } - const fetchUrl = `${baseUrl}/${path}?key=${key}`; + const fetchUrl = `${baseUrl}/${path}?key=${key}${ + req?.nextUrl?.searchParams?.get("alt") == "sse" ? 
"&alt=sse" : "" + }`; const fetchOptions: RequestInit = { headers: { "Content-Type": "application/json", diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 816c2046b..01286fc1b 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -9,6 +9,14 @@ const mergedAllowedWebDavEndpoints = [ ...config.allowedWebDevEndpoints, ].filter((domain) => Boolean(domain.trim())); +const normalizeUrl = (url: string) => { + try { + return new URL(url); + } catch (err) { + return null; + } +}; + async function handle( req: NextRequest, { params }: { params: { path: string[] } }, @@ -24,9 +32,15 @@ async function handle( // Validate the endpoint to prevent potential SSRF attacks if ( - !mergedAllowedWebDavEndpoints.some( - (allowedEndpoint) => endpoint?.startsWith(allowedEndpoint), - ) + !endpoint || + !mergedAllowedWebDavEndpoints.some((allowedEndpoint) => { + const normalizedAllowedEndpoint = normalizeUrl(allowedEndpoint); + const normalizedEndpoint = normalizeUrl(endpoint as string); + + return normalizedEndpoint && + normalizedEndpoint.hostname === normalizedAllowedEndpoint?.hostname && + normalizedEndpoint.pathname.startsWith(normalizedAllowedEndpoint.pathname); + }) ) { return NextResponse.json( { diff --git a/app/azure.ts b/app/azure.ts deleted file mode 100644 index 48406c55b..000000000 --- a/app/azure.ts +++ /dev/null @@ -1,9 +0,0 @@ -export function makeAzurePath(path: string, apiVersion: string) { - // should omit /v1 prefix - path = path.replaceAll("v1/", ""); - - // should add api-key to query string - path += `${path.includes("?") ? "&" : "?"}api-version=${apiVersion}`; - - return path; -} diff --git a/app/client/api.ts b/app/client/api.ts index 7bee546b4..c0c71480c 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -9,6 +9,10 @@ import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; import { ChatGPTApi } from "./platforms/openai"; import { GeminiProApi } from "./platforms/google"; import { ClaudeApi } from "./platforms/anthropic"; +import { ErnieApi } from "./platforms/baidu"; +import { DoubaoApi } from "./platforms/bytedance"; +import { QwenApi } from "./platforms/alibaba"; + export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; @@ -30,6 +34,7 @@ export interface RequestMessage { export interface LLMConfig { model: string; + providerName?: string; temperature?: number; top_p?: number; stream?: boolean; @@ -54,6 +59,7 @@ export interface LLMUsage { export interface LLMModel { name: string; + displayName?: string; available: boolean; provider: LLMModelProvider; } @@ -102,6 +108,15 @@ export class ClientApi { case ModelProvider.Claude: this.llm = new ClaudeApi(); break; + case ModelProvider.Ernie: + this.llm = new ErnieApi(); + break; + case ModelProvider.Doubao: + this.llm = new DoubaoApi(); + break; + case ModelProvider.Qwen: + this.llm = new QwenApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -155,37 +170,100 @@ export class ClientApi { export function getHeaders() { const accessStore = useAccessStore.getState(); + const chatStore = useChatStore.getState(); const headers: Record = { "Content-Type": "application/json", Accept: "application/json", }; - const modelConfig = useChatStore.getState().currentSession().mask.modelConfig; - const isGoogle = modelConfig.model.startsWith("gemini"); - const isAzure = accessStore.provider === ServiceProvider.Azure; - const authHeader = isAzure ? 
"api-key" : "Authorization"; - const apiKey = isGoogle - ? accessStore.googleApiKey - : isAzure - ? accessStore.azureApiKey - : accessStore.openaiApiKey; - const clientConfig = getClientConfig(); - const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`; - const validString = (x: string) => x && x.length > 0; + const clientConfig = getClientConfig(); + + function getConfig() { + const modelConfig = chatStore.currentSession().mask.modelConfig; + const isGoogle = modelConfig.providerName == ServiceProvider.Google; + const isAzure = modelConfig.providerName === ServiceProvider.Azure; + const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic; + const isBaidu = modelConfig.providerName == ServiceProvider.Baidu; + const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance; + const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba; + const isEnabledAccessControl = accessStore.enabledAccessControl(); + const apiKey = isGoogle + ? accessStore.googleApiKey + : isAzure + ? accessStore.azureApiKey + : isAnthropic + ? accessStore.anthropicApiKey + : isByteDance + ? accessStore.bytedanceApiKey + : isAlibaba + ? accessStore.alibabaApiKey + : accessStore.openaiApiKey; + return { + isGoogle, + isAzure, + isAnthropic, + isBaidu, + isByteDance, + isAlibaba, + apiKey, + isEnabledAccessControl, + }; + } + + function getAuthHeader(): string { + return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization"; + } + + function getBearerToken(apiKey: string, noBearer: boolean = false): string { + return validString(apiKey) + ? `${noBearer ? "" : "Bearer "}${apiKey.trim()}` + : ""; + } + + function validString(x: string): boolean { + return x?.length > 0; + } + const { + isGoogle, + isAzure, + isAnthropic, + isBaidu, + apiKey, + isEnabledAccessControl, + } = getConfig(); // when using google api in app, not set auth header - if (!(isGoogle && clientConfig?.isApp)) { - // use user's api key first - if (validString(apiKey)) { - headers[authHeader] = makeBearer(apiKey); - } else if ( - accessStore.enabledAccessControl() && - validString(accessStore.accessCode) - ) { - headers[authHeader] = makeBearer( - ACCESS_CODE_PREFIX + accessStore.accessCode, - ); - } + if (isGoogle && clientConfig?.isApp) return headers; + // when using baidu api in app, not set auth header + if (isBaidu && clientConfig?.isApp) return headers; + + const authHeader = getAuthHeader(); + + const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic); + + if (bearerToken) { + headers[authHeader] = bearerToken; + } else if (isEnabledAccessControl && validString(accessStore.accessCode)) { + headers["Authorization"] = getBearerToken( + ACCESS_CODE_PREFIX + accessStore.accessCode, + ); } return headers; } + +export function getClientApi(provider: ServiceProvider): ClientApi { + switch (provider) { + case ServiceProvider.Google: + return new ClientApi(ModelProvider.GeminiPro); + case ServiceProvider.Anthropic: + return new ClientApi(ModelProvider.Claude); + case ServiceProvider.Baidu: + return new ClientApi(ModelProvider.Ernie); + case ServiceProvider.ByteDance: + return new ClientApi(ModelProvider.Doubao); + case ServiceProvider.Alibaba: + return new ClientApi(ModelProvider.Qwen); + default: + return new ClientApi(ModelProvider.GPT); + } +} diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts new file mode 100644 index 000000000..723ba774b --- /dev/null +++ b/app/client/platforms/alibaba.ts @@ -0,0 +1,268 @@ +"use client"; +import { + ApiPath, + 
Alibaba, + ALIBABA_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent, isVisionModel } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestInput { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; +} +interface RequestParam { + result_format: string; + incremental_output?: boolean; + temperature: number; + repetition_penalty?: number; + top_p: number; + max_tokens?: number; +} +interface RequestPayload { + model: string; + input: RequestInput; + parameters: RequestParam; +} + +export class QwenApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.alibabaUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + baseUrl = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Alibaba)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res?.output?.choices?.at(0)?.message?.content ?? ""; + } + + async chat(options: ChatOptions) { + const messages = options.messages.map((v) => ({ + role: v.role, + content: getMessageTextContent(v), + })); + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const shouldStream = !!options.config.stream; + const requestPayload: RequestPayload = { + model: modelConfig.model, + input: { + messages, + }, + parameters: { + result_format: "message", + incremental_output: shouldStream, + temperature: modelConfig.temperature, + // max_tokens: modelConfig.max_tokens, + top_p: modelConfig.top_p === 1 ? 0.99 : modelConfig.top_p, // qwen top_p is should be < 1 + }, + }; + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(Alibaba.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: { + ...getHeaders(), + "X-DashScope-SSE": shouldStream ? 
"enable" : "disable", + }, + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[Alibaba] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.output.choices as Array<{ + message: { content: string }; + }>; + const delta = choices[0]?.message?.content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { Alibaba }; diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index e90c8f057..bf8faf837 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -1,5 +1,5 @@ import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; -import { ChatOptions, LLMApi, MultimodalContent } from "../api"; +import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; 
import { DEFAULT_API_HOST } from "@/app/constant"; @@ -12,6 +12,7 @@ import { import Locale from "../../locales"; import { prettyObject } from "@/app/utils/format"; import { getMessageTextContent, isVisionModel } from "@/app/utils"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; export type MultiBlockContent = { type: "image" | "text"; @@ -190,11 +191,10 @@ export class ClaudeApi implements LLMApi { body: JSON.stringify(requestBody), signal: controller.signal, headers: { - "Content-Type": "application/json", - Accept: "application/json", - "x-api-key": accessStore.anthropicApiKey, + ...getHeaders(), // get common headers "anthropic-version": accessStore.anthropicApiVersion, - Authorization: getAuthKey(accessStore.anthropicApiKey), + // do not send `anthropicApiKey` in browser!!! + // Authorization: getAuthKey(accessStore.anthropicApiKey), }, }; @@ -376,7 +376,8 @@ export class ClaudeApi implements LLMApi { baseUrl = trimEnd(baseUrl, "/"); - return `${baseUrl}/${path}`; + // try rebuild url, when using cloudflare ai gateway in client + return cloudflareAIGatewayUrl(`${baseUrl}/${path}`); } } @@ -389,27 +390,3 @@ function trimEnd(s: string, end = " ") { return s; } - -function bearer(value: string) { - return `Bearer ${value.trim()}`; -} - -function getAuthKey(apiKey = "") { - const accessStore = useAccessStore.getState(); - const isApp = !!getClientConfig()?.isApp; - let authKey = ""; - - if (apiKey) { - // use user's api key first - authKey = bearer(apiKey); - } else if ( - accessStore.enabledAccessControl() && - !isApp && - !!accessStore.accessCode - ) { - // or use access code - authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode); - } - - return authKey; -} diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts new file mode 100644 index 000000000..188b78bf9 --- /dev/null +++ b/app/client/platforms/baidu.ts @@ -0,0 +1,273 @@ +"use client"; +import { + ApiPath, + Baidu, + BAIDU_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { getAccessToken } from "@/app/utils/baidu"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestPayload { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; + stream?: boolean; + model: string; + temperature: number; + presence_penalty: number; + frequency_penalty: number; + top_p: number; + max_tokens?: number; +} + +export class ErnieApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.baiduUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + // do not use proxy for baidubce api + baseUrl = isApp ? 
BAIDU_BASE_URL : ApiPath.Baidu; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Baidu)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + async chat(options: ChatOptions) { + const messages = options.messages.map((v) => ({ + role: v.role, + content: getMessageTextContent(v), + })); + + // "error_code": 336006, "error_msg": "the length of messages must be an odd number", + if (messages.length % 2 === 0) { + messages.unshift({ + role: "user", + content: " ", + }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const shouldStream = !!options.config.stream; + const requestPayload: RequestPayload = { + messages, + stream: shouldStream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + console.log("[Request] Baidu payload: ", requestPayload); + + const controller = new AbortController(); + options.onController?.(controller); + + try { + let chatPath = this.path(Baidu.ChatPath(modelConfig.model)); + + // getAccessToken can not run in browser, because cors error + if (!!getClientConfig()?.isApp) { + const accessStore = useAccessStore.getState(); + if (accessStore.useCustomConfig) { + if (accessStore.isValidBaidu()) { + const { access_token } = await getAccessToken( + accessStore.baiduApiKey, + accessStore.baiduSecretKey, + ); + chatPath = `${chatPath}${ + chatPath.includes("?") ? "&" : "?" 
+ }access_token=${access_token}`; + } + } + } + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log("[Baidu] request response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const delta = json?.result; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = resJson?.result; + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { Baidu }; diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts new file mode 100644 index 000000000..7677cafe1 --- /dev/null +++ b/app/client/platforms/bytedance.ts @@ -0,0 +1,255 @@ +"use client"; +import { + ApiPath, + ByteDance, + BYTEDANCE_BASE_URL, + REQUEST_TIMEOUT_MS, +} from "@/app/constant"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; + +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + MultimodalContent, +} from "../api"; +import Locale from "../../locales"; 
+import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; + +export interface OpenAIListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +interface RequestPayload { + messages: { + role: "system" | "user" | "assistant"; + content: string | MultimodalContent[]; + }[]; + stream?: boolean; + model: string; + temperature: number; + presence_penalty: number; + frequency_penalty: number; + top_p: number; + max_tokens?: number; +} + +export class DoubaoApi implements LLMApi { + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.bytedanceUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + baseUrl = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ByteDance)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? ""; + } + + async chat(options: ChatOptions) { + const messages = options.messages.map((v) => ({ + role: v.role, + content: getMessageTextContent(v), + })); + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + + const shouldStream = !!options.config.stream; + const requestPayload: RequestPayload = { + messages, + stream: shouldStream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(ByteDance.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + const finish = () => { + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + 
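+            // Before the stream is consumed: clear the request timeout,
+            // short-circuit plain-text bodies, and surface non-200 or
+            // non-event-stream responses as readable error text.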
clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[ByteDance] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { content: string }; + }>; + const delta = choices[0]?.delta?.content; + if (delta) { + remainText += delta; + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} +export { ByteDance }; diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index e62d1078e..100067e8e 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -3,6 +3,12 @@ import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; +import Locale from "../../locales"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; import { getMessageTextContent, getMessageImages, @@ -20,7 +26,7 @@ export class GeminiProApi implements LLMApi { ); } async chat(options: ChatOptions): Promise { - // const apiClient = this; + const apiClient = this; let multimodal = false; const messages = options.messages.map((v) => { let parts: any[] = [{ text: getMessageTextContent(v) }]; @@ -117,16 +123,13 @@ export class GeminiProApi implements LLMApi { const controller = new AbortController(); options.onController?.(controller); try { - // let baseUrl = accessStore.googleUrl; - - if (!baseUrl) { - baseUrl = isApp - ? 
DEFAULT_API_HOST + - "/api/proxy/google/" + - Google.ChatPath(modelConfig.model) - : this.path(Google.ChatPath(modelConfig.model)); + if (!baseUrl && isApp) { + baseUrl = DEFAULT_API_HOST + "/api/proxy/google/"; } - + baseUrl = `${baseUrl}/${Google.ChatPath(modelConfig.model)}`.replaceAll( + "//", + "/", + ); if (isApp) { baseUrl += `?key=${accessStore.googleApiKey}`; } @@ -148,10 +151,11 @@ export class GeminiProApi implements LLMApi { let remainText = ""; let finished = false; - let existingTexts: string[] = []; const finish = () => { - finished = true; - options.onFinish(existingTexts.join("")); + if (!finished) { + finished = true; + options.onFinish(responseText + remainText); + } }; // animate response to make it looks smooth @@ -176,72 +180,85 @@ export class GeminiProApi implements LLMApi { // start animaion animateResponseText(); - fetch( - baseUrl.replace("generateContent", "streamGenerateContent"), - chatPayload, - ) - .then((response) => { - const reader = response?.body?.getReader(); - const decoder = new TextDecoder(); - let partialData = ""; + controller.signal.onabort = finish; - return reader?.read().then(function processText({ - done, - value, - }): Promise { - if (done) { - if (response.status !== 200) { - try { - let data = JSON.parse(ensureProperEnding(partialData)); - if (data && data[0].error) { - options.onError?.(new Error(data[0].error.message)); - } else { - options.onError?.(new Error("Request failed")); - } - } catch (_) { - options.onError?.(new Error("Request failed")); - } - } + // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb + const chatPath = + baseUrl.replace("generateContent", "streamGenerateContent") + + (baseUrl.indexOf("?") > -1 ? "&alt=sse" : "?alt=sse"); + fetchEventSource(chatPath, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[Gemini] request response content type: ", + contentType, + ); - console.log("Stream complete"); - // options.onFinish(responseText + remainText); - finished = true; - return Promise.resolve(); - } - - partialData += decoder.decode(value, { stream: true }); + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); try { - let data = JSON.parse(ensureProperEnding(partialData)); + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} - const textArray = data.reduce( - (acc: string[], item: { candidates: any[] }) => { - const texts = item.candidates.map((candidate) => - candidate.content.parts - .map((part: { text: any }) => part.text) - .join(""), - ); - return acc.concat(texts); - }, - [], - ); - - if (textArray.length > existingTexts.length) { - const deltaArray = textArray.slice(existingTexts.length); - existingTexts = textArray; - remainText += deltaArray.join(""); - } - } catch (error) { - // console.log("[Response Animation] error: ", error,partialData); - // skip error message when parsing json + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); } - return reader.read().then(processText); - }); - }) - .catch((error) => { - console.error("Error:", error); - }); + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = 
responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || finished) { + return finish(); + } + const text = msg.data; + try { + const json = JSON.parse(text); + const delta = apiClient.extractMessage(json); + + if (delta) { + remainText += delta; + } + + const blockReason = json?.promptFeedback?.blockReason; + if (blockReason) { + // being blocked + console.log(`[Google] [Safety Ratings] result:`, blockReason); + } + } catch (e) { + console.error("[Request] parse error", text, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); } else { const res = await fetch(baseUrl, chatPayload); clearTimeout(requestTimeoutId); @@ -255,7 +272,7 @@ export class GeminiProApi implements LLMApi { ), ); } - const message = this.extractMessage(resJson); + const message = apiClient.extractMessage(resJson); options.onFinish(message); } } catch (e) { diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index f35992630..98851c224 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -1,13 +1,17 @@ "use client"; +// azure and openai, using same models. so using same LLMApi. import { ApiPath, DEFAULT_API_HOST, DEFAULT_MODELS, OpenaiPath, + Azure, REQUEST_TIMEOUT_MS, ServiceProvider, } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { collectModelsWithDefaultModel } from "@/app/utils/model"; +import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; import { ChatOptions, @@ -24,7 +28,6 @@ import { } from "@fortaine/fetch-event-source"; import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; -import { makeAzurePath } from "@/app/azure"; import { getMessageTextContent, getMessageImages, @@ -40,7 +43,7 @@ export interface OpenAIListModelResponse { }>; } -interface RequestPayload { +export interface RequestPayload { messages: { role: "system" | "user" | "assistant"; content: string | MultimodalContent[]; @@ -62,39 +65,38 @@ export class ChatGPTApi implements LLMApi { let baseUrl = ""; + const isAzure = path.includes("deployments"); if (accessStore.useCustomConfig) { - const isAzure = accessStore.provider === ServiceProvider.Azure; - if (isAzure && !accessStore.isValidAzure()) { throw Error( "incomplete azure config, please check it in your settings page", ); } - if (isAzure) { - path = makeAzurePath(path, accessStore.azureApiVersion); - } - baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl; } if (baseUrl.length === 0) { const isApp = !!getClientConfig()?.isApp; - baseUrl = isApp - ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI - : ApiPath.OpenAI; + const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI; + baseUrl = isApp ? 
DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; } if (baseUrl.endsWith("/")) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); } - if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) { + if ( + !baseUrl.startsWith("http") && + !isAzure && + !baseUrl.startsWith(ApiPath.OpenAI) + ) { baseUrl = "https://" + baseUrl; } console.log("[Proxy Endpoint] ", baseUrl, path); - return [baseUrl, path].join("/"); + // try rebuild url, when using cloudflare ai gateway in client + return cloudflareAIGatewayUrl([baseUrl, path].join("/")); } extractMessage(res: any) { @@ -113,6 +115,7 @@ export class ChatGPTApi implements LLMApi { ...useChatStore.getState().currentSession().mask.modelConfig, ...{ model: options.config.model, + providerName: options.config.providerName, }, }; @@ -140,7 +143,35 @@ export class ChatGPTApi implements LLMApi { options.onController?.(controller); try { - const chatPath = this.path(OpenaiPath.ChatPath); + let chatPath = ""; + if (modelConfig.providerName === ServiceProvider.Azure) { + // find model, and get displayName as deployName + const { models: configModels, customModels: configCustomModels } = + useAppConfig.getState(); + const { + defaultModel, + customModels: accessCustomModels, + useCustomConfig, + } = useAccessStore.getState(); + const models = collectModelsWithDefaultModel( + configModels, + [configCustomModels, accessCustomModels].join(","), + defaultModel, + ); + const model = models.find( + (model) => + model.name === modelConfig.model && + model?.provider?.providerName === ServiceProvider.Azure, + ); + chatPath = this.path( + Azure.ChatPath( + (model?.displayName ?? model?.name) as string, + useCustomConfig ? useAccessStore.getState().azureApiVersion : "", + ), + ); + } else { + chatPath = this.path(OpenaiPath.ChatPath); + } const chatPayload = { method: "POST", body: JSON.stringify(requestPayload), diff --git a/app/components/chat.tsx b/app/components/chat.tsx index 061192504..f5e6788fa 100644 --- a/app/components/chat.tsx +++ b/app/components/chat.tsx @@ -88,6 +88,7 @@ import { Path, REQUEST_TIMEOUT_MS, UNFINISHED_INPUT, + ServiceProvider, } from "../constant"; import { Avatar } from "./emoji"; import { ContextPrompts, MaskAvatar, MaskConfig } from "./mask"; @@ -244,11 +245,11 @@ function useSubmitHandler() { }; } -export type RenderPompt = Pick; +export type RenderPrompt = Pick; export function PromptHints(props: { - prompts: RenderPompt[]; - onPromptSelect: (prompt: RenderPompt) => void; + prompts: RenderPrompt[]; + onPromptSelect: (prompt: RenderPrompt) => void; }) { const noPrompts = props.prompts.length === 0; const [selectIndex, setSelectIndex] = useState(0); @@ -448,6 +449,9 @@ export function ChatActions(props: { // switch model const currentModel = chatStore.currentSession().mask.modelConfig.model; + const currentProviderName = + chatStore.currentSession().mask.modelConfig?.providerName || + ServiceProvider.OpenAI; const allModels = useAllModels(); const models = useMemo(() => { const filteredModels = allModels.filter((m) => m.available); @@ -463,6 +467,14 @@ export function ChatActions(props: { return filteredModels; } }, [allModels]); + const currentModelName = useMemo(() => { + const model = models.find( + (m) => + m.name == currentModel && + m?.provider?.providerName == currentProviderName, + ); + return model?.displayName ?? 
""; + }, [models, currentModel, currentProviderName]); const [showModelSelector, setShowModelSelector] = useState(false); const [showUploadImage, setShowUploadImage] = useState(false); @@ -479,13 +491,17 @@ export function ChatActions(props: { const isUnavaliableModel = !models.some((m) => m.name === currentModel); if (isUnavaliableModel && models.length > 0) { // show next model to default model if exist - let nextModel: ModelType = ( - models.find((model) => model.isDefault) || models[0] - ).name; - chatStore.updateCurrentSession( - (session) => (session.mask.modelConfig.model = nextModel), + let nextModel = models.find((model) => model.isDefault) || models[0]; + chatStore.updateCurrentSession((session) => { + session.mask.modelConfig.model = nextModel.name; + session.mask.modelConfig.providerName = nextModel?.provider + ?.providerName as ServiceProvider; + }); + showToast( + nextModel?.provider?.providerName == "ByteDance" + ? nextModel.displayName + : nextModel.name, ); - showToast(nextModel); } }, [chatStore, currentModel, models]); @@ -567,25 +583,40 @@ export function ChatActions(props: { setShowModelSelector(true)} - text={currentModel} + text={currentModelName} icon={} /> {showModelSelector && ( ({ - title: m.displayName, - value: m.name, + title: `${m.displayName}${ + m?.provider?.providerName + ? "(" + m?.provider?.providerName + ")" + : "" + }`, + value: `${m.name}@${m?.provider?.providerName}`, }))} onClose={() => setShowModelSelector(false)} onSelection={(s) => { if (s.length === 0) return; + const [model, providerName] = s[0].split("@"); chatStore.updateCurrentSession((session) => { - session.mask.modelConfig.model = s[0] as ModelType; + session.mask.modelConfig.model = model as ModelType; + session.mask.modelConfig.providerName = + providerName as ServiceProvider; session.mask.syncGlobalConfig = false; }); - showToast(s[0]); + if (providerName == "ByteDance") { + const selectedModel = models.find( + (m) => + m.name == model && m?.provider?.providerName == providerName, + ); + showToast(selectedModel?.displayName ?? 
""); + } else { + showToast(model); + } }} /> )} @@ -696,7 +727,7 @@ function _Chat() { // prompt hints const promptStore = usePromptStore(); - const [promptHints, setPromptHints] = useState([]); + const [promptHints, setPromptHints] = useState([]); const onSearch = useDebouncedCallback( (text: string) => { const matchedPrompts = promptStore.search(text); @@ -781,7 +812,7 @@ function _Chat() { setAutoScroll(true); }; - const onPromptSelect = (prompt: RenderPompt) => { + const onPromptSelect = (prompt: RenderPrompt) => { setTimeout(() => { setPromptHints([]); diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx index 20e240d93..948807d4c 100644 --- a/app/components/exporter.tsx +++ b/app/components/exporter.tsx @@ -36,11 +36,10 @@ import { toBlob, toPng } from "html-to-image"; import { DEFAULT_MASK_AVATAR } from "../store/mask"; import { prettyObject } from "../utils/format"; -import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant"; +import { EXPORT_MESSAGE_CLASS_NAME } from "../constant"; import { getClientConfig } from "../config/client"; -import { ClientApi } from "../client/api"; +import { type ClientApi, getClientApi } from "../client/api"; import { getMessageTextContent } from "../utils"; -import { identifyDefaultClaudeModel } from "../utils/checkers"; const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { loading: () => , @@ -313,14 +312,7 @@ export function PreviewActions(props: { const onRenderMsgs = (msgs: ChatMessage[]) => { setShouldExport(false); - var api: ClientApi; - if (config.modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(config.modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } + const api: ClientApi = getClientApi(config.modelConfig.providerName); api .share(msgs) diff --git a/app/components/home.tsx b/app/components/home.tsx index ffac64fda..e127c65f8 100644 --- a/app/components/home.tsx +++ b/app/components/home.tsx @@ -12,7 +12,7 @@ import LoadingIcon from "../icons/three-dots.svg"; import { getCSSVar, useMobileScreen } from "../utils"; import dynamic from "next/dynamic"; -import { ModelProvider, Path, SlotID } from "../constant"; +import { Path, SlotID } from "../constant"; import { ErrorBoundary } from "./error"; import { getISOLang, getLang } from "../locales"; @@ -27,9 +27,8 @@ import { SideBar } from "./sidebar"; import { useAppConfig } from "../store/config"; import { AuthPage } from "./auth"; import { getClientConfig } from "../config/client"; -import { ClientApi } from "../client/api"; +import { type ClientApi, getClientApi } from "../client/api"; import { useAccessStore } from "../store"; -import { identifyDefaultClaudeModel } from "../utils/checkers"; export function Loading(props: { noLogo?: boolean }) { return ( @@ -171,14 +170,8 @@ function Screen() { export function useLoadData() { const config = useAppConfig(); - var api: ClientApi; - if (config.modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(config.modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } + const api: ClientApi = getClientApi(config.modelConfig.providerName); + useEffect(() => { (async () => { const models = await api.llm.models(); diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx index 
e46a018f4..346fd3a71 100644 --- a/app/components/model-config.tsx +++ b/app/components/model-config.tsx @@ -1,3 +1,4 @@ +import { ServiceProvider } from "@/app/constant"; import { ModalConfigValidator, ModelConfig } from "../store"; import Locale from "../locales"; @@ -10,25 +11,25 @@ export function ModelConfigList(props: { updateConfig: (updater: (config: ModelConfig) => void) => void; }) { const allModels = useAllModels(); + const value = `${props.modelConfig.model}@${props.modelConfig?.providerName}`; return ( <> + accessStore.update( + (access) => + (access.baiduUrl = e.currentTarget.value), + ) + } + > + + + { + accessStore.update( + (access) => + (access.baiduApiKey = e.currentTarget.value), + ); + }} + /> + + + { + accessStore.update( + (access) => + (access.baiduSecretKey = e.currentTarget.value), + ); + }} + /> + + + )} + + {accessStore.provider === ServiceProvider.ByteDance && ( + <> + + + accessStore.update( + (access) => + (access.bytedanceUrl = e.currentTarget.value), + ) + } + > + + + { + accessStore.update( + (access) => + (access.bytedanceApiKey = + e.currentTarget.value), + ); + }} + /> + + + )} + + {accessStore.provider === ServiceProvider.Alibaba && ( + <> + + + accessStore.update( + (access) => + (access.alibabaUrl = e.currentTarget.value), + ) + } + > + + + { + accessStore.update( + (access) => + (access.alibabaApiKey = e.currentTarget.value), + ); + }} + /> + + + )} )} diff --git a/app/config/server.ts b/app/config/server.ts index b7c85ce6a..23557788b 100644 --- a/app/config/server.ts +++ b/app/config/server.ts @@ -35,6 +35,24 @@ declare global { // google tag manager GTM_ID?: string; + // anthropic only + ANTHROPIC_URL?: string; + ANTHROPIC_API_KEY?: string; + ANTHROPIC_API_VERSION?: string; + + // baidu only + BAIDU_URL?: string; + BAIDU_API_KEY?: string; + BAIDU_SECRET_KEY?: string; + + // bytedance only + BYTEDANCE_URL?: string; + BYTEDANCE_API_KEY?: string; + + // alibaba only + ALIBABA_URL?: string; + ALIBABA_API_KEY?: string; + // custom template for preprocessing user input DEFAULT_INPUT_TEMPLATE?: string; } @@ -93,6 +111,9 @@ export const getServerSideConfig = () => { const isGoogle = !!process.env.GOOGLE_API_KEY; const isAnthropic = !!process.env.ANTHROPIC_API_KEY; + const isBaidu = !!process.env.BAIDU_API_KEY; + const isBytedance = !!process.env.BYTEDANCE_API_KEY; + const isAlibaba = !!process.env.ALIBABA_API_KEY; // const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? 
""; // const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim()); // const randomIndex = Math.floor(Math.random() * apiKeys.length); @@ -124,6 +145,19 @@ export const getServerSideConfig = () => { anthropicApiVersion: process.env.ANTHROPIC_API_VERSION, anthropicUrl: process.env.ANTHROPIC_URL, + isBaidu, + baiduUrl: process.env.BAIDU_URL, + baiduApiKey: getApiKey(process.env.BAIDU_API_KEY), + baiduSecretKey: process.env.BAIDU_SECRET_KEY, + + isBytedance, + bytedanceApiKey: getApiKey(process.env.BYTEDANCE_API_KEY), + bytedanceUrl: process.env.BYTEDANCE_URL, + + isAlibaba, + alibabaUrl: process.env.ALIBABA_URL, + alibabaApiKey: getApiKey(process.env.ALIBABA_API_KEY), + gtmId: process.env.GTM_ID, needCode: ACCESS_CODES.size > 0, diff --git a/app/constant.ts b/app/constant.ts index 43660d6da..91c58531e 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -14,6 +14,13 @@ export const ANTHROPIC_BASE_URL = "https://api.anthropic.com"; export const GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/"; +export const BAIDU_BASE_URL = "https://aip.baidubce.com"; +export const BAIDU_OATUH_URL = `${BAIDU_BASE_URL}/oauth/2.0/token`; + +export const BYTEDANCE_BASE_URL = "https://ark.cn-beijing.volces.com"; + +export const ALIBABA_BASE_URL = "https://dashscope.aliyuncs.com/api/"; + export enum Path { Home = "/", Chat = "/chat", @@ -25,8 +32,13 @@ export enum Path { export enum ApiPath { Cors = "", + Azure = "/api/azure", OpenAI = "/api/openai", Anthropic = "/api/anthropic", + Google = "/api/google", + Baidu = "/api/baidu", + ByteDance = "/api/bytedance", + Alibaba = "/api/alibaba", } export enum SlotID { @@ -70,6 +82,9 @@ export enum ServiceProvider { Azure = "Azure", Google = "Google", Anthropic = "Anthropic", + Baidu = "Baidu", + ByteDance = "ByteDance", + Alibaba = "Alibaba", } // Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings @@ -85,6 +100,9 @@ export enum ModelProvider { GPT = "GPT", GeminiPro = "GeminiPro", Claude = "Claude", + Ernie = "Ernie", + Doubao = "Doubao", + Qwen = "Qwen", } export const Anthropic = { @@ -102,6 +120,8 @@ export const OpenaiPath = { }; export const Azure = { + ChatPath: (deployName: string, apiVersion: string) => + `deployments/${deployName}/chat/completions?api-version=${apiVersion}`, ExampleEndpoint: "https://{resource-url}/openai/deployments/{deploy-id}", }; @@ -110,6 +130,39 @@ export const Google = { ChatPath: (modelName: string) => `v1beta/models/${modelName}:generateContent`, }; +export const Baidu = { + ExampleEndpoint: BAIDU_BASE_URL, + ChatPath: (modelName: string) => { + let endpoint = modelName; + if (modelName === "ernie-4.0-8k") { + endpoint = "completions_pro"; + } + if (modelName === "ernie-4.0-8k-preview-0518") { + endpoint = "completions_adv_pro"; + } + if (modelName === "ernie-3.5-8k") { + endpoint = "completions"; + } + if (modelName === "ernie-speed-128k") { + endpoint = "ernie-speed-128k"; + } + if (modelName === "ernie-speed-8k") { + endpoint = "ernie_speed"; + } + return `rpc/2.0/ai_custom/v1/wenxinworkshop/chat/${endpoint}`; + }, +}; + +export const ByteDance = { + ExampleEndpoint: "https://ark.cn-beijing.volces.com/api/", + ChatPath: "api/v3/chat/completions", +}; + +export const Alibaba = { + ExampleEndpoint: ALIBABA_BASE_URL, + ChatPath: "v1/services/aigc/text-generation/generation", +}; + export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang // export const DEFAULT_SYSTEM_TEMPLATE = ` // You are ChatGPT, a large language model trained by {{ServiceProvider}}. 
@@ -159,6 +212,7 @@ const openaiModels = [ "gpt-4o-2024-05-13", "gpt-4-vision-preview", "gpt-4-turbo-2024-04-09", + "gpt-4-1106-preview", ]; const googleModels = [ @@ -175,6 +229,40 @@ const anthropicModels = [ "claude-3-sonnet-20240229", "claude-3-opus-20240229", "claude-3-haiku-20240307", + "claude-3-5-sonnet-20240620", +]; + +const baiduModels = [ + "ernie-4.0-turbo-8k", + "ernie-4.0-8k", + "ernie-4.0-8k-preview", + "ernie-4.0-8k-preview-0518", + "ernie-4.0-8k-latest", + "ernie-3.5-8k", + "ernie-3.5-8k-0205", + "ernie-speed-128k", + "ernie-speed-8k", + "ernie-lite-8k", + "ernie-tiny-8k" +]; + +const bytedanceModels = [ + "Doubao-lite-4k", + "Doubao-lite-32k", + "Doubao-lite-128k", + "Doubao-pro-4k", + "Doubao-pro-32k", + "Doubao-pro-128k", +]; + +const alibabaModes = [ + "qwen-turbo", + "qwen-plus", + "qwen-max", + "qwen-max-0428", + "qwen-max-0403", + "qwen-max-0107", + "qwen-max-longcontext", ]; export const DEFAULT_MODELS = [ @@ -187,6 +275,15 @@ export const DEFAULT_MODELS = [ providerType: "openai", }, })), + ...openaiModels.map((name) => ({ + name, + available: true, + provider: { + id: "azure", + providerName: "Azure", + providerType: "azure", + }, + })), ...googleModels.map((name) => ({ name, available: true, @@ -205,6 +302,33 @@ export const DEFAULT_MODELS = [ providerType: "anthropic", }, })), + ...baiduModels.map((name) => ({ + name, + available: true, + provider: { + id: "baidu", + providerName: "Baidu", + providerType: "baidu", + }, + })), + ...bytedanceModels.map((name) => ({ + name, + available: true, + provider: { + id: "bytedance", + providerName: "ByteDance", + providerType: "bytedance", + }, + })), + ...alibabaModes.map((name) => ({ + name, + available: true, + provider: { + id: "alibaba", + providerName: "Alibaba", + providerType: "alibaba", + }, + })), ] as const; export const CHAT_PAGE_SIZE = 15; diff --git a/app/layout.tsx b/app/layout.tsx index 5898b21a1..637b4556b 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -3,7 +3,7 @@ import "./styles/globals.scss"; import "./styles/markdown.scss"; import "./styles/highlight.scss"; import { getClientConfig } from "./config/client"; -import { type Metadata } from "next"; +import type { Metadata, Viewport } from "next"; import { SpeedInsights } from "@vercel/speed-insights/next"; import { getServerSideConfig } from "./config/server"; import { GoogleTagManager } from "@next/third-parties/google"; @@ -12,21 +12,22 @@ const serverConfig = getServerSideConfig(); export const metadata: Metadata = { title: "NextChat", description: "Your personal ChatGPT Chat Bot.", - viewport: { - width: "device-width", - initialScale: 1, - maximumScale: 1, - }, - themeColor: [ - { media: "(prefers-color-scheme: light)", color: "#fafafa" }, - { media: "(prefers-color-scheme: dark)", color: "#151515" }, - ], appleWebApp: { title: "NextChat", statusBarStyle: "default", }, }; +export const viewport: Viewport = { + width: "device-width", + initialScale: 1, + maximumScale: 1, + themeColor: [ + { media: "(prefers-color-scheme: light)", color: "#fafafa" }, + { media: "(prefers-color-scheme: dark)", color: "#151515" }, + ], +}; + export default function RootLayout({ children, }: { diff --git a/app/locales/cn.ts b/app/locales/cn.ts index fa75b0e38..43ea67633 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -351,6 +351,44 @@ const cn = { SubTitle: "设置内容过滤级别", }, }, + Baidu: { + ApiKey: { + Title: "API Key", + SubTitle: "使用自定义 Baidu API Key", + Placeholder: "Baidu API Key", + }, + SecretKey: { + Title: "Secret Key", + SubTitle: "使用自定义 Baidu 
Secret Key", + Placeholder: "Baidu Secret Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "不支持自定义前往.env配置", + }, + }, + ByteDance: { + ApiKey: { + Title: "接口密钥", + SubTitle: "使用自定义 ByteDance API Key", + Placeholder: "ByteDance API Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + }, + Alibaba: { + ApiKey: { + Title: "接口密钥", + SubTitle: "使用自定义阿里云API Key", + Placeholder: "Alibaba Cloud API Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + }, CustomModel: { Title: "自定义模型名", SubTitle: "增加自定义模型可选项,使用英文逗号隔开", diff --git a/app/locales/en.ts b/app/locales/en.ts index e546ce0f8..94c737550 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -326,7 +326,7 @@ const en: LocaleType = { Endpoint: { Title: "Endpoint Address", - SubTitle: "Example:", + SubTitle: "Example: ", }, ApiVerion: { @@ -334,6 +334,44 @@ const en: LocaleType = { SubTitle: "Select and input a specific API version", }, }, + Baidu: { + ApiKey: { + Title: "Baidu API Key", + SubTitle: "Use a custom Baidu API Key", + Placeholder: "Baidu API Key", + }, + SecretKey: { + Title: "Baidu Secret Key", + SubTitle: "Use a custom Baidu Secret Key", + Placeholder: "Baidu Secret Key", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "not supported, configure in .env", + }, + }, + ByteDance: { + ApiKey: { + Title: "ByteDance API Key", + SubTitle: "Use a custom ByteDance API Key", + Placeholder: "ByteDance API Key", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "Example: ", + }, + }, + Alibaba: { + ApiKey: { + Title: "Alibaba API Key", + SubTitle: "Use a custom Alibaba Cloud API Key", + Placeholder: "Alibaba Cloud API Key", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "Example: ", + }, + }, CustomModel: { Title: "Custom Models", SubTitle: "Custom model options, seperated by comma", @@ -347,7 +385,7 @@ const en: LocaleType = { Endpoint: { Title: "Endpoint Address", - SubTitle: "Example:", + SubTitle: "Example: ", }, ApiVersion: { diff --git a/app/locales/index.ts b/app/locales/index.ts index 6e8088a98..acdb3e878 100644 --- a/app/locales/index.ts +++ b/app/locales/index.ts @@ -97,7 +97,17 @@ function setItem(key: string, value: string) { function getLanguage() { try { - return navigator.language.toLowerCase(); + const locale = new Intl.Locale(navigator.language).maximize(); + const region = locale?.region?.toLowerCase(); + // 1. check region code in ALL_LANGS + if (AllLangs.includes(region as Lang)) { + return region as Lang; + } + // 2. check language code in ALL_LANGS + if (AllLangs.includes(locale.language as Lang)) { + return locale.language as Lang; + } + return DEFAULT_LANG; } catch { return DEFAULT_LANG; } @@ -110,15 +120,7 @@ export function getLang(): Lang { return savedLang as Lang; } - const lang = getLanguage(); - - for (const option of AllLangs) { - if (lang.includes(option)) { - return option; - } - } - - return DEFAULT_LANG; + return getLanguage(); } export function changeLang(lang: Lang) { diff --git a/app/locales/tw.ts b/app/locales/tw.ts index f4e819845..dc33aed79 100644 --- a/app/locales/tw.ts +++ b/app/locales/tw.ts @@ -4,11 +4,11 @@ import { SubmitKey } from "../store/config"; const isApp = !!getClientConfig()?.isApp; const tw = { - WIP: "該功能仍在開發中……", + WIP: "此功能仍在開發中……", Error: { Unauthorized: isApp - ? "檢測到無效 API Key,請前往[設定](/#/settings)頁檢查 API Key 是否設定正確。" - : "存取密碼不正確或未填寫,請前往[登入](/#/auth)頁輸入正確的存取密碼,或者在[設定](/#/settings)頁填入你自己的 OpenAI API Key。", + ? 
"偵測到無效的 API Key,請前往[設定](/#/settings)頁面檢查 API Key 是否設定正確。" + : "存取密碼不正確或尚未填寫,請前往[登入](/#/auth)頁面輸入正確的存取密碼,或者在[設定](/#/settings)頁面填入你自己的 OpenAI API Key。", }, Auth: { @@ -159,7 +159,7 @@ const tw = { }, InputTemplate: { Title: "使用者輸入預處理", - SubTitle: "使用者最新的一條訊息會填充到此範本", + SubTitle: "使用者最新的一則訊息會填充到此範本", }, Update: { @@ -194,19 +194,19 @@ const tw = { }, SyncType: { Title: "同步類型", - SubTitle: "選擇喜愛的同步伺服器", + SubTitle: "選擇偏好的同步伺服器", }, Proxy: { - Title: "啟用代理", - SubTitle: "在瀏覽器中同步時,必須啟用代理以避免跨域限制", + Title: "啟用代理伺服器", + SubTitle: "在瀏覽器中同步時,啟用代理伺服器以避免跨域限制", }, ProxyUrl: { - Title: "代理地址", - SubTitle: "僅適用於本專案自帶的跨域代理", + Title: "代理伺服器位置", + SubTitle: "僅適用於本專案內建的跨域代理", }, WebDav: { - Endpoint: "WebDAV 地址", + Endpoint: "WebDAV 位置", UserName: "使用者名稱", Password: "密碼", }, @@ -218,9 +218,9 @@ const tw = { }, }, - LocalState: "本地資料", + LocalState: "本機資料", Overview: (overview: any) => { - return `${overview.chat} 次對話,${overview.message} 條訊息,${overview.prompt} 條提示詞,${overview.mask} 個角色範本`; + return `${overview.chat} 次對話,${overview.message} 則訊息,${overview.prompt} 條提示詞,${overview.mask} 個角色範本`; }, ImportFailed: "匯入失敗", }, @@ -239,13 +239,13 @@ const tw = { Title: "停用提示詞自動補齊", SubTitle: "在輸入框開頭輸入 / 即可觸發自動補齊", }, - List: "自定義提示詞列表", + List: "自訂提示詞列表", ListCount: (builtin: number, custom: number) => - `內建 ${builtin} 條,使用者定義 ${custom} 條`, + `內建 ${builtin} 條,使用者自訂 ${custom} 條`, Edit: "編輯", Modal: { Title: "提示詞列表", - Add: "新增一條", + Add: "新增一則", Search: "搜尋提示詞", }, EditModal: { @@ -278,40 +278,40 @@ const tw = { Placeholder: "請輸入存取密碼", }, CustomEndpoint: { - Title: "自定義介面 (Endpoint)", - SubTitle: "是否使用自定義 Azure 或 OpenAI 服務", + Title: "自訂 API 端點 (Endpoint)", + SubTitle: "是否使用自訂 Azure 或 OpenAI 服務", }, Provider: { - Title: "模型服務商", - SubTitle: "切換不同的服務商", + Title: "模型供應商", + SubTitle: "切換不同的服務供應商", }, OpenAI: { ApiKey: { Title: "API Key", - SubTitle: "使用自定義 OpenAI Key 繞過密碼存取限制", + SubTitle: "使用自訂 OpenAI Key 繞過密碼存取限制", Placeholder: "OpenAI API Key", }, Endpoint: { - Title: "介面(Endpoint) 地址", - SubTitle: "除預設地址外,必須包含 http(s)://", + Title: "API 端點 (Endpoint) 位址", + SubTitle: "除預設位址外,必須包含 http(s)://", }, }, Azure: { ApiKey: { - Title: "介面金鑰", - SubTitle: "使用自定義 Azure Key 繞過密碼存取限制", + Title: "API 金鑰", + SubTitle: "使用自訂 Azure Key 繞過密碼存取限制", Placeholder: "Azure API Key", }, Endpoint: { - Title: "介面(Endpoint) 地址", - SubTitle: "樣例:", + Title: "API 端點 (Endpoint) 位址", + SubTitle: "範例:", }, ApiVerion: { - Title: "介面版本 (azure api version)", - SubTitle: "選擇指定的部分版本", + Title: "API 版本 (azure api version)", + SubTitle: "指定一個特定的 API 版本", }, }, Anthropic: { @@ -322,13 +322,13 @@ const tw = { }, Endpoint: { - Title: "終端地址", + Title: "端點位址", SubTitle: "範例:", }, ApiVerion: { Title: "API 版本 (claude api version)", - SubTitle: "選擇一個特定的 API 版本輸入", + SubTitle: "指定一個特定的 API 版本", }, }, Google: { @@ -339,7 +339,7 @@ const tw = { }, Endpoint: { - Title: "終端地址", + Title: "端點位址", SubTitle: "範例:", }, @@ -349,8 +349,8 @@ const tw = { }, }, CustomModel: { - Title: "自定義模型名", - SubTitle: "增加自定義模型可選項,使用英文逗號隔開", + Title: "自訂模型名稱", + SubTitle: "增加自訂模型可選擇項目,使用英文逗號隔開", }, }, @@ -400,7 +400,7 @@ const tw = { Context: { Toast: (x: any) => `已設定 ${x} 條前置上下文`, Edit: "前置上下文和歷史記憶", - Add: "新增一條", + Add: "新增一則", Clear: "上下文已清除", Revert: "恢復上下文", }, @@ -425,16 +425,16 @@ const tw = { EditModal: { Title: (readonly: boolean) => `編輯預設角色範本 ${readonly ? 
"(唯讀)" : ""}`, - Download: "下載預設", - Clone: "複製預設", + Download: "下載預設值", + Clone: "以此預設值建立副本", }, Config: { Avatar: "角色頭像", Name: "角色名稱", Sync: { - Title: "使用全域性設定", - SubTitle: "目前對話是否使用全域性模型設定", - Confirm: "目前對話的自定義設定將會被自動覆蓋,確認啟用全域性設定?", + Title: "使用全域設定", + SubTitle: "目前對話是否使用全域模型設定", + Confirm: "目前對話的自訂設定將會被自動覆蓋,確認啟用全域設定?", }, HideContext: { Title: "隱藏預設對話", @@ -450,15 +450,15 @@ const tw = { NewChat: { Return: "返回", Skip: "跳過", - NotShow: "不再呈現", + NotShow: "不再顯示", ConfirmNoShow: "確認停用?停用後可以隨時在設定中重新啟用。", Title: "挑選一個角色範本", SubTitle: "現在開始,與角色範本背後的靈魂思維碰撞", More: "搜尋更多", }, URLCommand: { - Code: "檢測到連結中已經包含存取密碼,是否自動填入?", - Settings: "檢測到連結中包含了預設設定,是否自動填入?", + Code: "偵測到連結中已經包含存取密碼,是否自動填入?", + Settings: "偵測到連結中包含了預設設定,是否自動填入?", }, UI: { Confirm: "確認", diff --git a/app/masks/build.ts b/app/masks/build.ts new file mode 100644 index 000000000..10c09ad75 --- /dev/null +++ b/app/masks/build.ts @@ -0,0 +1,25 @@ +import fs from "fs"; +import path from "path"; +import { CN_MASKS } from "./cn"; +import { TW_MASKS } from "./tw"; +import { EN_MASKS } from "./en"; + +import { type BuiltinMask } from "./typing"; + +const BUILTIN_MASKS: Record = { + cn: CN_MASKS, + tw: TW_MASKS, + en: EN_MASKS, +}; + +const dirname = path.dirname(__filename); + +fs.writeFile( + dirname + "/../../public/masks.json", + JSON.stringify(BUILTIN_MASKS, null, 4), + function (error) { + if (error) { + console.error("[Build] failed to build masks", error); + } + }, +); diff --git a/app/masks/index.ts b/app/masks/index.ts index aa4917e3e..92f21c6ae 100644 --- a/app/masks/index.ts +++ b/app/masks/index.ts @@ -22,6 +22,20 @@ export const BUILTIN_MASK_STORE = { }, }; -export const BUILTIN_MASKS: BuiltinMask[] = [...CN_MASKS, ...TW_MASKS, ...EN_MASKS].map( - (m) => BUILTIN_MASK_STORE.add(m), -); +export const BUILTIN_MASKS: BuiltinMask[] = []; + +if (typeof window != "undefined") { + // run in browser skip in next server + fetch("/masks.json") + .then((res) => res.json()) + .catch((error) => { + console.error("[Fetch] failed to fetch masks", error); + return { cn: [], tw: [], en: [] }; + }) + .then((masks) => { + const { cn = [], tw = [], en = [] } = masks; + return [...cn, ...tw, ...en].map((m) => { + BUILTIN_MASKS.push(BUILTIN_MASK_STORE.add(m)); + }); + }); +} diff --git a/app/store/access.ts b/app/store/access.ts index 737bcc807..24d7e175a 100644 --- a/app/store/access.ts +++ b/app/store/access.ts @@ -13,10 +13,33 @@ import { DEFAULT_CONFIG } from "./config"; let fetchState = 0; // 0 not fetch, 1 fetching, 2 done -const DEFAULT_OPENAI_URL = - getClientConfig()?.buildMode === "export" - ? DEFAULT_API_HOST + "/api/proxy/openai" - : ApiPath.OpenAI; +const isApp = getClientConfig()?.buildMode === "export"; + +const DEFAULT_OPENAI_URL = isApp + ? DEFAULT_API_HOST + "/api/proxy/openai" + : ApiPath.OpenAI; + +const DEFAULT_GOOGLE_URL = isApp + ? DEFAULT_API_HOST + "/api/proxy/google" + : ApiPath.Google; + +const DEFAULT_ANTHROPIC_URL = isApp + ? DEFAULT_API_HOST + "/api/proxy/anthropic" + : ApiPath.Anthropic; + +const DEFAULT_BAIDU_URL = isApp + ? DEFAULT_API_HOST + "/api/proxy/baidu" + : ApiPath.Baidu; + +const DEFAULT_BYTEDANCE_URL = isApp + ? DEFAULT_API_HOST + "/api/proxy/bytedance" + : ApiPath.ByteDance; + +const DEFAULT_ALIBABA_URL = isApp + ? 
DEFAULT_API_HOST + "/api/proxy/alibaba" + : ApiPath.Alibaba; + +console.log("DEFAULT_ANTHROPIC_URL", DEFAULT_ANTHROPIC_URL); const DEFAULT_ACCESS_STATE = { accessCode: "", @@ -34,15 +57,28 @@ const DEFAULT_ACCESS_STATE = { azureApiVersion: "2023-08-01-preview", // google ai studio - googleUrl: "", + googleUrl: DEFAULT_GOOGLE_URL, googleApiKey: "", googleApiVersion: "v1", googleSafetySettings: GoogleSafetySettingsThreshold.BLOCK_ONLY_HIGH, // anthropic + anthropicUrl: DEFAULT_ANTHROPIC_URL, anthropicApiKey: "", anthropicApiVersion: "2023-06-01", - anthropicUrl: "", + + // baidu + baiduUrl: DEFAULT_BAIDU_URL, + baiduApiKey: "", + baiduSecretKey: "", + + // bytedance + bytedanceUrl: DEFAULT_BYTEDANCE_URL, + bytedanceApiKey: "", + + // alibaba + alibabaUrl: DEFAULT_ALIBABA_URL, + alibabaApiKey: "", // server config needCode: true, @@ -80,6 +116,18 @@ export const useAccessStore = createPersistStore( return ensure(get(), ["anthropicApiKey"]); }, + isValidBaidu() { + return ensure(get(), ["baiduApiKey", "baiduSecretKey"]); + }, + + isValidByteDance() { + return ensure(get(), ["bytedanceApiKey"]); + }, + + isValidAlibaba() { + return ensure(get(), ["alibabaApiKey"]); + }, + isAuthorized() { this.fetch(); @@ -89,6 +137,9 @@ export const useAccessStore = createPersistStore( this.isValidAzure() || this.isValidGoogle() || this.isValidAnthropic() || + this.isValidBaidu() || + this.isValidByteDance() || + this.isValidAlibaba() || !this.enabledAccessControl() || (this.enabledAccessControl() && ensure(get(), ["accessCode"])) ); diff --git a/app/store/chat.ts b/app/store/chat.ts index 27a7114a3..d14bd82d8 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -9,18 +9,23 @@ import { DEFAULT_MODELS, DEFAULT_SYSTEM_TEMPLATE, KnowledgeCutOffDate, + ServiceProvider, ModelProvider, StoreKey, SUMMARIZE_MODEL, GEMINI_SUMMARIZE_MODEL, } from "../constant"; -import { ClientApi, RequestMessage, MultimodalContent } from "../client/api"; +import { getClientApi } from "../client/api"; +import type { + ClientApi, + RequestMessage, + MultimodalContent, +} from "../client/api"; import { ChatControllerPool } from "../client/controller"; import { prettyObject } from "../utils/format"; import { estimateTokenLength } from "../utils/token"; import { nanoid } from "nanoid"; import { createPersistStore } from "../utils/store"; -import { identifyDefaultClaudeModel } from "../utils/checkers"; import { collectModelsWithDefaultModel } from "../utils/model"; import { useAccessStore } from "./access"; @@ -363,15 +368,7 @@ export const useChatStore = createPersistStore( ]); }); - var api: ClientApi; - if (modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } - + const api: ClientApi = getClientApi(modelConfig.providerName); // make request api.llm.chat({ messages: sendMessages, @@ -547,14 +544,7 @@ export const useChatStore = createPersistStore( const session = get().currentSession(); const modelConfig = session.mask.modelConfig; - var api: ClientApi; - if (modelConfig.model.startsWith("gemini")) { - api = new ClientApi(ModelProvider.GeminiPro); - } else if (identifyDefaultClaudeModel(modelConfig.model)) { - api = new ClientApi(ModelProvider.Claude); - } else { - api = new ClientApi(ModelProvider.GPT); - } + const api: ClientApi = getClientApi(modelConfig.providerName); // remove error messages if any const messages = session.messages; 
diff --git a/app/store/config.ts b/app/store/config.ts index 94cfcd8ec..1eaafe12b 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -5,6 +5,7 @@ import { DEFAULT_MODELS, DEFAULT_SIDEBAR_WIDTH, StoreKey, + ServiceProvider, } from "../constant"; import { createPersistStore } from "../utils/store"; @@ -48,6 +49,7 @@ export const DEFAULT_CONFIG = { modelConfig: { model: "gpt-3.5-turbo" as ModelType, + providerName: "OpenAI" as ServiceProvider, temperature: 0.5, top_p: 1, max_tokens: 4000, @@ -116,12 +118,12 @@ export const useAppConfig = createPersistStore( for (const model of oldModels) { model.available = false; - modelMap[model.name] = model; + modelMap[`${model.name}@${model?.provider?.id}`] = model; } for (const model of newModels) { model.available = true; - modelMap[model.name] = model; + modelMap[`${model.name}@${model?.provider?.id}`] = model; } set(() => ({ diff --git a/app/styles/globals.scss b/app/styles/globals.scss index 20792cda5..3c59f2d44 100644 --- a/app/styles/globals.scss +++ b/app/styles/globals.scss @@ -118,7 +118,7 @@ body { } ::-webkit-scrollbar { - --bar-width: 5px; + --bar-width: 10px; width: var(--bar-width); height: var(--bar-width); } diff --git a/app/utils/baidu.ts b/app/utils/baidu.ts new file mode 100644 index 000000000..ddeb17bd5 --- /dev/null +++ b/app/utils/baidu.ts @@ -0,0 +1,23 @@ +import { BAIDU_OATUH_URL } from "../constant"; +/** + * 使用 AK,SK 生成鉴权签名(Access Token) + * @return 鉴权签名信息 + */ +export async function getAccessToken( + clientId: string, + clientSecret: string, +): Promise<{ + access_token: string; + expires_in: number; + error?: number; +}> { + const res = await fetch( + `${BAIDU_OATUH_URL}?grant_type=client_credentials&client_id=${clientId}&client_secret=${clientSecret}`, + { + method: "POST", + mode: "cors", + }, + ); + const resJson = await res.json(); + return resJson; +} diff --git a/app/utils/checkers.ts b/app/utils/checkers.ts deleted file mode 100644 index 4496e1039..000000000 --- a/app/utils/checkers.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { useAccessStore } from "../store/access"; -import { useAppConfig } from "../store/config"; -import { collectModels } from "./model"; - -export function identifyDefaultClaudeModel(modelName: string) { - const accessStore = useAccessStore.getState(); - const configStore = useAppConfig.getState(); - - const allModals = collectModels( - configStore.models, - [configStore.customModels, accessStore.customModels].join(","), - ); - - const modelMeta = allModals.find((m) => m.name === modelName); - - return ( - modelName.startsWith("claude") && - modelMeta && - modelMeta.provider?.providerType === "anthropic" - ); -} diff --git a/app/utils/cloudflare.ts b/app/utils/cloudflare.ts new file mode 100644 index 000000000..5094640fc --- /dev/null +++ b/app/utils/cloudflare.ts @@ -0,0 +1,26 @@ +export function cloudflareAIGatewayUrl(fetchUrl: string) { + // rebuild fetchUrl, if using cloudflare ai gateway + // document: https://developers.cloudflare.com/ai-gateway/providers/openai/ + + const paths = fetchUrl.split("/"); + if ("gateway.ai.cloudflare.com" == paths[2]) { + // is cloudflare.com ai gateway + // https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/azure-openai/{resource_name}/{deployment_name}/chat/completions?api-version=2023-05-15' + if ("azure-openai" == paths[6]) { + // is azure gateway + return paths.slice(0, 8).concat(paths.slice(-3)).join("/"); // rebuild ai gateway azure_url + } + // https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/openai/chat/completions + if 
("openai" == paths[6]) { + // is openai gateway + return paths.slice(0, 7).concat(paths.slice(-2)).join("/"); // rebuild ai gateway openai_url + } + // https://gateway.ai.cloudflare.com/v1/{account_id}/{gateway_id}/anthropic/v1/messages \ + if ("anthropic" == paths[6]) { + // is anthropic gateway + return paths.slice(0, 7).concat(paths.slice(-2)).join("/"); // rebuild ai gateway anthropic_url + } + // TODO: Amazon Bedrock, Groq, HuggingFace... + } + return fetchUrl; +} diff --git a/app/utils/hooks.ts b/app/utils/hooks.ts index 55d5d4fca..f7f1385e0 100644 --- a/app/utils/hooks.ts +++ b/app/utils/hooks.ts @@ -11,7 +11,12 @@ export function useAllModels() { [configStore.customModels, accessStore.customModels].join(","), accessStore.defaultModel, ); - }, [accessStore.customModels, configStore.customModels, configStore.models]); + }, [ + accessStore.customModels, + accessStore.defaultModel, + configStore.customModels, + configStore.models, + ]); return models; } diff --git a/app/utils/model.ts b/app/utils/model.ts index 056fff2e9..55a5ee0d6 100644 --- a/app/utils/model.ts +++ b/app/utils/model.ts @@ -1,8 +1,9 @@ +import { DEFAULT_MODELS } from "../constant"; import { LLMModel } from "../client/api"; -const customProvider = (modelName: string) => ({ - id: modelName, - providerName: "", +const customProvider = (providerName: string) => ({ + id: providerName.toLowerCase(), + providerName: providerName, providerType: "custom", }); @@ -23,7 +24,8 @@ export function collectModelTable( // default models models.forEach((m) => { - modelTable[m.name] = { + // using @ as fullName + modelTable[`${m.name}@${m?.provider?.id}`] = { ...m, displayName: m.name, // 'provider' is copied over if it exists }; @@ -37,7 +39,7 @@ export function collectModelTable( const available = !m.startsWith("-"); const nameConfig = m.startsWith("+") || m.startsWith("-") ? m.slice(1) : m; - const [name, displayName] = nameConfig.split("="); + let [name, displayName] = nameConfig.split("="); // enable or disable all models if (name === "all") { @@ -45,12 +47,45 @@ export function collectModelTable( (model) => (model.available = available), ); } else { - modelTable[name] = { - name, - displayName: displayName || name, - available, - provider: modelTable[name]?.provider ?? customProvider(name), // Use optional chaining - }; + // 1. find model by name, and set available value + const [customModelName, customProviderName] = name.split("@"); + let count = 0; + for (const fullName in modelTable) { + const [modelName, providerName] = fullName.split("@"); + if ( + customModelName == modelName && + (customProviderName === undefined || + customProviderName === providerName) + ) { + count += 1; + modelTable[fullName]["available"] = available; + // swap name and displayName for bytedance + if (providerName === "bytedance") { + [name, displayName] = [displayName, modelName]; + modelTable[fullName]["name"] = name; + } + if (displayName) { + modelTable[fullName]["displayName"] = displayName; + } + } + } + // 2. 
if model not exists, create new model with available value + if (count === 0) { + let [customModelName, customProviderName] = name.split("@"); + const provider = customProvider( + customProviderName || customModelName, + ); + // swap name and displayName for bytedance + if (displayName && provider.providerName == "ByteDance") { + [customModelName, displayName] = [displayName, customModelName]; + } + modelTable[`${customModelName}@${provider?.id}`] = { + name: customModelName, + displayName: displayName || customModelName, + available, + provider, // Use optional chaining + }; + } } }); @@ -100,3 +135,13 @@ export function collectModelsWithDefaultModel( const allModels = Object.values(modelTable); return allModels; } + +export function isModelAvailableInServer( + customModels: string, + modelName: string, + providerName: string, +) { + const fullName = `${modelName}@${providerName}`; + const modelTable = collectModelTable(DEFAULT_MODELS, customModels); + return modelTable[fullName]?.available === false; +} diff --git a/next.config.mjs b/next.config.mjs index daaeba468..27c60dd29 100644 --- a/next.config.mjs +++ b/next.config.mjs @@ -69,6 +69,11 @@ if (mode !== "export") { source: "/api/proxy/v1/:path*", destination: "https://api.openai.com/v1/:path*", }, + { + // https://{resource_name}.openai.azure.com/openai/deployments/{deploy_name}/chat/completions + source: "/api/proxy/azure/:resource_name/deployments/:deploy_name/:path*", + destination: "https://:resource_name.openai.azure.com/openai/deployments/:deploy_name/:path*", + }, { source: "/api/proxy/google/:path*", destination: "https://generativelanguage.googleapis.com/:path*", diff --git a/package.json b/package.json index 4d06b0b14..ed5edb043 100644 --- a/package.json +++ b/package.json @@ -3,14 +3,16 @@ "private": false, "license": "mit", "scripts": { - "dev": "next dev", - "build": "cross-env BUILD_MODE=standalone next build", + "mask": "npx tsx app/masks/build.ts", + "mask:watch": "npx watch 'yarn mask' app/masks", + "dev": "yarn run mask:watch & next dev", + "build": "yarn mask && cross-env BUILD_MODE=standalone next build", "start": "next start", "lint": "next lint", - "export": "cross-env BUILD_MODE=export BUILD_APP=1 next build", - "export:dev": "cross-env BUILD_MODE=export BUILD_APP=1 next dev", - "app:dev": "yarn tauri dev", - "app:build": "yarn tauri build", + "export": "yarn mask && cross-env BUILD_MODE=export BUILD_APP=1 next build", + "export:dev": "yarn mask:watch & cross-env BUILD_MODE=export BUILD_APP=1 next dev", + "app:dev": "yarn mask:watch & yarn tauri dev", + "app:build": "yarn mask && yarn tauri build", "prompts": "node ./scripts/fetch-prompts.mjs", "prepare": "husky install", "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev" @@ -59,7 +61,9 @@ "husky": "^8.0.0", "lint-staged": "^13.2.2", "prettier": "^3.0.2", + "tsx": "^4.16.0", "typescript": "5.2.2", + "watch": "^1.0.2", "webpack": "^5.88.1" }, "resolutions": { diff --git a/src-tauri/icons/icon.icns b/src-tauri/icons/icon.icns index 131a0810a..0967ef466 100644 Binary files a/src-tauri/icons/icon.icns and b/src-tauri/icons/icon.icns differ diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index ee87d8d15..f20bfbb74 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -9,7 +9,7 @@ }, "package": { "productName": "NextChat", - "version": "2.12.3" + "version": "2.13.0" }, "tauri": { "allowlist": { diff --git a/yarn.lock b/yarn.lock index 72df8cafc..c323a5c38 100644 --- a/yarn.lock +++ 
b/yarn.lock @@ -1092,6 +1092,121 @@ resolved "https://registry.yarnpkg.com/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz#923ca57e173c6b232bbbb07347b1be982f03e783" integrity sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A== +"@esbuild/aix-ppc64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz#c7184a326533fcdf1b8ee0733e21c713b975575f" + integrity sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ== + +"@esbuild/android-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz#09d9b4357780da9ea3a7dfb833a1f1ff439b4052" + integrity sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A== + +"@esbuild/android-arm@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/android-arm/-/android-arm-0.21.5.tgz#9b04384fb771926dfa6d7ad04324ecb2ab9b2e28" + integrity sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg== + +"@esbuild/android-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/android-x64/-/android-x64-0.21.5.tgz#29918ec2db754cedcb6c1b04de8cd6547af6461e" + integrity sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA== + +"@esbuild/darwin-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz#e495b539660e51690f3928af50a76fb0a6ccff2a" + integrity sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ== + +"@esbuild/darwin-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz#c13838fa57372839abdddc91d71542ceea2e1e22" + integrity sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw== + +"@esbuild/freebsd-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz#646b989aa20bf89fd071dd5dbfad69a3542e550e" + integrity sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g== + +"@esbuild/freebsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz#aa615cfc80af954d3458906e38ca22c18cf5c261" + integrity sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ== + +"@esbuild/linux-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz#70ac6fa14f5cb7e1f7f887bcffb680ad09922b5b" + integrity sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q== + +"@esbuild/linux-arm@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz#fc6fd11a8aca56c1f6f3894f2bea0479f8f626b9" + integrity sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA== + +"@esbuild/linux-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz#3271f53b3f93e3d093d518d1649d6d68d346ede2" + integrity sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg== + +"@esbuild/linux-loong64@0.21.5": + version "0.21.5" + resolved 
"https://registry.npmmirror.com/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz#ed62e04238c57026aea831c5a130b73c0f9f26df" + integrity sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg== + +"@esbuild/linux-mips64el@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz#e79b8eb48bf3b106fadec1ac8240fb97b4e64cbe" + integrity sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg== + +"@esbuild/linux-ppc64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz#5f2203860a143b9919d383ef7573521fb154c3e4" + integrity sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w== + +"@esbuild/linux-riscv64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz#07bcafd99322d5af62f618cb9e6a9b7f4bb825dc" + integrity sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA== + +"@esbuild/linux-s390x@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz#b7ccf686751d6a3e44b8627ababc8be3ef62d8de" + integrity sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A== + +"@esbuild/linux-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz#6d8f0c768e070e64309af8004bb94e68ab2bb3b0" + integrity sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ== + +"@esbuild/netbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz#bbe430f60d378ecb88decb219c602667387a6047" + integrity sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg== + +"@esbuild/openbsd-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz#99d1cf2937279560d2104821f5ccce220cb2af70" + integrity sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow== + +"@esbuild/sunos-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz#08741512c10d529566baba837b4fe052c8f3487b" + integrity sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg== + +"@esbuild/win32-arm64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz#675b7385398411240735016144ab2e99a60fc75d" + integrity sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A== + +"@esbuild/win32-ia32@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz#1bfc3ce98aa6ca9a0969e4d2af72144c59c1193b" + integrity sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA== + +"@esbuild/win32-x64@0.21.5": + version "0.21.5" + resolved "https://registry.npmmirror.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz#acad351d582d157bb145535db2a6ff53dd514b5c" + integrity sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw== + "@eslint-community/eslint-utils@^4.2.0": version "4.4.0" resolved 
"https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59" @@ -2981,6 +3096,35 @@ es-to-primitive@^1.2.1: is-date-object "^1.0.1" is-symbol "^1.0.2" +esbuild@~0.21.5: + version "0.21.5" + resolved "https://registry.npmmirror.com/esbuild/-/esbuild-0.21.5.tgz#9ca301b120922959b766360d8ac830da0d02997d" + integrity sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw== + optionalDependencies: + "@esbuild/aix-ppc64" "0.21.5" + "@esbuild/android-arm" "0.21.5" + "@esbuild/android-arm64" "0.21.5" + "@esbuild/android-x64" "0.21.5" + "@esbuild/darwin-arm64" "0.21.5" + "@esbuild/darwin-x64" "0.21.5" + "@esbuild/freebsd-arm64" "0.21.5" + "@esbuild/freebsd-x64" "0.21.5" + "@esbuild/linux-arm" "0.21.5" + "@esbuild/linux-arm64" "0.21.5" + "@esbuild/linux-ia32" "0.21.5" + "@esbuild/linux-loong64" "0.21.5" + "@esbuild/linux-mips64el" "0.21.5" + "@esbuild/linux-ppc64" "0.21.5" + "@esbuild/linux-riscv64" "0.21.5" + "@esbuild/linux-s390x" "0.21.5" + "@esbuild/linux-x64" "0.21.5" + "@esbuild/netbsd-x64" "0.21.5" + "@esbuild/openbsd-x64" "0.21.5" + "@esbuild/sunos-x64" "0.21.5" + "@esbuild/win32-arm64" "0.21.5" + "@esbuild/win32-ia32" "0.21.5" + "@esbuild/win32-x64" "0.21.5" + escalade@^3.1.1: version "3.1.1" resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.1.tgz#d8cfdc7000965c5a0174b4a82eaa5c0552742e40" @@ -3234,6 +3378,13 @@ events@^3.2.0: resolved "https://registry.npmmirror.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== +exec-sh@^0.2.0: + version "0.2.2" + resolved "https://registry.npmmirror.com/exec-sh/-/exec-sh-0.2.2.tgz#2a5e7ffcbd7d0ba2755bdecb16e5a427dfbdec36" + integrity sha512-FIUCJz1RbuS0FKTdaAafAByGS0CPvU3R0MeHxgtl+djzCc//F8HakL8GzmVNZanasTbTAY/3DRFA0KpVqj/eAw== + dependencies: + merge "^1.2.0" + execa@^7.0.0: version "7.1.1" resolved "https://registry.yarnpkg.com/execa/-/execa-7.1.1.tgz#3eb3c83d239488e7b409d48e8813b76bb55c9c43" @@ -3376,6 +3527,11 @@ fsevents@~2.3.2: resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a" integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA== +fsevents@~2.3.3: + version "2.3.3" + resolved "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" + integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== + function-bind@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d" @@ -3433,6 +3589,13 @@ get-tsconfig@^4.5.0: resolved "https://registry.yarnpkg.com/get-tsconfig/-/get-tsconfig-4.5.0.tgz#6d52d1c7b299bd3ee9cd7638561653399ac77b0f" integrity sha512-MjhiaIWCJ1sAU4pIQ5i5OfOuHHxVo1oYeNsWTON7jxYkod8pHocXeh+SSbmu5OZZZK73B6cbJ2XADzXehLyovQ== +get-tsconfig@^4.7.5: + version "4.7.5" + resolved "https://registry.npmmirror.com/get-tsconfig/-/get-tsconfig-4.7.5.tgz#5e012498579e9a6947511ed0cd403272c7acbbaf" + integrity sha512-ZCuZCnlqNzjb4QprAzXKdpp/gh6KTxSJuw3IBsPnV/7fV4NxC9ckB+vPTt8w7fJA0TaSD7c55BR47JD6MEDyDw== + dependencies: + resolve-pkg-maps "^1.0.0" + glob-parent@^5.1.2, glob-parent@~5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" @@ -4387,6 
+4550,11 @@ merge2@^1.3.0, merge2@^1.4.1: resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== +merge@^1.2.0: + version "1.2.1" + resolved "https://registry.npmmirror.com/merge/-/merge-1.2.1.tgz#38bebf80c3220a8a487b6fcfb3941bb11720c145" + integrity sha512-VjFo4P5Whtj4vsLzsYBu5ayHhoHJ0UqNm7ibvShmbmoz7tGi0vXaoJbGdB+GmDMLUdg8DpQXEIeVDAe8MaABvQ== + mermaid@^10.6.1: version "10.6.1" resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-10.6.1.tgz#701f4160484137a417770ce757ce1887a98c00fc" @@ -5317,6 +5485,11 @@ resolve-from@^4.0.0: resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== +resolve-pkg-maps@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz#616b3dc2c57056b5588c31cdf4b3d64db133720f" + integrity sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw== + resolve@^1.14.2, resolve@^1.22.1: version "1.22.1" resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.1.tgz#27cb2ebb53f91abb49470a928bba7558066ac177" @@ -5823,6 +5996,16 @@ tslib@^2.6.2: resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== +tsx@^4.16.0: + version "4.16.0" + resolved "https://registry.npmmirror.com/tsx/-/tsx-4.16.0.tgz#913dd96f191b76f07a8744201d8c15d510aa1352" + integrity sha512-MPgN+CuY+4iKxGoJNPv+1pyo5YWZAQ5XfsyobUG+zoKG7IkvCPLZDEyoIb8yLS2FcWci1nlxAqmvPlFWD5AFiQ== + dependencies: + esbuild "~0.21.5" + get-tsconfig "^4.7.5" + optionalDependencies: + fsevents "~2.3.3" + type-check@^0.4.0, type-check@~0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" @@ -6043,6 +6226,14 @@ vfile@^5.0.0: unist-util-stringify-position "^3.0.0" vfile-message "^3.0.0" +watch@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/watch/-/watch-1.0.2.tgz#340a717bde765726fa0aa07d721e0147a551df0c" + integrity sha512-1u+Z5n9Jc1E2c7qDO8SinPoZuHj7FgbgU1olSFoyaklduDvvtX7GMMtlE6OC9FTXq4KvNAOfj6Zu4vI1e9bAKA== + dependencies: + exec-sh "^0.2.0" + minimist "^1.2.0" + watchpack@^2.4.0: version "2.4.0" resolved "https://registry.yarnpkg.com/watchpack/-/watchpack-2.4.0.tgz#fa33032374962c78113f93c7f2fb4c54c9862a5d"