diff --git a/.env.template b/.env.template index 25addf2b3..82f44216a 100644 --- a/.env.template +++ b/.env.template @@ -66,4 +66,4 @@ ANTHROPIC_API_VERSION= ANTHROPIC_URL= ### (optional) -WHITE_WEBDEV_ENDPOINTS= \ No newline at end of file +WHITE_WEBDAV_ENDPOINTS= \ No newline at end of file diff --git a/.eslintrc.json b/.eslintrc.json index d229e86f2..5b5e88e67 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -1,4 +1,7 @@ { "extends": "next/core-web-vitals", - "plugins": ["prettier"] + "plugins": ["prettier", "unused-imports"], + "rules": { + "unused-imports/no-unused-imports": "warn" + } } diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/deploy_preview.yml index bdbb78c27..30d9b85b4 100644 --- a/.github/workflows/deploy_preview.yml +++ b/.github/workflows/deploy_preview.yml @@ -49,7 +49,7 @@ jobs: run: npm install --global vercel@latest - name: Cache dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 id: cache-npm with: path: ~/.npm diff --git a/README.md b/README.md index c8b158956..be5e91d65 100644 --- a/README.md +++ b/README.md @@ -12,15 +12,18 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 +[![Saas][Saas-image]][saas-url] [![Web][Web-image]][web-url] [![Windows][Windows-image]][download-url] [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) +[NextChatAI](https://nextchat.dev/chat) / [Web App](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) -[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[NextChatAI](https://nextchat.dev/chat) / [网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[saas-url]: https://nextchat.dev/chat +[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge [web-url]: https://app.nextchat.dev/ [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge @@ -172,7 +175,7 @@ We recommend that you follow the steps below to re-deploy: ### Enable Automatic Updates -> If you encounter a failure of Upstream Sync execution, please manually sync fork once. +> If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code). After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. 
Once enabled, automatic updates will be scheduled every hour: @@ -340,7 +343,7 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name Change default model -### `WHITE_WEBDEV_ENDPOINTS` (optional) +### `WHITE_WEBDAV_ENDPOINTS` (optional) You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: - Each address must be a complete endpoint diff --git a/README_CN.md b/README_CN.md index beed396c5..640fe3933 100644 --- a/README_CN.md +++ b/README_CN.md @@ -8,7 +8,7 @@ 一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 -[企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) /[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.dev/chat) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) [Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) @@ -54,7 +54,7 @@ ### 打开自动更新 -> 如果你遇到了 Upstream Sync 执行错误,请手动 Sync Fork 一次! +> 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)! 当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows,并启用 Upstream Sync Action,启用之后即可开启每小时定时自动更新: @@ -202,7 +202,7 @@ ByteDance Api Url. 如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。 -### `WHITE_WEBDEV_ENDPOINTS` (可选) +### `WHITE_WEBDAV_ENDPOINTS` (可选) 如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: - 每一个地址必须是一个完整的 endpoint diff --git a/README_JA.md b/README_JA.md index 6b8caadae..ba3c514dc 100644 --- a/README_JA.md +++ b/README_JA.md @@ -5,7 +5,7 @@ ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。 -[企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.dev/chat) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) [Zeaburでデプロイ](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Zeaburでデプロイ](https://zeabur.com/templates/ZBUEFA) [Gitpodで開く](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) @@ -54,7 +54,7 @@ ### 自動更新を開く -> Upstream Sync の実行エラーが発生した場合は、手動で Sync Fork してください! +> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください! 
プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります: @@ -193,7 +193,7 @@ ByteDance API の URL。 リンクからのプリセット設定解析を無効にしたい場合は、この環境変数を 1 に設定します。 -### `WHITE_WEBDEV_ENDPOINTS` (オプション) +### `WHITE_WEBDAV_ENDPOINTS` (オプション) アクセス許可を与える WebDAV サービスのアドレスを追加したい場合、このオプションを使用します。フォーマット要件: - 各アドレスは完全なエンドポイントでなければなりません。 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index 24aa5ec04..dffb3e9da 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -1,5 +1,5 @@ import { ApiPath } from "@/app/constant"; -import { NextRequest, NextResponse } from "next/server"; +import { NextRequest } from "next/server"; import { handle as openaiHandler } from "../../openai"; import { handle as azureHandler } from "../../azure"; import { handle as googleHandler } from "../../google"; diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts index 675d9f301..894b1ae4c 100644 --- a/app/api/alibaba.ts +++ b/app/api/alibaba.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Alibaba, ALIBABA_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; const serverConfig = getServerSideConfig(); diff --git a/app/api/anthropic.ts b/app/api/anthropic.ts index d7b070247..7a4444371 100644 --- a/app/api/anthropic.ts +++ b/app/api/anthropic.ts @@ -3,7 +3,6 @@ import { ANTHROPIC_BASE_URL, Anthropic, ApiPath, - DEFAULT_MODELS, ServiceProvider, ModelProvider, } from "@/app/constant"; diff --git a/app/api/azure.ts b/app/api/azure.ts index e2cb0c7e6..39d872e8c 100644 --- a/app/api/azure.ts +++ b/app/api/azure.ts @@ -1,4 +1,3 @@ -import { getServerSideConfig } from "@/app/config/server"; import { ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; diff --git a/app/api/baidu.ts b/app/api/baidu.ts index f4315d186..0408b43c5 100644 --- a/app/api/baidu.ts +++ b/app/api/baidu.ts @@ -3,7 +3,6 @@ import { BAIDU_BASE_URL, ApiPath, ModelProvider, - BAIDU_OATUH_URL, ServiceProvider, } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; diff --git a/app/api/common.ts b/app/api/common.ts index 25decbf62..b4c792d6f 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,11 +1,6 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; -import { - DEFAULT_MODELS, - OPENAI_BASE_URL, - GEMINI_BASE_URL, - ServiceProvider, -} from "../constant"; +import { OPENAI_BASE_URL, ServiceProvider } from "../constant"; import { isModelAvailableInServer } from "../utils/model"; import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; diff --git a/app/api/google.ts b/app/api/google.ts index 98fe469bf..707892c33 100644 --- a/app/api/google.ts +++ b/app/api/google.ts @@ -1,12 +1,7 @@ import { NextRequest, NextResponse } from "next/server"; import { auth } from "./auth"; import { getServerSideConfig } from "@/app/config/server"; -import { - ApiPath, - GEMINI_BASE_URL, - Google, - ModelProvider, -} from "@/app/constant"; +import { ApiPath, GEMINI_BASE_URL, ModelProvider } from "@/app/constant"; import { prettyObject } from 
"@/app/utils/format"; const serverConfig = getServerSideConfig(); @@ -28,7 +23,8 @@ export async function handle( }); } - const bearToken = req.headers.get("Authorization") ?? ""; + const bearToken = + req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || ""; const token = bearToken.trim().replaceAll("Bearer ", "").trim(); const apiKey = token ? token : serverConfig.googleApiKey; @@ -96,8 +92,8 @@ async function request(req: NextRequest, apiKey: string) { }, 10 * 60 * 1000, ); - const fetchUrl = `${baseUrl}${path}?key=${apiKey}${ - req?.nextUrl?.searchParams?.get("alt") === "sse" ? "&alt=sse" : "" + const fetchUrl = `${baseUrl}${path}${ + req?.nextUrl?.searchParams?.get("alt") === "sse" ? "?alt=sse" : "" }`; console.log("[Fetch Url] ", fetchUrl); @@ -105,6 +101,9 @@ async function request(req: NextRequest, apiKey: string) { headers: { "Content-Type": "application/json", "Cache-Control": "no-store", + "x-goog-api-key": + req.headers.get("x-goog-api-key") || + (req.headers.get("Authorization") ?? "").replace("Bearer ", ""), }, method: req.method, body: req.body, diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts index eabdd9f4c..8b8227dce 100644 --- a/app/api/iflytek.ts +++ b/app/api/iflytek.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Iflytek, IFLYTEK_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; // iflytek const serverConfig = getServerSideConfig(); diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts index 247dd6183..5bf4807e3 100644 --- a/app/api/moonshot.ts +++ b/app/api/moonshot.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Moonshot, MOONSHOT_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; const serverConfig = getServerSideConfig(); diff --git a/app/api/tencent/route.ts b/app/api/tencent/route.ts index 885909e7a..fc4f8c79e 100644 --- a/app/api/tencent/route.ts +++ b/app/api/tencent/route.ts @@ -1,15 +1,8 @@ import { getServerSideConfig } from "@/app/config/server"; -import { - TENCENT_BASE_URL, - ApiPath, - ModelProvider, - ServiceProvider, - Tencent, -} from "@/app/constant"; +import { TENCENT_BASE_URL, ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; import { getHeader } from "@/app/utils/tencent"; const serverConfig = getServerSideConfig(); diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 9f96cbfcf..bb7743bda 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -6,7 +6,7 @@ const config = getServerSideConfig(); const mergedAllowedWebDavEndpoints = [ ...internalAllowedWebDavEndpoints, - ...config.allowedWebDevEndpoints, + ...config.allowedWebDavEndpoints, ].filter((domain) => Boolean(domain.trim())); const normalizeUrl = (url: string) => { diff 
--git a/app/client/api.ts b/app/client/api.ts index cecc453ba..48bbde6bc 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -1,7 +1,6 @@ import { getClientConfig } from "../config/client"; import { ACCESS_CODE_PREFIX, - Azure, ModelProvider, ServiceProvider, } from "../constant"; @@ -26,6 +25,7 @@ export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; +export const TTSModels = ["tts-1", "tts-1-hd"] as const; export type ChatModel = ModelType; export interface MultimodalContent { @@ -54,6 +54,15 @@ export interface LLMConfig { style?: DalleRequestPayload["style"]; } +export interface SpeechOptions { + model: string; + input: string; + voice: string; + response_format?: string; + speed?: number; + onController?: (controller: AbortController) => void; +} + export interface ChatOptions { messages: RequestMessage[]; config: LLMConfig; @@ -88,6 +97,7 @@ export interface LLMModelProvider { export abstract class LLMApi { abstract chat(options: ChatOptions): Promise; + abstract speech(options: SpeechOptions): Promise; abstract usage(): Promise; abstract models(): Promise; } @@ -206,13 +216,16 @@ export function validString(x: string): boolean { return x?.length > 0; } -export function getHeaders() { +export function getHeaders(ignoreHeaders: boolean = false) { const accessStore = useAccessStore.getState(); const chatStore = useChatStore.getState(); - const headers: Record = { - "Content-Type": "application/json", - Accept: "application/json", - }; + let headers: Record = {}; + if (!ignoreHeaders) { + headers = { + "Content-Type": "application/json", + Accept: "application/json", + }; + } const clientConfig = getClientConfig(); @@ -259,7 +272,13 @@ export function getHeaders() { } function getAuthHeader(): string { - return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization"; + return isAzure + ? "api-key" + : isAnthropic + ? "x-api-key" + : isGoogle + ? "x-goog-api-key" + : "Authorization"; } const { @@ -270,14 +289,15 @@ export function getHeaders() { apiKey, isEnabledAccessControl, } = getConfig(); - // when using google api in app, not set auth header - if (isGoogle && clientConfig?.isApp) return headers; // when using baidu api in app, not set auth header if (isBaidu && clientConfig?.isApp) return headers; const authHeader = getAuthHeader(); - const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic); + const bearerToken = getBearerToken( + apiKey, + isAzure || isAnthropic || isGoogle, + ); if (bearerToken) { headers[authHeader] = bearerToken; diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index d5fa3042f..4ade9ebb9 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -12,6 +12,7 @@ import { getHeaders, LLMApi, LLMModel, + SpeechOptions, MultimodalContent, } from "../api"; import Locale from "../../locales"; @@ -83,6 +84,10 @@ export class QwenApi implements LLMApi { return res?.output?.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ role: v.role, diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 7dd39c9cd..7826838a6 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -1,5 +1,5 @@ -import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api"; +import { Anthropic, ApiPath } from "@/app/constant"; +import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api"; import { useAccessStore, useAppConfig, @@ -9,13 +9,6 @@ import { } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; - -import Locale from "../../locales"; -import { prettyObject } from "@/app/utils/format"; import { getMessageTextContent, isVisionModel } from "@/app/utils"; import { preProcessImageContent, stream } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; @@ -80,6 +73,10 @@ const ClaudeMapper = { const keys = ["claude-2, claude-instant-1"]; export class ClaudeApi implements LLMApi { + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + extractMessage(res: any) { console.log("[Response] claude response: ", res); diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 3be147f49..c360417c6 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -14,6 +14,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -75,6 +76,10 @@ export class ErnieApi implements LLMApi { return [baseUrl, path].join("/"); } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function", diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index 7677cafe1..a6e2d426e 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -13,6 +13,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -77,6 +78,10 @@ export class DoubaoApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ role: v.role, diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 12d884635..3c2607271 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,5 +1,12 @@ import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + LLMUsage, + SpeechOptions, +} from "../api"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; @@ -41,10 +48,6 @@ export class GeminiProApi implements LLMApi { let chatPath = [baseUrl, path].join("/"); chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse"; - // if chatPath.startsWith('http') then add key in query string - if (chatPath.startsWith("http") && accessStore.googleApiKey) { - chatPath += `&key=${accessStore.googleApiKey}`; - } return chatPath; } extractMessage(res: any) { @@ -56,6 +59,10 @@ export class GeminiProApi implements LLMApi { "" ); } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions): Promise { const apiClient = this; let multimodal = false; diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts index 73cea5ba0..3931672e6 100644 --- a/app/client/platforms/iflytek.ts +++ b/app/client/platforms/iflytek.ts @@ -7,7 +7,13 @@ import { } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; import Locale from "../../locales"; import { EventStreamContentType, @@ -17,7 +23,7 @@ import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; -import { OpenAIListModelResponse, RequestPayload } from "./openai"; +import { RequestPayload } from "./openai"; export class SparkApi implements LLMApi { private disableListModels = true; @@ -53,6 +59,10 @@ export class SparkApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages: ChatOptions["messages"] = []; for (const v of options.messages) { diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index cd10d2f6c..6b1979745 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -3,10 +3,8 @@ import { ApiPath, DEFAULT_API_HOST, - DEFAULT_MODELS, Moonshot, REQUEST_TIMEOUT_MS, - ServiceProvider, } from "@/app/constant"; import { useAccessStore, @@ -15,28 +13,17 @@ import { ChatMessageTool, usePluginStore, } from "@/app/store"; -import { collectModelsWithDefaultModel } from "@/app/utils/model"; -import { preProcessImageContent, stream } from "@/app/utils/chat"; -import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; - +import { stream } from "@/app/utils/chat"; import { ChatOptions, getHeaders, LLMApi, LLMModel, - LLMUsage, - MultimodalContent, + SpeechOptions, } from "../api"; -import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; - -import { OpenAIListModelResponse, RequestPayload } from "./openai"; +import { RequestPayload } from "./openai"; export class MoonshotApi implements LLMApi { private disableListModels = true; @@ -72,6 +59,10 @@ export class MoonshotApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? ""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages: ChatOptions["messages"] = []; for (const v of options.messages) { diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 664ff872b..0a8d6203a 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -33,17 +33,12 @@ import { LLMModel, LLMUsage, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, - getMessageImages, isVisionModel, isDalle3 as _isDalle3, } from "@/app/utils"; @@ -147,6 +142,44 @@ export class ChatGPTApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
res; } + async speech(options: SpeechOptions): Promise { + const requestPayload = { + model: options.model, + input: options.input, + voice: options.voice, + response_format: options.response_format, + speed: options.speed, + }; + + console.log("[Request] openai speech payload: ", requestPayload); + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const speechPath = this.path(OpenaiPath.SpeechPath); + const speechPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + const res = await fetch(speechPath, speechPayload); + clearTimeout(requestTimeoutId); + return await res.arrayBuffer(); + } catch (e) { + console.log("[Request] failed to make a speech request", e); + throw e; + } + } + async chat(options: ChatOptions) { const modelConfig = { ...useAppConfig.getState().modelConfig, @@ -244,6 +277,7 @@ export class ChatGPTApi implements LLMApi { ); } if (shouldStream) { + let index = -1; const [tools, funcs] = usePluginStore .getState() .getAsTools( @@ -269,10 +303,10 @@ export class ChatGPTApi implements LLMApi { }>; const tool_calls = choices[0]?.delta?.tool_calls; if (tool_calls?.length > 0) { - const index = tool_calls[0]?.index; const id = tool_calls[0]?.id; const args = tool_calls[0]?.function?.arguments; if (id) { + index += 1; runTools.push({ id, type: tool_calls[0]?.type, @@ -294,6 +328,8 @@ export class ChatGPTApi implements LLMApi { toolCallMessage: any, toolCallResult: any[], ) => { + // reset index value + index = -1; // @ts-ignore requestPayload?.messages?.splice( // @ts-ignore diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index 579008a9b..3e8f1a459 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts @@ -8,6 +8,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -89,6 +90,10 @@ export class HunyuanApi implements LLMApi { return res.Choices?.at(0)?.Message?.Content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const visionModel = isVisionModel(options.config.model); const messages = options.messages.map((v, index) => ({ diff --git a/app/command.ts b/app/command.ts index bea4e06f3..aec73ef53 100644 --- a/app/command.ts +++ b/app/command.ts @@ -38,6 +38,7 @@ interface ChatCommands { next?: Command; prev?: Command; clear?: Command; + fork?: Command; del?: Command; } diff --git a/app/components/artifacts.tsx b/app/components/artifacts.tsx index d725ee659..ce187fbcb 100644 --- a/app/components/artifacts.tsx +++ b/app/components/artifacts.tsx @@ -7,7 +7,6 @@ import { useImperativeHandle, } from "react"; import { useParams } from "react-router"; -import { useWindowSize } from "@/app/utils"; import { IconButton } from "./button"; import { nanoid } from "nanoid"; import ExportIcon from "../icons/share.svg"; diff --git a/app/components/auth.module.scss b/app/components/auth.module.scss index 6630c0613..fe143b428 100644 --- a/app/components/auth.module.scss +++ b/app/components/auth.module.scss @@ -1,12 +1,70 @@ .auth-page { display: flex; - justify-content: center; + justify-content: flex-start; align-items: center; height: 100%; width: 100%; flex-direction: column; + .top-banner { + position: relative; + width: 100%; + display: flex; + justify-content: center; + align-items: center; + padding: 12px 64px; + box-sizing: border-box; + background: var(--second); + .top-banner-inner { + display: flex; + justify-content: center; + align-items: center; + font-size: 14px; + line-height: 150%; + span { + gap: 8px; + a { + display: inline-flex; + align-items: center; + text-decoration: none; + margin-left: 8px; + color: var(--primary); + } + } + } + .top-banner-close { + cursor: pointer; + position: absolute; + top: 50%; + right: 48px; + transform: translateY(-50%); + } + } + + @media (max-width: 600px) { + .top-banner { + padding: 12px 24px 12px 12px; + .top-banner-close { + right: 10px; + } + .top-banner-inner { + .top-banner-logo { + margin-right: 8px; + } + } + } + } + + .auth-header { + display: flex; + justify-content: space-between; + width: 100%; + padding: 10px; + box-sizing: border-box; + animation: slide-in-from-top ease 0.3s; + } .auth-logo { + margin-top: 10vh; transform: scale(1.4); } @@ -14,6 +72,7 @@ font-size: 24px; font-weight: bold; line-height: 2; + margin-bottom: 1vh; } .auth-tips { @@ -24,6 +83,10 @@ margin: 3vh 0; } + .auth-input-second { + margin: 0 0 3vh 0; + } + .auth-actions { display: flex; justify-content: center; diff --git a/app/components/auth.tsx b/app/components/auth.tsx index 57118349b..e19512d87 100644 --- a/app/components/auth.tsx +++ b/app/components/auth.tsx @@ -1,21 +1,34 @@ import styles from "./auth.module.scss"; import { IconButton } from "./button"; - +import { useState, useEffect } from "react"; import { useNavigate } from "react-router-dom"; -import { Path } from "../constant"; +import { Path, SAAS_CHAT_URL } from "../constant"; import { useAccessStore } from "../store"; import Locale from "../locales"; - +import Delete from "../icons/close.svg"; +import Arrow from "../icons/arrow.svg"; +import Logo from "../icons/logo.svg"; +import { useMobileScreen } from "@/app/utils"; import BotIcon from "../icons/bot.svg"; -import { useEffect } from "react"; import { getClientConfig } from "../config/client"; +import LeftIcon from "@/app/icons/left.svg"; +import { safeLocalStorage } from "@/app/utils"; +import { + 
trackSettingsPageGuideToCPaymentClick, + trackAuthorizationPageButtonToCPaymentClick, +} from "../utils/auth-settings-events"; +const storage = safeLocalStorage(); export function AuthPage() { const navigate = useNavigate(); const accessStore = useAccessStore(); - const goHome = () => navigate(Path.Home); const goChat = () => navigate(Path.Chat); + const goSaas = () => { + trackAuthorizationPageButtonToCPaymentClick(); + window.location.href = SAAS_CHAT_URL; + }; + const resetAccessCode = () => { accessStore.update((access) => { access.openaiApiKey = ""; @@ -32,6 +45,14 @@ export function AuthPage() { return (
+ +
+ } + text={Locale.Auth.Return} + onClick={() => navigate(Path.Home)} + > +
@@ -65,7 +86,7 @@ export function AuthPage() { }} /> { - resetAccessCode(); - goHome(); + goSaas(); }} />
); } + +function TopBanner() { + const [isHovered, setIsHovered] = useState(false); + const [isVisible, setIsVisible] = useState(true); + const isMobile = useMobileScreen(); + useEffect(() => { + // 检查 localStorage 中是否有标记 + const bannerDismissed = storage.getItem("bannerDismissed"); + // 如果标记不存在,存储默认值并显示横幅 + if (!bannerDismissed) { + storage.setItem("bannerDismissed", "false"); + setIsVisible(true); // 显示横幅 + } else if (bannerDismissed === "true") { + // 如果标记为 "true",则隐藏横幅 + setIsVisible(false); + } + }, []); + + const handleMouseEnter = () => { + setIsHovered(true); + }; + + const handleMouseLeave = () => { + setIsHovered(false); + }; + + const handleClose = () => { + setIsVisible(false); + storage.setItem("bannerDismissed", "true"); + }; + + if (!isVisible) { + return null; + } + return ( +
+ + {(isHovered || isMobile) && ( + + )} +
+ ); +} diff --git a/app/components/button.module.scss b/app/components/button.module.scss index e332df2d2..05248bee8 100644 --- a/app/components/button.module.scss +++ b/app/components/button.module.scss @@ -5,7 +5,6 @@ align-items: center; justify-content: center; padding: 10px; - cursor: pointer; transition: all 0.3s ease; overflow: hidden; diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx index 7ef6e7b83..03b1a5c88 100644 --- a/app/components/chat-list.tsx +++ b/app/components/chat-list.tsx @@ -1,5 +1,4 @@ import DeleteIcon from "../icons/delete.svg"; -import BotIcon from "../icons/bot.svg"; import styles from "./home.module.scss"; import { @@ -12,7 +11,7 @@ import { import { useChatStore } from "../store"; import Locale from "../locales"; -import { Link, useLocation, useNavigate } from "react-router-dom"; +import { useLocation, useNavigate } from "react-router-dom"; import { Path } from "../constant"; import { MaskAvatar } from "./mask"; import { Mask } from "../store/mask"; diff --git a/app/components/chat.tsx b/app/components/chat.tsx index 3cc02d486..3d519dee7 100644 --- a/app/components/chat.tsx +++ b/app/components/chat.tsx @@ -15,6 +15,8 @@ import RenameIcon from "../icons/rename.svg"; import ExportIcon from "../icons/share.svg"; import ReturnIcon from "../icons/return.svg"; import CopyIcon from "../icons/copy.svg"; +import SpeakIcon from "../icons/speak.svg"; +import SpeakStopIcon from "../icons/speak-stop.svg"; import LoadingIcon from "../icons/three-dots.svg"; import LoadingButtonIcon from "../icons/loading.svg"; import PromptIcon from "../icons/prompt.svg"; @@ -96,7 +98,8 @@ import { import { useNavigate } from "react-router-dom"; import { CHAT_PAGE_SIZE, - LAST_INPUT_KEY, + DEFAULT_TTS_ENGINE, + ModelProvider, Path, REQUEST_TIMEOUT_MS, UNFINISHED_INPUT, @@ -113,6 +116,11 @@ import { useAllModels } from "../utils/hooks"; import { MultimodalContent } from "../client/api"; const localStorage = safeLocalStorage(); +import { ClientApi } from "../client/api"; +import { createTTSPlayer } from "../utils/audio"; +import { MsEdgeTTS, OUTPUT_FORMAT } from "../utils/ms_edge_tts"; + +const ttsPlayer = createTTSPlayer(); const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { loading: () => , @@ -443,6 +451,7 @@ export function ChatActions(props: { hitBottom: boolean; uploading: boolean; setShowShortcutKeyModal: React.Dispatch>; + setUserInput: (input: string) => void; }) { const config = useAppConfig(); const navigate = useNavigate(); @@ -981,6 +990,7 @@ function _Chat() { chatStore.updateCurrentSession( (session) => (session.clearContextIndex = session.messages.length), ), + fork: () => chatStore.forkSession(), del: () => chatStore.deleteSession(chatStore.currentSessionIndex), }); @@ -1184,10 +1194,55 @@ function _Chat() { }); }; + const accessStore = useAccessStore(); + const [speechStatus, setSpeechStatus] = useState(false); + const [speechLoading, setSpeechLoading] = useState(false); + async function openaiSpeech(text: string) { + if (speechStatus) { + ttsPlayer.stop(); + setSpeechStatus(false); + } else { + var api: ClientApi; + api = new ClientApi(ModelProvider.GPT); + const config = useAppConfig.getState(); + setSpeechLoading(true); + ttsPlayer.init(); + let audioBuffer: ArrayBuffer; + const { markdownToTxt } = require("markdown-to-txt"); + const textContent = markdownToTxt(text); + if (config.ttsConfig.engine !== DEFAULT_TTS_ENGINE) { + const edgeVoiceName = accessStore.edgeVoiceName(); + const tts = new MsEdgeTTS(); + await 
tts.setMetadata( + edgeVoiceName, + OUTPUT_FORMAT.AUDIO_24KHZ_96KBITRATE_MONO_MP3, + ); + audioBuffer = await tts.toArrayBuffer(textContent); + } else { + audioBuffer = await api.llm.speech({ + model: config.ttsConfig.model, + input: textContent, + voice: config.ttsConfig.voice, + speed: config.ttsConfig.speed, + }); + } + setSpeechStatus(true); + ttsPlayer + .play(audioBuffer, () => { + setSpeechStatus(false); + }) + .catch((e) => { + console.error("[OpenAI Speech]", e); + showToast(prettyObject(e)); + setSpeechStatus(false); + }) + .finally(() => setSpeechLoading(false)); + } + } + const context: RenderMessage[] = useMemo(() => { return session.mask.hideContext ? [] : session.mask.context.slice(); }, [session.mask.context, session.mask.hideContext]); - const accessStore = useAccessStore(); if ( context.length === 0 && @@ -1724,6 +1779,25 @@ function _Chat() { ) } /> + {config.ttsConfig.enable && ( + + ) : ( + + ) + } + onClick={() => + openaiSpeech(getMessageTextContent(message)) + } + /> + )} )} @@ -1842,6 +1916,7 @@ function _Chat() { onSearch(""); }} setShowShortcutKeyModal={setShowShortcutKeyModal} + setUserInput={setUserInput} />
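
For reference, the new `speech()` contract introduced above (the `SpeechOptions` interface in `app/client/api.ts` and its OpenAI implementation returning an `ArrayBuffer`) can be exercised roughly as in the sketch below. This is an illustrative example only, not part of the diff: the `"alloy"` voice name and the Web Audio playback are assumptions, since the real app delegates playback to `createTTSPlayer()` in `app/utils/audio.ts`, which is not shown in this change set.

```ts
import { ClientApi, SpeechOptions } from "@/app/client/api";
import { ModelProvider } from "@/app/constant";

// Minimal sketch: request OpenAI TTS audio through the new LLMApi.speech()
// contract and play the returned ArrayBuffer with the Web Audio API.
// Option names mirror the SpeechOptions interface added in this diff;
// the voice value and the playback code are assumptions for illustration.
async function speakWithOpenAI(text: string): Promise<void> {
  const api = new ClientApi(ModelProvider.GPT);

  const options: SpeechOptions = {
    model: "tts-1", // one of the TTSModels exported above ("tts-1" | "tts-1-hd")
    input: text,
    voice: "alloy", // assumed voice name; not defined anywhere in this diff
    speed: 1.0,
  };

  // speech() resolves to an ArrayBuffer containing the encoded audio.
  const audioBuffer = await api.llm.speech(options);

  // Decode and play it; the real app does this via createTTSPlayer().
  const ctx = new AudioContext();
  const decoded = await ctx.decodeAudioData(audioBuffer);
  const source = ctx.createBufferSource();
  source.buffer = decoded;
  source.connect(ctx.destination);
  source.start();
}
```

Note that every non-OpenAI provider touched by this diff (Alibaba, Anthropic, Baidu, ByteDance, Google, iFlytek, Moonshot, Tencent) currently stubs `speech()` with `throw new Error("Method not implemented.")`, so callers should only route TTS requests through the OpenAI client, as `openaiSpeech()` in `app/components/chat.tsx` does.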