Merge branch 'main' into feat/highlight_more_languages
@@ -3,9 +3,7 @@ name: VercelPreviewDeployment
 on:
   pull_request_target:
     types:
-      - opened
-      - synchronize
-      - reopened
+      - review_requested

 env:
   VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }}
@@ -0,0 +1,39 @@
+name: Run Tests
+
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - "!*"
+  pull_request:
+    types:
+      - review_requested
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: 18
+          cache: "yarn"
+
+      - name: Cache node_modules
+        uses: actions/cache@v4
+        with:
+          path: node_modules
+          key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-node_modules-
+
+      - name: Install dependencies
+        run: yarn install
+
+      - name: Run Jest tests
+        run: yarn test:ci
README.md
@@ -1,16 +1,17 @@
 <div align="center">

-<a href='#企业版'>
-  <img src="./docs/images/ent.svg" alt="icon"/>
+<a href='https://nextchat.dev/chat'>
+  <img src="https://github.com/user-attachments/assets/287c510f-f508-478e-ade3-54d30453dc18" width="1000" alt="icon"/>
 </a>

 <h1 align="center">NextChat (ChatGPT Next Web)</h1>

 English / [简体中文](./README_CN.md)

-One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support.
+One-Click to get a well-designed cross-platform ChatGPT web UI, with Claude, GPT4 & Gemini Pro support.

-一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。
+一键免费部署你的跨平台私人 ChatGPT 应用, 支持 Claude, GPT4 & Gemini Pro 模型。

 [![Saas][Saas-image]][saas-url]
 [![Web][Web-image]][web-url]
@@ -18,11 +19,11 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]

-[NextChatAI](https://nextchat.dev/chat) / [Web App](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
+[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)

-[NextChatAI](https://nextchat.dev/chat) / [网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
+[NextChatAI](https://nextchat.dev/chat) / [自部署网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)

-[saas-url]: https://nextchat.dev/chat
+[saas-url]: https://nextchat.dev/chat?utm_source=readme
 [saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
 [web-url]: https://app.nextchat.dev/
 [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
@@ -31,7 +32,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
 [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
 [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu

-[<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
+[<img src="https://vercel.com/button" alt="Deploy on Vercel" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [<img src="https://img.shields.io/badge/BT_Deploy-Install-20a53a" alt="BT Deploy Install" height="30">](https://www.bt.cn/new/download.html)

 [<img src="https://github.com/user-attachments/assets/903482d4-3e87-4134-9af1-f2588fa90659" height="60" width="288" >](https://monica.im/?utm=nxcrp)
@@ -63,7 +64,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev**

 企业版咨询: **business@nextchat.dev**

-<img width="300" src="https://github.com/user-attachments/assets/3daeb7b6-ab63-4542-9141-2e4a12c80601">
+<img width="300" src="https://github.com/user-attachments/assets/3d4305ac-6e95-489e-884b-51d51db5f692">

 ## Features

@@ -96,10 +97,12 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
 - [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
-- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
+- [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
+- [x] Supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
+- [ ] local knowledge base

 ## What's New

+- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
 - 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
 - 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
 - 🚀 v2.14.0 Now supports Artifacts & SD
 - 🚀 v2.10.1 supports Google Gemini Pro model.
@@ -133,10 +136,12 @@ For enterprise inquiries, please contact: **business@nextchat.dev**
 - [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
-- [x] 插件机制,支持`联网搜索`、`计算器`、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
+- [x] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
+- [x] 支持 Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
+- [ ] 本地知识库

 ## 最新动态

+- 🚀 v2.15.8 现在支持Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
 - 🚀 v2.15.4 客户端支持Tauri本地直接调用大模型API,更安全
 - 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
 - 🚀 v2.14.0 现在支持 Artifacts & SD 了。
 - 🚀 v2.10.1 现在支持 Gemini Pro 模型。
@@ -175,7 +180,7 @@ We recommend that you follow the steps below to re-deploy:

 ### Enable Automatic Updates

-> If you encounter a failure of Upstream Sync execution, please manually sync fork once.
+> If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code).

 After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour:
@@ -299,6 +304,22 @@ iflytek Api Key.

 iflytek Api Secret.

+### `CHATGLM_API_KEY` (optional)
+
+ChatGLM Api Key.
+
+### `CHATGLM_URL` (optional)
+
+ChatGLM Api Url.
+
+### `DEEPSEEK_API_KEY` (optional)
+
+DeepSeek Api Key.
+
+### `DEEPSEEK_URL` (optional)
+
+DeepSeek Api Url.
+
 ### `HIDE_USER_API_KEY` (optional)

 > Default: Empty
@@ -332,9 +353,9 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model

 Use `-all` to disable all default models, `+all` to enable all default models.

-For Azure: use `modelName@azure=deploymentName` to customize model name and deployment name.
-> Example: `+gpt-3.5-turbo@azure=gpt35` will show option `gpt35(Azure)` in model list.
-> If you can only use Azure models, `-all,+gpt-3.5-turbo@azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
+For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.
+> Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list.
+> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.

 For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.
 > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.
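The `modelName@Provider=deploymentName` syntax documented above is what the `getModelProvider` helper introduced further down in this diff (in `app/api/common.ts`) has to decompose; the `@azure` → `@Azure` capitalization change travels with that helper. A minimal sketch of how such a spec could be split, assuming the tuple shape implied by the call site `const [_, providerName] = getModelProvider(fullName);` (the real parsing rules in `app/utils/model.ts` may differ in detail):

```ts
// Sketch only: splits "gpt-3.5-turbo@Azure=gpt35" into its parts.
// The tuple-returning shape mirrors the getModelProvider call site;
// edge-case handling (e.g. "@" inside model names) is assumed here.
function parseCustomModel(spec: string) {
  const [fullName, displayName] = spec.split("="); // "gpt-3.5-turbo@Azure" / "gpt35"
  const at = fullName.lastIndexOf("@");
  const modelName = at === -1 ? fullName : fullName.slice(0, at);
  const providerName = at === -1 ? undefined : fullName.slice(at + 1);
  return { modelName, providerName, displayName };
}

// parseCustomModel("gpt-3.5-turbo@Azure=gpt35")
// => { modelName: "gpt-3.5-turbo", providerName: "Azure", displayName: "gpt35" }
```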
@@ -343,6 +364,13 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name

 Change default model

+### `VISION_MODELS` (optional)
+
+> Default: Empty
+> Example: `gpt-4-vision,claude-3-opus,my-custom-model` means add vision capabilities to these models in addition to the default pattern matches (which detect models containing keywords like "vision", "claude-3", "gemini-1.5", etc).
+
+Add additional models to have vision capabilities, beyond the default pattern matching. Multiple models should be separated by commas.
+
 ### `WHITE_WEBDAV_ENDPOINTS` (optional)

 Use this option to allow access to additional WebDAV service endpoints; the required format is:
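A hedged sketch of how `VISION_MODELS` could combine with the default keyword matching described above (the real detection lives in the app's utils; the exact keyword set and matching rules here are assumptions taken from the README example):

```ts
// Sketch: default keyword matching plus the VISION_MODELS override list.
// Keyword set and exact-match semantics are assumptions from the README text.
const DEFAULT_VISION_KEYWORDS = ["vision", "claude-3", "gemini-1.5"];

function isVisionModel(model: string, visionModelsEnv = ""): boolean {
  const extras = visionModelsEnv
    .split(",")
    .map((m) => m.trim())
    .filter(Boolean);
  return (
    DEFAULT_VISION_KEYWORDS.some((kw) => model.includes(kw)) ||
    extras.includes(model)
  );
}

// isVisionModel("my-custom-model", "gpt-4-vision,my-custom-model") === true
```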
@@ -395,6 +423,9 @@ yarn dev

 > [简体中文 > 如何部署到私人服务器](./README_CN.md#部署)

+### BT Install
+> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
+
 ### Docker (Recommended)

 ```shell
README_CN.md
@@ -8,7 +8,7 @@

 一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。

-[NextChatAI](https://nextchat.dev/chat) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
+[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)

 [<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
@@ -54,7 +54,7 @@

 ### 打开自动更新

-> 如果你遇到了 Upstream Sync 执行错误,请手动 Sync Fork 一次!
+> 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)!

 当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows,并启用 Upstream Sync Action,启用之后即可开启每小时定时自动更新:
@@ -184,6 +184,21 @@ ByteDance Api Url.

 讯飞星火Api Secret.

+### `CHATGLM_API_KEY` (可选)
+
+ChatGLM Api Key.
+
+### `CHATGLM_URL` (可选)
+
+ChatGLM Api Url.
+
+### `DEEPSEEK_API_KEY` (可选)
+
+DeepSeek Api Key.
+
+### `DEEPSEEK_URL` (可选)
+
+DeepSeek Api Url.

 ### `HIDE_USER_API_KEY` (可选)
@@ -216,9 +231,9 @@ ByteDance Api Url.

 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。

-在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name)
-> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。
-> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)`
+在Azure的模式下,支持使用`modelName@Azure=deploymentName`的方式配置模型名称和部署名称(deploy-name)
+> 示例:`+gpt-3.5-turbo@Azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。
+> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@Azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)`

 在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name)
 > 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项
@@ -228,6 +243,13 @@ ByteDance Api Url.

 更改默认模型

+### `VISION_MODELS` (可选)
+
+> 默认值:空
+> 示例:`gpt-4-vision,claude-3-opus,my-custom-model` 表示为这些模型添加视觉能力,作为对默认模式匹配的补充(默认会检测包含"vision"、"claude-3"、"gemini-1.5"等关键词的模型)。
+
+在默认模式匹配之外,添加更多具有视觉能力的模型。多个模型用逗号分隔。
+
 ### `DEFAULT_INPUT_TEMPLATE` (可选)

 自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项
@@ -264,6 +286,9 @@ BASE_URL=https://b.nextweb.fun/api/proxy

 ## 部署

+### 宝塔面板部署
+> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
+
 ### 容器部署 (推荐)

 > Docker 版本需要在 20 及其以上,否则会提示找不到镜像。
README_JA.md
@@ -5,7 +5,7 @@

 ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。

-[NextChatAI](https://nextchat.dev/chat) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
+[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)

 [<img src="https://vercel.com/button" alt="Zeaburでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
@@ -54,7 +54,7 @@

 ### 自動更新を開く

-> Upstream Sync の実行エラーが発生した場合は、手動で Sync Fork してください!
+> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください!

 プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります:
@@ -207,8 +207,8 @@ ByteDance API の URL。

 モデルリストを管理します。`+` でモデルを追加し、`-` でモデルを非表示にし、`モデル名=表示名` でモデルの表示名をカスタマイズし、カンマで区切ります。

-Azure モードでは、`modelName@azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。
-> 例:`+gpt-3.5-turbo@azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。
+Azure モードでは、`modelName@Azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。
+> 例:`+gpt-3.5-turbo@Azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。

 ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。
 > 例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` この設定でモデルリストに `Doubao-lite-4k(ByteDance)` のオプションが表示されます。
@@ -217,6 +217,13 @@ ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデ

 デフォルトのモデルを変更します。

+### `VISION_MODELS` (オプション)
+
+> デフォルト:空
+> 例:`gpt-4-vision,claude-3-opus,my-custom-model` は、これらのモデルにビジョン機能を追加します。これはデフォルトのパターンマッチング("vision"、"claude-3"、"gemini-1.5"などのキーワードを含むモデルを検出)に加えて適用されます。
+
+デフォルトのパターンマッチングに加えて、追加のモデルにビジョン機能を付与します。複数のモデルはカンマで区切ります。
+
 ### `DEFAULT_INPUT_TEMPLATE` (オプション)

 『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。
@@ -10,6 +10,9 @@ import { handle as alibabaHandler } from "../../alibaba";
 import { handle as moonshotHandler } from "../../moonshot";
 import { handle as stabilityHandler } from "../../stability";
 import { handle as iflytekHandler } from "../../iflytek";
+import { handle as deepseekHandler } from "../../deepseek";
+import { handle as xaiHandler } from "../../xai";
+import { handle as chatglmHandler } from "../../glm";
 import { handle as proxyHandler } from "../../proxy";

 async function handle(
@@ -38,6 +41,12 @@ async function handle(
       return stabilityHandler(req, { params });
     case ApiPath.Iflytek:
       return iflytekHandler(req, { params });
+    case ApiPath.DeepSeek:
+      return deepseekHandler(req, { params });
+    case ApiPath.XAI:
+      return xaiHandler(req, { params });
+    case ApiPath.ChatGLM:
+      return chatglmHandler(req, { params });
     case ApiPath.OpenAI:
       return openaiHandler(req, { params });
     default:
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {

       // not undefined and is false
       if (
-        isModelAvailableInServer(
+        isModelNotavailableInServer(
           serverConfig.customModels,
           jsonBody?.model as string,
           ServiceProvider.Alibaba as string,
@@ -9,7 +9,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "./auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";

 const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
@@ -122,7 +122,7 @@ async function request(req: NextRequest) {

       // not undefined and is false
       if (
-        isModelAvailableInServer(
+        isModelNotavailableInServer(
           serverConfig.customModels,
           jsonBody?.model as string,
           ServiceProvider.Anthropic as string,
@@ -92,6 +92,15 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) {
       systemApiKey =
         serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret;
       break;
+    case ModelProvider.DeepSeek:
+      systemApiKey = serverConfig.deepseekApiKey;
+      break;
+    case ModelProvider.XAI:
+      systemApiKey = serverConfig.xaiApiKey;
+      break;
+    case ModelProvider.ChatGLM:
+      systemApiKey = serverConfig.chatglmApiKey;
+      break;
     case ModelProvider.GPT:
    default:
      if (req.nextUrl.pathname.includes("azure/deployments")) {
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
 import { getAccessToken } from "@/app/utils/baidu";

 const serverConfig = getServerSideConfig();
@@ -104,7 +104,7 @@ async function request(req: NextRequest) {

       // not undefined and is false
       if (
-        isModelAvailableInServer(
+        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Baidu as string,
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {

       // not undefined and is false
       if (
-        isModelAvailableInServer(
+        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.ByteDance as string,
@@ -1,8 +1,8 @@
 import { NextRequest, NextResponse } from "next/server";
 import { getServerSideConfig } from "../config/server";
 import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
-import { isModelAvailableInServer } from "../utils/model";
 import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
+import { getModelProvider, isModelNotavailableInServer } from "../utils/model";

 const serverConfig = getServerSideConfig();
@@ -71,7 +71,7 @@ export async function requestOpenai(req: NextRequest) {
       .filter((v) => !!v && !v.startsWith("-") && v.includes(modelName))
       .forEach((m) => {
         const [fullName, displayName] = m.split("=");
-        const [_, providerName] = fullName.split("@");
+        const [_, providerName] = getModelProvider(fullName);
         if (providerName === "azure" && !displayName) {
           const [_, deployId] = (serverConfig?.azureUrl ?? "").split(
             "deployments/",
@@ -118,15 +118,14 @@ export async function requestOpenai(req: NextRequest) {

       // not undefined and is false
       if (
-        isModelAvailableInServer(
+        isModelNotavailableInServer(
           serverConfig.customModels,
           jsonBody?.model as string,
-          ServiceProvider.OpenAI as string,
-        ) ||
-        isModelAvailableInServer(
-          serverConfig.customModels,
-          jsonBody?.model as string,
-          ServiceProvider.Azure as string,
+          [
+            ServiceProvider.OpenAI,
+            ServiceProvider.Azure,
+            jsonBody?.model as string, // support provider-unspecified model
+          ],
         )
       ) {
         return NextResponse.json(
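The rename flips the predicate's polarity: the old `isModelAvailableInServer` was being used inside `if (...)` guards that reject requests, which the recurring `// not undefined and is false` comments were compensating for. A rough sketch of the contract the new helper appears to have, inferred purely from these call sites (the real implementation in `app/utils/model.ts` may differ in detail):

```ts
// Sketch of the inferred contract: return true when the requested model is
// declared unavailable (e.g. hidden via CUSTOM_MODELS "-model") for all of
// the given providers. Signature inferred from call sites; details assumed.
function isModelNotavailableInServer(
  customModels: string,
  modelName: string,
  providerNames: string | string[],
): boolean {
  const providers = Array.isArray(providerNames)
    ? providerNames
    : [providerNames];
  const hidden = new Set(
    customModels
      .split(",")
      .filter((v) => v.startsWith("-"))
      .map((v) => v.slice(1).toLowerCase()),
  );
  // Unavailable only if the model is hidden for every listed provider.
  return providers.every(
    (p) =>
      hidden.has(modelName.toLowerCase()) ||
      hidden.has(`${modelName}@${p}`.toLowerCase()),
  );
}
```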
@@ -14,6 +14,7 @@ const DANGER_CONFIG = {
   disableFastLink: serverConfig.disableFastLink,
   customModels: serverConfig.customModels,
   defaultModel: serverConfig.defaultModel,
+  visionModels: serverConfig.visionModels,
 };

 declare global {
@@ -0,0 +1,128 @@
+import { getServerSideConfig } from "@/app/config/server";
+import {
+  DEEPSEEK_BASE_URL,
+  ApiPath,
+  ModelProvider,
+  ServiceProvider,
+} from "@/app/constant";
+import { prettyObject } from "@/app/utils/format";
+import { NextRequest, NextResponse } from "next/server";
+import { auth } from "@/app/api/auth";
+import { isModelNotavailableInServer } from "@/app/utils/model";
+
+const serverConfig = getServerSideConfig();
+
+export async function handle(
+  req: NextRequest,
+  { params }: { params: { path: string[] } },
+) {
+  console.log("[DeepSeek Route] params ", params);
+
+  if (req.method === "OPTIONS") {
+    return NextResponse.json({ body: "OK" }, { status: 200 });
+  }
+
+  const authResult = auth(req, ModelProvider.DeepSeek);
+  if (authResult.error) {
+    return NextResponse.json(authResult, {
+      status: 401,
+    });
+  }
+
+  try {
+    const response = await request(req);
+    return response;
+  } catch (e) {
+    console.error("[DeepSeek] ", e);
+    return NextResponse.json(prettyObject(e));
+  }
+}
+
+async function request(req: NextRequest) {
+  const controller = new AbortController();
+
+  // alibaba use base url or just remove the path
+  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.DeepSeek, "");
+
+  let baseUrl = serverConfig.deepseekUrl || DEEPSEEK_BASE_URL;
+
+  if (!baseUrl.startsWith("http")) {
+    baseUrl = `https://${baseUrl}`;
+  }
+
+  if (baseUrl.endsWith("/")) {
+    baseUrl = baseUrl.slice(0, -1);
+  }
+
+  console.log("[Proxy] ", path);
+  console.log("[Base Url]", baseUrl);
+
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );
+
+  const fetchUrl = `${baseUrl}${path}`;
+  const fetchOptions: RequestInit = {
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: req.headers.get("Authorization") ?? "",
+    },
+    method: req.method,
+    body: req.body,
+    redirect: "manual",
+    // @ts-ignore
+    duplex: "half",
+    signal: controller.signal,
+  };
+
+  // #1815 try to refuse some request to some models
+  if (serverConfig.customModels && req.body) {
+    try {
+      const clonedBody = await req.text();
+      fetchOptions.body = clonedBody;
+
+      const jsonBody = JSON.parse(clonedBody) as { model?: string };
+
+      // not undefined and is false
+      if (
+        isModelNotavailableInServer(
+          serverConfig.customModels,
+          jsonBody?.model as string,
+          ServiceProvider.DeepSeek as string,
+        )
+      ) {
+        return NextResponse.json(
+          {
+            error: true,
+            message: `you are not allowed to use ${jsonBody?.model} model`,
+          },
+          {
+            status: 403,
+          },
+        );
+      }
+    } catch (e) {
+      console.error(`[DeepSeek] filter`, e);
+    }
+  }
+  try {
+    const res = await fetch(fetchUrl, fetchOptions);
+
+    // to prevent browser prompt for credentials
+    const newHeaders = new Headers(res.headers);
+    newHeaders.delete("www-authenticate");
+    // to disable nginx buffering
+    newHeaders.set("X-Accel-Buffering", "no");
+
+    return new Response(res.body, {
+      status: res.status,
+      statusText: res.statusText,
+      headers: newHeaders,
+    });
+  } finally {
+    clearTimeout(timeoutId);
+  }
+}
@@ -0,0 +1,129 @@
+import { getServerSideConfig } from "@/app/config/server";
+import {
+  CHATGLM_BASE_URL,
+  ApiPath,
+  ModelProvider,
+  ServiceProvider,
+} from "@/app/constant";
+import { prettyObject } from "@/app/utils/format";
+import { NextRequest, NextResponse } from "next/server";
+import { auth } from "@/app/api/auth";
+import { isModelNotavailableInServer } from "@/app/utils/model";
+
+const serverConfig = getServerSideConfig();
+
+export async function handle(
+  req: NextRequest,
+  { params }: { params: { path: string[] } },
+) {
+  console.log("[GLM Route] params ", params);
+
+  if (req.method === "OPTIONS") {
+    return NextResponse.json({ body: "OK" }, { status: 200 });
+  }
+
+  const authResult = auth(req, ModelProvider.ChatGLM);
+  if (authResult.error) {
+    return NextResponse.json(authResult, {
+      status: 401,
+    });
+  }
+
+  try {
+    const response = await request(req);
+    return response;
+  } catch (e) {
+    console.error("[GLM] ", e);
+    return NextResponse.json(prettyObject(e));
+  }
+}
+
+async function request(req: NextRequest) {
+  const controller = new AbortController();
+
+  // alibaba use base url or just remove the path
+  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, "");
+
+  let baseUrl = serverConfig.chatglmUrl || CHATGLM_BASE_URL;
+
+  if (!baseUrl.startsWith("http")) {
+    baseUrl = `https://${baseUrl}`;
+  }
+
+  if (baseUrl.endsWith("/")) {
+    baseUrl = baseUrl.slice(0, -1);
+  }
+
+  console.log("[Proxy] ", path);
+  console.log("[Base Url]", baseUrl);
+
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );
+
+  const fetchUrl = `${baseUrl}${path}`;
+  console.log("[Fetch Url] ", fetchUrl);
+  const fetchOptions: RequestInit = {
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: req.headers.get("Authorization") ?? "",
+    },
+    method: req.method,
+    body: req.body,
+    redirect: "manual",
+    // @ts-ignore
+    duplex: "half",
+    signal: controller.signal,
+  };
+
+  // #1815 try to refuse some request to some models
+  if (serverConfig.customModels && req.body) {
+    try {
+      const clonedBody = await req.text();
+      fetchOptions.body = clonedBody;
+
+      const jsonBody = JSON.parse(clonedBody) as { model?: string };
+
+      // not undefined and is false
+      if (
+        isModelNotavailableInServer(
+          serverConfig.customModels,
+          jsonBody?.model as string,
+          ServiceProvider.ChatGLM as string,
+        )
+      ) {
+        return NextResponse.json(
+          {
+            error: true,
+            message: `you are not allowed to use ${jsonBody?.model} model`,
+          },
+          {
+            status: 403,
+          },
+        );
+      }
+    } catch (e) {
+      console.error(`[GLM] filter`, e);
+    }
+  }
+  try {
+    const res = await fetch(fetchUrl, fetchOptions);
+
+    // to prevent browser prompt for credentials
+    const newHeaders = new Headers(res.headers);
+    newHeaders.delete("www-authenticate");
+    // to disable nginx buffering
+    newHeaders.set("X-Accel-Buffering", "no");
+
+    return new Response(res.body, {
+      status: res.status,
+      statusText: res.statusText,
+      headers: newHeaders,
+    });
+  } finally {
+    clearTimeout(timeoutId);
+  }
+}
@@ -23,7 +23,8 @@ export async function handle(
     });
   }

-  const bearToken = req.headers.get("Authorization") ?? "";
+  const bearToken =
+    req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || "";
   const token = bearToken.trim().replaceAll("Bearer ", "").trim();

   const apiKey = token ? token : serverConfig.googleApiKey;
@@ -91,8 +92,8 @@ async function request(req: NextRequest, apiKey: string) {
     },
     10 * 60 * 1000,
   );
-  const fetchUrl = `${baseUrl}${path}?key=${apiKey}${
-    req?.nextUrl?.searchParams?.get("alt") === "sse" ? "&alt=sse" : ""
+  const fetchUrl = `${baseUrl}${path}${
+    req?.nextUrl?.searchParams?.get("alt") === "sse" ? "?alt=sse" : ""
   }`;

   console.log("[Fetch Url] ", fetchUrl);
@@ -100,6 +101,9 @@ async function request(req: NextRequest, apiKey: string) {
     headers: {
       "Content-Type": "application/json",
       "Cache-Control": "no-store",
+      "x-goog-api-key":
+        req.headers.get("x-goog-api-key") ||
+        (req.headers.get("Authorization") ?? "").replace("Bearer ", ""),
     },
     method: req.method,
     body: req.body,
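Taken together, these google.ts hunks move the Gemini API key out of the query string (`?key=...`) and into the `x-goog-api-key` request header, which the Gemini REST API accepts as an alternative and which keeps the key out of URLs and access logs. A minimal sketch of the resulting call shape (the model name and prompt below are illustrative only, not values from this PR):

```ts
// Sketch: calling the Gemini REST API with the key in a header instead of
// embedding it in the URL as ?key=${apiKey}.
async function generateContent(apiKey: string, prompt: string) {
  const res = await fetch(
    "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent",
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-goog-api-key": apiKey, // previously appended to the fetch URL
      },
      body: JSON.stringify({
        contents: [{ parts: [{ text: prompt }] }],
      }),
    },
  );
  return res.json();
}
```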
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";
 // iflytek

 const serverConfig = getServerSideConfig();
@@ -89,7 +89,7 @@ async function request(req: NextRequest) {

       // not undefined and is false
       if (
-        isModelAvailableInServer(
+        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Iflytek as string,
@@ -8,7 +8,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "@/app/api/auth";
-import { isModelAvailableInServer } from "@/app/utils/model";
+import { isModelNotavailableInServer } from "@/app/utils/model";

 const serverConfig = getServerSideConfig();
@@ -88,7 +88,7 @@ async function request(req: NextRequest) {

       // not undefined and is false
       if (
-        isModelAvailableInServer(
+        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Moonshot as string,
@@ -6,7 +6,7 @@ import { NextRequest, NextResponse } from "next/server";
 import { auth } from "./auth";
 import { requestOpenai } from "./common";

-const ALLOWD_PATH = new Set(Object.values(OpenaiPath));
+const ALLOWED_PATH = new Set(Object.values(OpenaiPath));

 function getModels(remoteModelRes: OpenAIListModelResponse) {
   const config = getServerSideConfig();
@@ -14,8 +14,11 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
   if (config.disableGPT4) {
     remoteModelRes.data = remoteModelRes.data.filter(
       (m) =>
-        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o")) ||
-        m.id.startsWith("gpt-4o-mini"),
+        !(
+          m.id.startsWith("gpt-4") ||
+          m.id.startsWith("chatgpt-4o") ||
+          m.id.startsWith("o1")
+        ) || m.id.startsWith("gpt-4o-mini"),
     );
   }
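The reworked predicate now also strips `o1*` ids when `DISABLE_GPT4` is set, while still letting `gpt-4o-mini` back through. A quick check of the logic against a few sample ids (the model ids are illustrative):

```ts
// Sketch: the filter keeps a model when the predicate returns true.
const keep = (id: string) =>
  !(
    id.startsWith("gpt-4") ||
    id.startsWith("chatgpt-4o") ||
    id.startsWith("o1")
  ) || id.startsWith("gpt-4o-mini");

keep("gpt-3.5-turbo"); // true  - not a GPT-4 family id
keep("gpt-4-turbo");   // false - filtered out as before
keep("o1-preview");    // false - newly filtered by this change
keep("gpt-4o-mini");   // true  - explicitly re-allowed
```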
@@ -34,7 +37,7 @@ export async function handle(

   const subpath = params.path.join("/");

-  if (!ALLOWD_PATH.has(subpath)) {
+  if (!ALLOWED_PATH.has(subpath)) {
     console.log("[OpenAI Route] forbidden path ", subpath);
     return NextResponse.json(
       {
@@ -1,4 +1,5 @@
 import { NextRequest, NextResponse } from "next/server";
+import { getServerSideConfig } from "@/app/config/server";

 export async function handle(
   req: NextRequest,
@@ -9,6 +10,7 @@ export async function handle(
   if (req.method === "OPTIONS") {
     return NextResponse.json({ body: "OK" }, { status: 200 });
   }
+  const serverConfig = getServerSideConfig();

   // remove path params from searchParams
   req.nextUrl.searchParams.delete("path");
@@ -31,6 +33,18 @@ export async function handle(
       return true;
     }),
   );
+  // if dalle3 use openai api key
+  const baseUrl = req.headers.get("x-base-url");
+  if (baseUrl?.includes("api.openai.com")) {
+    if (!serverConfig.apiKey) {
+      return NextResponse.json(
+        { error: "OpenAI API key not configured" },
+        { status: 500 },
+      );
+    }
+    headers.set("Authorization", `Bearer ${serverConfig.apiKey}`);
+  }
+
   const controller = new AbortController();
   const fetchOptions: RequestInit = {
     headers,
@@ -0,0 +1,128 @@
+import { getServerSideConfig } from "@/app/config/server";
+import {
+  XAI_BASE_URL,
+  ApiPath,
+  ModelProvider,
+  ServiceProvider,
+} from "@/app/constant";
+import { prettyObject } from "@/app/utils/format";
+import { NextRequest, NextResponse } from "next/server";
+import { auth } from "@/app/api/auth";
+import { isModelNotavailableInServer } from "@/app/utils/model";
+
+const serverConfig = getServerSideConfig();
+
+export async function handle(
+  req: NextRequest,
+  { params }: { params: { path: string[] } },
+) {
+  console.log("[XAI Route] params ", params);
+
+  if (req.method === "OPTIONS") {
+    return NextResponse.json({ body: "OK" }, { status: 200 });
+  }
+
+  const authResult = auth(req, ModelProvider.XAI);
+  if (authResult.error) {
+    return NextResponse.json(authResult, {
+      status: 401,
+    });
+  }
+
+  try {
+    const response = await request(req);
+    return response;
+  } catch (e) {
+    console.error("[XAI] ", e);
+    return NextResponse.json(prettyObject(e));
+  }
+}
+
+async function request(req: NextRequest) {
+  const controller = new AbortController();
+
+  // alibaba use base url or just remove the path
+  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, "");
+
+  let baseUrl = serverConfig.xaiUrl || XAI_BASE_URL;
+
+  if (!baseUrl.startsWith("http")) {
+    baseUrl = `https://${baseUrl}`;
+  }
+
+  if (baseUrl.endsWith("/")) {
+    baseUrl = baseUrl.slice(0, -1);
+  }
+
+  console.log("[Proxy] ", path);
+  console.log("[Base Url]", baseUrl);
+
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );
+
+  const fetchUrl = `${baseUrl}${path}`;
+  const fetchOptions: RequestInit = {
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: req.headers.get("Authorization") ?? "",
+    },
+    method: req.method,
+    body: req.body,
+    redirect: "manual",
+    // @ts-ignore
+    duplex: "half",
+    signal: controller.signal,
+  };
+
+  // #1815 try to refuse some request to some models
+  if (serverConfig.customModels && req.body) {
+    try {
+      const clonedBody = await req.text();
+      fetchOptions.body = clonedBody;
+
+      const jsonBody = JSON.parse(clonedBody) as { model?: string };
+
+      // not undefined and is false
+      if (
+        isModelNotavailableInServer(
+          serverConfig.customModels,
+          jsonBody?.model as string,
+          ServiceProvider.XAI as string,
+        )
+      ) {
+        return NextResponse.json(
+          {
+            error: true,
+            message: `you are not allowed to use ${jsonBody?.model} model`,
+          },
+          {
+            status: 403,
+          },
+        );
+      }
+    } catch (e) {
+      console.error(`[XAI] filter`, e);
+    }
+  }
+  try {
+    const res = await fetch(fetchUrl, fetchOptions);
+
+    // to prevent browser prompt for credentials
+    const newHeaders = new Headers(res.headers);
+    newHeaders.delete("www-authenticate");
+    // to disable nginx buffering
+    newHeaders.set("X-Accel-Buffering", "no");
+
+    return new Response(res.body, {
+      status: res.status,
+      statusText: res.statusText,
+      headers: newHeaders,
+    });
+  } finally {
+    clearTimeout(timeoutId);
+  }
+}
@@ -20,6 +20,9 @@ import { QwenApi } from "./platforms/alibaba";
 import { HunyuanApi } from "./platforms/tencent";
 import { MoonshotApi } from "./platforms/moonshot";
 import { SparkApi } from "./platforms/iflytek";
+import { DeepSeekApi } from "./platforms/deepseek";
+import { XAIApi } from "./platforms/xai";
+import { ChatGLMApi } from "./platforms/glm";

 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
@@ -68,7 +71,7 @@ export interface ChatOptions {
   config: LLMConfig;

   onUpdate?: (message: string, chunk: string) => void;
-  onFinish: (message: string) => void;
+  onFinish: (message: string, responseRes: Response) => void;
   onError?: (err: Error) => void;
   onController?: (controller: AbortController) => void;
   onBeforeTool?: (tool: ChatMessageTool) => void;
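This is a breaking change to the `ChatOptions` contract: every platform's `chat()` now passes the underlying `Response` to `onFinish`, which is why the per-platform hunks below thread a `responseRes` variable through their streaming `finish()` helpers. A hedged sketch of a caller adapting to the new signature (`api`, `messages`, and `render` are illustrative names, not code from this PR, and the status check is one possible use of the extra parameter):

```ts
// Sketch: an onFinish handler under the new two-argument signature.
api.llm.chat({
  messages,
  config: { model: "deepseek-chat", stream: true },
  onUpdate(full, chunk) {
    render(full); // stream partial output as it arrives
  },
  onFinish(message, responseRes) {
    if (!responseRes.ok) {
      console.warn("[chat] upstream returned", responseRes.status);
    }
    render(message);
  },
  onError(err) {
    console.error(err);
  },
});
```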
@@ -152,6 +155,15 @@ export class ClientApi {
       case ModelProvider.Iflytek:
         this.llm = new SparkApi();
         break;
+      case ModelProvider.DeepSeek:
+        this.llm = new DeepSeekApi();
+        break;
+      case ModelProvider.XAI:
+        this.llm = new XAIApi();
+        break;
+      case ModelProvider.ChatGLM:
+        this.llm = new ChatGLMApi();
+        break;
       default:
         this.llm = new ChatGPTApi();
     }
@@ -231,7 +243,7 @@ export function getHeaders(ignoreHeaders: boolean = false) {

   function getConfig() {
     const modelConfig = chatStore.currentSession().mask.modelConfig;
-    const isGoogle = modelConfig.providerName == ServiceProvider.Google;
+    const isGoogle = modelConfig.providerName === ServiceProvider.Google;
     const isAzure = modelConfig.providerName === ServiceProvider.Azure;
     const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
     const isBaidu = modelConfig.providerName == ServiceProvider.Baidu;
@@ -239,6 +251,9 @@ export function getHeaders(ignoreHeaders: boolean = false) {
     const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
     const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
     const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
+    const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek;
+    const isXAI = modelConfig.providerName === ServiceProvider.XAI;
+    const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
     const isEnabledAccessControl = accessStore.enabledAccessControl();
     const apiKey = isGoogle
       ? accessStore.googleApiKey
@@ -252,6 +267,12 @@ export function getHeaders(ignoreHeaders: boolean = false) {
           ? accessStore.alibabaApiKey
           : isMoonshot
             ? accessStore.moonshotApiKey
+            : isXAI
+              ? accessStore.xaiApiKey
+              : isDeepSeek
+                ? accessStore.deepseekApiKey
+                : isChatGLM
+                  ? accessStore.chatglmApiKey
             : isIflytek
               ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret
                 ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret
@@ -266,13 +287,22 @@ export function getHeaders(ignoreHeaders: boolean = false) {
       isAlibaba,
       isMoonshot,
       isIflytek,
+      isDeepSeek,
+      isXAI,
+      isChatGLM,
       apiKey,
       isEnabledAccessControl,
     };
   }

   function getAuthHeader(): string {
-    return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization";
+    return isAzure
+      ? "api-key"
+      : isAnthropic
+        ? "x-api-key"
+        : isGoogle
+          ? "x-goog-api-key"
+          : "Authorization";
   }

   const {
@@ -280,17 +310,25 @@ export function getHeaders(ignoreHeaders: boolean = false) {
     isAzure,
     isAnthropic,
     isBaidu,
     isByteDance,
     isAlibaba,
     isMoonshot,
     isIflytek,
+    isDeepSeek,
+    isXAI,
+    isChatGLM,
     apiKey,
     isEnabledAccessControl,
   } = getConfig();
+  // when using google api in app, not set auth header
+  if (isGoogle && clientConfig?.isApp) return headers;
   // when using baidu api in app, not set auth header
   if (isBaidu && clientConfig?.isApp) return headers;

   const authHeader = getAuthHeader();

-  const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic);
+  const bearerToken = getBearerToken(
+    apiKey,
+    isAzure || isAnthropic || isGoogle,
+  );

   if (bearerToken) {
     headers[authHeader] = bearerToken;
@@ -321,6 +359,12 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
       return new ClientApi(ModelProvider.Moonshot);
     case ServiceProvider.Iflytek:
       return new ClientApi(ModelProvider.Iflytek);
+    case ServiceProvider.DeepSeek:
+      return new ClientApi(ModelProvider.DeepSeek);
+    case ServiceProvider.XAI:
+      return new ClientApi(ModelProvider.XAI);
+    case ServiceProvider.ChatGLM:
+      return new ClientApi(ModelProvider.ChatGLM);
     default:
       return new ClientApi(ModelProvider.GPT);
   }
@@ -23,6 +23,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
   object: string;
@@ -142,6 +143,7 @@ export class QwenApi implements LLMApi {
     let responseText = "";
     let remainText = "";
     let finished = false;
+    let responseRes: Response;

     // animate response to make it looks smooth
     function animateResponseText() {
@@ -171,13 +173,14 @@ export class QwenApi implements LLMApi {
     const finish = () => {
       if (!finished) {
         finished = true;
-        options.onFinish(responseText + remainText);
+        options.onFinish(responseText + remainText, responseRes);
       }
     };

     controller.signal.onabort = finish;

     fetchEventSource(chatPath, {
+      fetch: fetch as any,
       ...chatPayload,
       async onopen(res) {
         clearTimeout(requestTimeoutId);
@@ -186,6 +189,7 @@ export class QwenApi implements LLMApi {
           "[Alibaba] request response content type: ",
           contentType,
         );
+        responseRes = res;

         if (contentType?.startsWith("text/plain")) {
           responseText = await res.clone().text();
@@ -252,7 +256,7 @@ export class QwenApi implements LLMApi {

       const resJson = await res.json();
       const message = this.extractMessage(resJson);
-      options.onFinish(message);
+      options.onFinish(message, res);
     }
   } catch (e) {
     console.log("[Request] failed to make a chat request", e);
@@ -8,11 +8,12 @@ import {
   ChatMessageTool,
 } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
-import { DEFAULT_API_HOST } from "@/app/constant";
+import { ANTHROPIC_BASE_URL } from "@/app/constant";
 import { getMessageTextContent, isVisionModel } from "@/app/utils";
 import { preProcessImageContent, stream } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 import { RequestPayload } from "./openai";
+import { fetch } from "@/app/utils/stream";

 export type MultiBlockContent = {
   type: "image" | "text";
@@ -316,13 +317,14 @@ export class ClaudeApi implements LLMApi {
     };

     try {
-      controller.signal.onabort = () => options.onFinish("");
+      controller.signal.onabort = () =>
+        options.onFinish("", new Response(null, { status: 400 }));

       const res = await fetch(path, payload);
       const resJson = await res.json();

       const message = this.extractMessage(resJson);
-      options.onFinish(message);
+      options.onFinish(message, res);
     } catch (e) {
       console.error("failed to chat", e);
       options.onError?.(e as Error);
@@ -388,9 +390,7 @@ export class ClaudeApi implements LLMApi {
     if (baseUrl.trim().length === 0) {
       const isApp = !!getClientConfig()?.isApp;

-      baseUrl = isApp
-        ? DEFAULT_API_HOST + "/api/proxy/anthropic"
-        : ApiPath.Anthropic;
+      baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
     }

     if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {
@@ -24,6 +24,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
   object: string;
@@ -161,6 +162,7 @@ export class ErnieApi implements LLMApi {
     let responseText = "";
     let remainText = "";
     let finished = false;
+    let responseRes: Response;

     // animate response to make it looks smooth
     function animateResponseText() {
@@ -190,19 +192,20 @@ export class ErnieApi implements LLMApi {
     const finish = () => {
       if (!finished) {
         finished = true;
-        options.onFinish(responseText + remainText);
+        options.onFinish(responseText + remainText, responseRes);
       }
     };

     controller.signal.onabort = finish;

     fetchEventSource(chatPath, {
+      fetch: fetch as any,
       ...chatPayload,
       async onopen(res) {
         clearTimeout(requestTimeoutId);
         const contentType = res.headers.get("content-type");
         console.log("[Baidu] request response content type: ", contentType);
-
+        responseRes = res;
         if (contentType?.startsWith("text/plain")) {
           responseText = await res.clone().text();
           return finish();
@@ -265,7 +268,7 @@ export class ErnieApi implements LLMApi {

       const resJson = await res.json();
       const message = resJson?.result;
-      options.onFinish(message);
+      options.onFinish(message, res);
     }
   } catch (e) {
     console.log("[Request] failed to make a chat request", e);
@@ -23,6 +23,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
+import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
   object: string;
@@ -129,6 +130,7 @@ export class DoubaoApi implements LLMApi {
     let responseText = "";
     let remainText = "";
     let finished = false;
+    let responseRes: Response;

     // animate response to make it looks smooth
     function animateResponseText() {
@@ -158,13 +160,14 @@ export class DoubaoApi implements LLMApi {
     const finish = () => {
       if (!finished) {
         finished = true;
-        options.onFinish(responseText + remainText);
+        options.onFinish(responseText + remainText, responseRes);
       }
     };

     controller.signal.onabort = finish;

     fetchEventSource(chatPath, {
+      fetch: fetch as any,
       ...chatPayload,
       async onopen(res) {
         clearTimeout(requestTimeoutId);
@@ -173,7 +176,7 @@ export class DoubaoApi implements LLMApi {
           "[ByteDance] request response content type: ",
           contentType,
         );
-
+        responseRes = res;
         if (contentType?.startsWith("text/plain")) {
           responseText = await res.clone().text();
           return finish();
@@ -239,7 +242,7 @@ export class DoubaoApi implements LLMApi {

       const resJson = await res.json();
       const message = this.extractMessage(resJson);
-      options.onFinish(message);
+      options.onFinish(message, res);
     }
   } catch (e) {
     console.log("[Request] failed to make a chat request", e);
@ -0,0 +1,200 @@
|
|||
"use client";
|
||||
// azure and openai, using same models. so using same LLMApi.
|
||||
import {
|
||||
ApiPath,
|
||||
DEEPSEEK_BASE_URL,
|
||||
DeepSeek,
|
||||
REQUEST_TIMEOUT_MS,
|
||||
} from "@/app/constant";
|
||||
import {
|
||||
useAccessStore,
|
||||
useAppConfig,
|
||||
useChatStore,
|
||||
ChatMessageTool,
|
||||
usePluginStore,
|
||||
} from "@/app/store";
|
||||
import { stream } from "@/app/utils/chat";
|
||||
import {
|
||||
ChatOptions,
|
||||
getHeaders,
|
||||
LLMApi,
|
||||
LLMModel,
|
||||
SpeechOptions,
|
||||
} from "../api";
|
||||
import { getClientConfig } from "@/app/config/client";
|
||||
import { getMessageTextContent } from "@/app/utils";
|
||||
import { RequestPayload } from "./openai";
|
||||
import { fetch } from "@/app/utils/stream";
|
||||
|
||||
export class DeepSeekApi implements LLMApi {
|
||||
private disableListModels = true;
|
||||
|
||||
path(path: string): string {
|
||||
const accessStore = useAccessStore.getState();
|
||||
|
||||
let baseUrl = "";
|
||||
|
||||
if (accessStore.useCustomConfig) {
|
||||
baseUrl = accessStore.deepseekUrl;
|
||||
}
|
||||
|
||||
if (baseUrl.length === 0) {
|
||||
const isApp = !!getClientConfig()?.isApp;
|
||||
const apiPath = ApiPath.DeepSeek;
|
||||
baseUrl = isApp ? DEEPSEEK_BASE_URL : apiPath;
|
||||
}
|
||||
|
||||
if (baseUrl.endsWith("/")) {
|
||||
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
|
||||
}
|
||||
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) {
|
||||
baseUrl = "https://" + baseUrl;
|
||||
}
|
||||
|
||||
console.log("[Proxy Endpoint] ", baseUrl, path);
|
||||
|
||||
return [baseUrl, path].join("/");
|
||||
}
|
||||
|
||||
extractMessage(res: any) {
|
||||
return res.choices?.at(0)?.message?.content ?? "";
|
||||
}
|
||||
|
||||
speech(options: SpeechOptions): Promise<ArrayBuffer> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
|
||||
async chat(options: ChatOptions) {
|
||||
const messages: ChatOptions["messages"] = [];
|
||||
for (const v of options.messages) {
|
||||
const content = getMessageTextContent(v);
|
||||
messages.push({ role: v.role, content });
|
||||
}
|
||||
|
||||
const modelConfig = {
|
||||
...useAppConfig.getState().modelConfig,
|
||||
...useChatStore.getState().currentSession().mask.modelConfig,
|
||||
...{
|
||||
model: options.config.model,
|
||||
providerName: options.config.providerName,
|
||||
},
|
||||
};
|
||||
|
||||
const requestPayload: RequestPayload = {
|
||||
messages,
|
||||
stream: options.config.stream,
|
||||
model: modelConfig.model,
|
||||
temperature: modelConfig.temperature,
|
||||
presence_penalty: modelConfig.presence_penalty,
|
||||
frequency_penalty: modelConfig.frequency_penalty,
|
||||
top_p: modelConfig.top_p,
|
||||
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
|
||||
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
|
||||
};
|
||||
|
||||
console.log("[Request] openai payload: ", requestPayload);
|
||||
|
||||
const shouldStream = !!options.config.stream;
|
||||
const controller = new AbortController();
|
||||
options.onController?.(controller);
|
||||
|
||||
try {
|
||||
const chatPath = this.path(DeepSeek.ChatPath);
|
||||
const chatPayload = {
|
||||
method: "POST",
|
||||
body: JSON.stringify(requestPayload),
|
||||
signal: controller.signal,
|
||||
headers: getHeaders(),
|
||||
};
|
||||
|
||||
// make a fetch request
|
||||
const requestTimeoutId = setTimeout(
|
||||
() => controller.abort(),
|
||||
REQUEST_TIMEOUT_MS,
|
||||
);
|
||||
|
||||
if (shouldStream) {
|
||||
const [tools, funcs] = usePluginStore
|
||||
.getState()
|
||||
.getAsTools(
|
||||
useChatStore.getState().currentSession().mask?.plugin || [],
|
||||
);
|
||||
return stream(
|
||||
chatPath,
|
||||
requestPayload,
|
||||
getHeaders(),
|
||||
tools as any,
|
||||
funcs,
|
||||
controller,
|
||||
// parseSSE
|
||||
(text: string, runTools: ChatMessageTool[]) => {
|
||||
// console.log("parseSSE", text, runTools);
|
||||
const json = JSON.parse(text);
|
||||
const choices = json.choices as Array<{
|
||||
delta: {
|
||||
content: string;
|
||||
tool_calls: ChatMessageTool[];
|
||||
};
|
||||
}>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: append the tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
@ -0,0 +1,293 @@
"use client";
import {
  ApiPath,
  CHATGLM_BASE_URL,
  ChatGLM,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
import { preProcessImageContent } from "@/app/utils/chat";

interface BasePayload {
  model: string;
}

interface ChatPayload extends BasePayload {
  messages: ChatOptions["messages"];
  stream?: boolean;
  temperature?: number;
  presence_penalty?: number;
  frequency_penalty?: number;
  top_p?: number;
}

interface ImageGenerationPayload extends BasePayload {
  prompt: string;
  size?: string;
  user_id?: string;
}

interface VideoGenerationPayload extends BasePayload {
  prompt: string;
  duration?: number;
  resolution?: string;
  user_id?: string;
}

type ModelType = "chat" | "image" | "video";
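// One client covers ChatGLM text chat plus CogView image and CogVideo video
// generation; requests are routed by model-name prefix.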
export class ChatGLMApi implements LLMApi {
  private disableListModels = true;

  private getModelType(model: string): ModelType {
    if (model.startsWith("cogview-")) return "image";
    if (model.startsWith("cogvideo-")) return "video";
    return "chat";
  }

  private getModelPath(type: ModelType): string {
    switch (type) {
      case "image":
        return ChatGLM.ImagePath;
      case "video":
        return ChatGLM.VideoPath;
      default:
        return ChatGLM.ChatPath;
    }
  }

  private createPayload(
    messages: ChatOptions["messages"],
    modelConfig: any,
    options: ChatOptions,
  ): BasePayload {
    const modelType = this.getModelType(modelConfig.model);
    const lastMessage = messages[messages.length - 1];
    const prompt =
      typeof lastMessage.content === "string"
        ? lastMessage.content
        : lastMessage.content.map((c) => c.text).join("\n");

    switch (modelType) {
      case "image":
        return {
          model: modelConfig.model,
          prompt,
          size: options.config.size,
        } as ImageGenerationPayload;
      default:
        return {
          messages,
          stream: options.config.stream,
          model: modelConfig.model,
          temperature: modelConfig.temperature,
          presence_penalty: modelConfig.presence_penalty,
          frequency_penalty: modelConfig.frequency_penalty,
          top_p: modelConfig.top_p,
        } as ChatPayload;
    }
  }
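  // Wrap generation results in markdown/HTML the chat view can render;
  // plain chat responses are handled by extractMessage instead.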
  private parseResponse(modelType: ModelType, json: any): string {
    switch (modelType) {
      case "image": {
        const imageUrl = json.data?.[0]?.url;
        return imageUrl ? `![Generated Image](${imageUrl})` : "";
      }
      case "video": {
        const videoUrl = json.data?.[0]?.url;
        return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
      }
      default:
        return this.extractMessage(json);
    }
  }
  path(path: string): string {
    const accessStore = useAccessStore.getState();
    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.chatglmUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.ChatGLM;
      baseUrl = isApp ? CHATGLM_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);
    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = visionModel
        ? await preProcessImageContent(v.content)
        : getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };
    const modelType = this.getModelType(modelConfig.model);
    const requestPayload = this.createPayload(messages, modelConfig, options);
    const path = this.path(this.getModelPath(modelType));

    console.log(`[Request] glm ${modelType} payload: `, requestPayload);

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
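      // Image and video generation are single-shot requests, so they bypass
      // the streaming branch below entirely.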
      if (modelType === "image" || modelType === "video") {
        const res = await fetch(path, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        console.log(`[Response] glm ${modelType}:`, resJson);
        const message = this.parseResponse(modelType, resJson);
        options.onFinish(message, res);
        return;
      }
      const shouldStream = !!options.config.stream;
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          path,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(path, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
@ -7,24 +7,29 @@ import {
  LLMUsage,
  SpeechOptions,
} from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
  ChatMessageTool,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import { GEMINI_BASE_URL } from "@/app/constant";

import {
  getMessageTextContent,
  getMessageImages,
  isVisionModel,
} from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

export class GeminiProApi implements LLMApi {
  path(path: string): string {
  path(path: string, shouldStream = false): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

@ -34,7 +39,7 @@ export class GeminiProApi implements LLMApi {

    const isApp = !!getClientConfig()?.isApp;
    if (baseUrl.length === 0) {
      baseUrl = isApp ? DEFAULT_API_HOST + `/api/proxy/google` : ApiPath.Google;
      baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
    }
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);

@ -46,19 +51,27 @@ export class GeminiProApi implements LLMApi {
    console.log("[Proxy Endpoint] ", baseUrl, path);

    let chatPath = [baseUrl, path].join("/");

    if (shouldStream) {
      chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse";
    // if chatPath starts with "http", append the API key as a query parameter
    if (chatPath.startsWith("http") && accessStore.googleApiKey) {
      chatPath += `&key=${accessStore.googleApiKey}`;
    }

    return chatPath;
  }
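  // Flattens Gemini's candidates/content/parts structure into a single string,
  // falling back to the API error message when no text is present.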
  extractMessage(res: any) {
    console.log("[Response] gemini-pro response: ", res);

    const getTextFromParts = (parts: any[]) => {
      if (!Array.isArray(parts)) return "";

      return parts
        .map((part) => part?.text || "")
        .filter((text) => text.trim() !== "")
        .join("\n\n");
    };

    return (
      res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
      getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
      res?.error?.message ||
      ""
    );

@ -165,7 +178,10 @@ export class GeminiProApi implements LLMApi {
    options.onController?.(controller);
    try {
      // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
      const chatPath = this.path(Google.ChatPath(modelConfig.model));
      const chatPath = this.path(
        Google.ChatPath(modelConfig.model),
        shouldStream,
      );

      const chatPayload = {
        method: "POST",

@ -181,114 +197,87 @@ export class GeminiProApi implements LLMApi {
      );
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        // animate response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            finish();
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Gemini] request response content type: ",
              contentType,
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          // @ts-ignore
          tools.length > 0
            ? // @ts-ignore
              [{ functionDeclarations: tools.map((tool) => tool.function) }]
            : [],
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const chunkJson = JSON.parse(text);

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
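            // Gemini reports tool invocations as functionCall parts instead of
            // OpenAI-style tool_call deltas, so each one is registered here.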
            const functionCall = chunkJson?.candidates
              ?.at(0)
              ?.content.parts.at(0)?.functionCall;
            if (functionCall) {
              const { name, args } = functionCall;
              runTools.push({
                id: nanoid(),
                type: "function",
                function: {
                  name,
                  arguments: JSON.stringify(args), // utils.chat invokes the function after JSON.parse
                },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const delta = apiClient.extractMessage(json);

              if (delta) {
                remainText += delta;
              }

              const blockReason = json?.promptFeedback?.blockReason;
              if (blockReason) {
                // being blocked
                console.log(`[Google] [Safety Ratings] result:`, blockReason);
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      }
            return chunkJson?.candidates
              ?.at(0)
              ?.content.parts?.map((part: { text: string }) => part.text)
              .join("\n\n");
          },
          // processToolMessage: append the tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.contents?.splice(
              // @ts-ignore
              requestPayload?.contents?.length,
              0,
              {
                role: "model",
                parts: toolCallMessage.tool_calls.map(
                  (tool: ChatMessageTool) => ({
                    functionCall: {
                      name: tool?.function?.name,
                      args: JSON.parse(tool?.function?.arguments as string),
                    },
                  }),
                ),
              },
              // @ts-ignore
              ...toolCallResult.map((result) => ({
                role: "function",
                parts: [
                  {
                    functionResponse: {
                      name: result.name,
                      response: {
                        name: result.name,
                        content: result.content, // TODO just text content...
                      },
                    },
                  },
                ],
              })),
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

@ -303,7 +292,7 @@ export class GeminiProApi implements LLMApi {
        );
      }
      const message = apiClient.extractMessage(resJson);
      options.onFinish(message);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);

@ -1,7 +1,7 @@
"use client";
import {
  ApiPath,
  DEFAULT_API_HOST,
  IFLYTEK_BASE_URL,
  Iflytek,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";

@ -22,6 +22,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { fetch } from "@/app/utils/stream";

import { RequestPayload } from "./openai";

@ -40,7 +41,7 @@ export class SparkApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Iflytek;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
      baseUrl = isApp ? IFLYTEK_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {

@ -116,6 +117,7 @@ export class SparkApi implements LLMApi {
    let responseText = "";
    let remainText = "";
    let finished = false;
    let responseRes: Response;
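    // Keep a reference to the raw Response so onFinish can expose the
    // HTTP status and headers alongside the accumulated text.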

    // Animate response text to make it look smooth
    function animateResponseText() {

@ -142,19 +144,20 @@ export class SparkApi implements LLMApi {
    const finish = () => {
      if (!finished) {
        finished = true;
        options.onFinish(responseText + remainText);
        options.onFinish(responseText + remainText, responseRes);
      }
    };

    controller.signal.onabort = finish;

    fetchEventSource(chatPath, {
      fetch: fetch as any,
      ...chatPayload,
      async onopen(res) {
        clearTimeout(requestTimeoutId);
        const contentType = res.headers.get("content-type");
        console.log("[Spark] request response content type: ", contentType);

        responseRes = res;
        if (contentType?.startsWith("text/plain")) {
          responseText = await res.clone().text();
          return finish();

@ -229,7 +232,7 @@ export class SparkApi implements LLMApi {

      const resJson = await res.json();
      const message = this.extractMessage(resJson);
      options.onFinish(message);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);

@ -2,7 +2,7 @@
// Azure and OpenAI use the same models, so they share the same LLMApi.
import {
  ApiPath,
  DEFAULT_API_HOST,
  MOONSHOT_BASE_URL,
  Moonshot,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";

@ -24,6 +24,7 @@ import {
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

export class MoonshotApi implements LLMApi {
  private disableListModels = true;

@ -40,7 +41,7 @@ export class MoonshotApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Moonshot;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
      baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {

@ -179,7 +180,7 @@ export class MoonshotApi implements LLMApi {

      const resJson = await res.json();
      const message = this.extractMessage(resJson);
      options.onFinish(message);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);

@ -2,7 +2,7 @@
// Azure and OpenAI use the same models, so they share the same LLMApi.
import {
  ApiPath,
  DEFAULT_API_HOST,
  OPENAI_BASE_URL,
  DEFAULT_MODELS,
  OpenaiPath,
  Azure,

@ -24,7 +24,7 @@ import {
  stream,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing";
import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";

import {
  ChatOptions,

@ -42,6 +42,7 @@ import {
  isVisionModel,
  isDalle3 as _isDalle3,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;

@ -64,6 +65,7 @@ export interface RequestPayload {
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
  max_completion_tokens?: number;
}
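// Several OpenAI-compatible providers in this codebase reuse this request
// shape; the optional token caps cover both standard and o1-style models.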

export interface DalleRequestPayload {

@ -71,7 +73,7 @@ export interface DalleRequestPayload {
  prompt: string;
  response_format: "url" | "b64_json";
  n: number;
  size: DalleSize;
  size: ModelSize;
  quality: DalleQuality;
  style: DalleStyle;
}

@ -98,7 +100,7 @@ export class ChatGPTApi implements LLMApi {
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
      baseUrl = isApp ? OPENAI_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {

@ -222,7 +224,7 @@ export class ChatGPTApi implements LLMApi {
      // O1 does not support image, tools (plugins in ChatGPTNextWeb), system, stream, logprobs, temperature, top_p, n, presence_penalty, or frequency_penalty yet.
      requestPayload = {
        messages,
        stream: !isO1 ? options.config.stream : false,
        stream: options.config.stream,
        model: modelConfig.model,
        temperature: !isO1 ? modelConfig.temperature : 1,
        presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,

@ -232,6 +234,11 @@ export class ChatGPTApi implements LLMApi {
        // max_tokens is deliberately not sent here; the server-side default applies.
      };

      // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
      if (isO1) {
        requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
      }

      // add max_tokens to vision model
      if (visionModel) {
        requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);

@ -240,7 +247,7 @@ export class ChatGPTApi implements LLMApi {

      console.log("[Request] openai payload: ", requestPayload);

      const shouldStream = !isDalle3 && !!options.config.stream && !isO1;
      const shouldStream = !isDalle3 && !!options.config.stream;
      const controller = new AbortController();
      options.onController?.(controller);

@ -352,7 +359,7 @@ export class ChatGPTApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
        isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
      );

      const res = await fetch(chatPath, chatPayload);

@ -360,7 +367,7 @@ export class ChatGPTApi implements LLMApi {

      const resJson = await res.json();
      const message = await this.extractMessage(resJson);
      options.onFinish(message);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);

@ -1,5 +1,5 @@
"use client";
import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

import {

@ -22,6 +22,7 @@ import mapKeys from "lodash-es/mapKeys";
import mapValues from "lodash-es/mapValues";
import isArray from "lodash-es/isArray";
import isObject from "lodash-es/isObject";
import { fetch } from "@/app/utils/stream";

export interface OpenAIListModelResponse {
  object: string;

@ -70,9 +71,7 @@ export class HunyuanApi implements LLMApi {

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp
        ? DEFAULT_API_HOST + "/api/proxy/tencent"
        : ApiPath.Tencent;
      baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
    }

    if (baseUrl.endsWith("/")) {

@ -143,6 +142,7 @@ export class HunyuanApi implements LLMApi {
    let responseText = "";
    let remainText = "";
    let finished = false;
    let responseRes: Response;

    // animate response to make it look smooth
    function animateResponseText() {

@ -172,13 +172,14 @@ export class HunyuanApi implements LLMApi {
    const finish = () => {
      if (!finished) {
        finished = true;
        options.onFinish(responseText + remainText);
        options.onFinish(responseText + remainText, responseRes);
      }
    };

    controller.signal.onabort = finish;

    fetchEventSource(chatPath, {
      fetch: fetch as any,
      ...chatPayload,
      async onopen(res) {
        clearTimeout(requestTimeoutId);

@ -187,7 +188,7 @@ export class HunyuanApi implements LLMApi {
          "[Tencent] request response content type: ",
          contentType,
        );

        responseRes = res;
        if (contentType?.startsWith("text/plain")) {
          responseText = await res.clone().text();
          return finish();

@ -253,7 +254,7 @@ export class HunyuanApi implements LLMApi {

      const resJson = await res.json();
      const message = this.extractMessage(resJson);
      options.onFinish(message);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);

@ -0,0 +1,193 @@
"use client";
// Azure and OpenAI use the same models, so they share the same LLMApi.
import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  ChatMessageTool,
  usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";

export class XAIApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.xaiUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.XAI;
      baseUrl = isApp ? XAI_BASE_URL : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };
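    // Same sampling fields as the OpenAI client; RequestPayload is imported
    // from ./openai since the X.AI endpoint is OpenAI-compatible.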

    console.log("[Request] xai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(XAI.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: append the tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}

@ -11,12 +11,15 @@ import Logo from "../icons/logo.svg";
import { useMobileScreen } from "@/app/utils";
import BotIcon from "../icons/bot.svg";
import { getClientConfig } from "../config/client";
import { PasswordInput } from "./ui-lib";
import LeftIcon from "@/app/icons/left.svg";
import { safeLocalStorage } from "@/app/utils";
import {
  trackSettingsPageGuideToCPaymentClick,
  trackAuthorizationPageButtonToCPaymentClick,
} from "../utils/auth-settings-events";
import clsx from "clsx";

const storage = safeLocalStorage();

export function AuthPage() {

@ -53,43 +56,50 @@ export function AuthPage() {
          onClick={() => navigate(Path.Home)}
        ></IconButton>
      </div>
      <div className={`no-dark ${styles["auth-logo"]}`}>
      <div className={clsx("no-dark", styles["auth-logo"])}>
        <BotIcon />
      </div>

      <div className={styles["auth-title"]}>{Locale.Auth.Title}</div>
      <div className={styles["auth-tips"]}>{Locale.Auth.Tips}</div>

      <input
        className={styles["auth-input"]}
        type="password"
        placeholder={Locale.Auth.Input}
      <PasswordInput
        style={{ marginTop: "3vh", marginBottom: "3vh" }}
        aria={Locale.Settings.ShowPassword}
        aria-label={Locale.Auth.Input}
        value={accessStore.accessCode}
        type="text"
        placeholder={Locale.Auth.Input}
        onChange={(e) => {
          accessStore.update(
            (access) => (access.accessCode = e.currentTarget.value),
          );
        }}
      />

      {!accessStore.hideUserApiKey ? (
        <>
          <div className={styles["auth-tips"]}>{Locale.Auth.SubTips}</div>
          <input
            className={styles["auth-input"]}
            type="password"
            placeholder={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
          <PasswordInput
            style={{ marginTop: "3vh", marginBottom: "3vh" }}
            aria={Locale.Settings.ShowPassword}
            aria-label={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
            value={accessStore.openaiApiKey}
            type="text"
            placeholder={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
            onChange={(e) => {
              accessStore.update(
                (access) => (access.openaiApiKey = e.currentTarget.value),
              );
            }}
          />
          <input
            className={styles["auth-input-second"]}
            type="password"
            placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
          <PasswordInput
            style={{ marginTop: "3vh", marginBottom: "3vh" }}
            aria={Locale.Settings.ShowPassword}
            aria-label={Locale.Settings.Access.Google.ApiKey.Placeholder}
            value={accessStore.googleApiKey}
            type="text"
            placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
            onChange={(e) => {
              accessStore.update(
                (access) => (access.googleApiKey = e.currentTarget.value),

@ -155,7 +165,7 @@ function TopBanner() {
      onMouseEnter={handleMouseEnter}
      onMouseLeave={handleMouseLeave}
    >
      <div className={`${styles["top-banner-inner"]} no-dark`}>
      <div className={clsx(styles["top-banner-inner"], "no-dark")}>
        <Logo className={styles["top-banner-logo"]}></Logo>
        <span>
          {Locale.Auth.TopTips}

@ -2,6 +2,7 @@ import * as React from "react";

import styles from "./button.module.scss";
import { CSSProperties } from "react";
import clsx from "clsx";

export type ButtonType = "primary" | "danger" | null;

@ -22,12 +23,16 @@ export function IconButton(props: {
}) {
  return (
    <button
      className={
        styles["icon-button"] +
        ` ${props.bordered && styles.border} ${props.shadow && styles.shadow} ${
          props.className ?? ""
        } clickable ${styles[props.type ?? ""]}`
      }
      className={clsx(
        "clickable",
        styles["icon-button"],
        {
          [styles.border]: props.bordered,
          [styles.shadow]: props.shadow,
        },
        styles[props.type ?? ""],
        props.className,
      )}
      onClick={props.onClick}
      title={props.title}
      disabled={props.disabled}

@ -40,10 +45,9 @@ export function IconButton(props: {
      {props.icon && (
        <div
          aria-label={props.text || props.title}
          className={
            styles["icon-button-icon"] +
            ` ${props.type === "primary" && "no-dark"}`
          }
          className={clsx(styles["icon-button-icon"], {
            "no-dark": props.type === "primary",
          })}
        >
          {props.icon}
        </div>

@ -18,6 +18,7 @@ import { Mask } from "../store/mask";
import { useRef, useEffect } from "react";
import { showConfirm } from "./ui-lib";
import { useMobileScreen } from "../utils";
import clsx from "clsx";

export function ChatItem(props: {
  onClick?: () => void;

@ -45,11 +46,11 @@ export function ChatItem(props: {
    <Draggable draggableId={`${props.id}`} index={props.index}>
      {(provided) => (
        <div
          className={`${styles["chat-item"]} ${
            props.selected &&
            (currentPath === Path.Chat || currentPath === Path.Home) &&
            styles["chat-item-selected"]
          }`}
          className={clsx(styles["chat-item"], {
            [styles["chat-item-selected"]]:
              props.selected &&
              (currentPath === Path.Chat || currentPath === Path.Home),
          })}
          onClick={props.onClick}
          ref={(ele) => {
            draggableRef.current = ele;

@ -63,7 +64,7 @@ export function ChatItem(props: {
        >
          {props.narrow ? (
            <div className={styles["chat-item-narrow"]}>
              <div className={styles["chat-item-avatar"] + " no-dark"}>
              <div className={clsx(styles["chat-item-avatar"], "no-dark")}>
                <MaskAvatar
                  avatar={props.mask.avatar}
                  model={props.mask.modelConfig.model}

@ -45,6 +45,14 @@
.chat-input-actions {
  display: flex;
  flex-wrap: wrap;
  justify-content: space-between;
  gap: 5px;

  &-end {
    display: flex;
    margin-left: auto;
    gap: 5px;
  }

  .chat-input-action {
    display: inline-flex;

@ -62,10 +70,6 @@
    width: var(--icon-width);
    overflow: hidden;

    &:not(:last-child) {
      margin-right: 5px;
    }

    .text {
      white-space: nowrap;
      padding-left: 5px;

@ -231,10 +235,12 @@

  animation: slide-in ease 0.3s;

  $linear: linear-gradient(to right,
  $linear: linear-gradient(
    to right,
    rgba(0, 0, 0, 0),
    rgba(0, 0, 0, 1),
    rgba(0, 0, 0, 0));
    rgba(0, 0, 0, 0)
  );
  mask-image: $linear;

  @mixin show {

@ -373,7 +379,7 @@
  }
}

.chat-message-user>.chat-message-container {
.chat-message-user > .chat-message-container {
  align-items: flex-end;
}

@ -443,6 +449,25 @@
  transition: all ease 0.3s;
}

.chat-message-audio {
  display: flex;
  align-items: center;
  justify-content: space-between;
  border-radius: 10px;
  background-color: rgba(0, 0, 0, 0.05);
  border: var(--border-in-light);
  position: relative;
  transition: all ease 0.3s;
  margin-top: 10px;
  font-size: 14px;
  user-select: text;
  word-break: break-word;
  box-sizing: border-box;
  audio {
    height: 30px; /* adjust height */
  }
}

.chat-message-item-image {
  width: 100%;
  margin-top: 10px;

@ -471,9 +496,8 @@
  border: rgba($color: #888, $alpha: 0.2) 1px solid;
}


@media only screen and (max-width: 600px) {
  $calc-image-width: calc(100vw/3*2/var(--image-count));
  $calc-image-width: calc(100vw / 3 * 2 / var(--image-count));

  .chat-message-item-image-multi {
    width: $calc-image-width;

@ -481,13 +505,18 @@
  }

  .chat-message-item-image {
    max-width: calc(100vw/3*2);
    max-width: calc(100vw / 3 * 2);
  }
}

@media screen and (min-width: 600px) {
  $max-image-width: calc(calc(1200px - var(--sidebar-width))/3*2/var(--image-count));
  $image-width: calc(calc(var(--window-width) - var(--sidebar-width))/3*2/var(--image-count));
  $max-image-width: calc(
    calc(1200px - var(--sidebar-width)) / 3 * 2 / var(--image-count)
  );
  $image-width: calc(
    calc(var(--window-width) - var(--sidebar-width)) / 3 * 2 /
      var(--image-count)
  );

  .chat-message-item-image-multi {
    width: $image-width;

@ -497,7 +526,7 @@
  }

  .chat-message-item-image {
    max-width: calc(calc(1200px - var(--sidebar-width))/3*2);
    max-width: calc(calc(1200px - var(--sidebar-width)) / 3 * 2);
  }
}

@ -515,7 +544,7 @@
  z-index: 1;
}

.chat-message-user>.chat-message-container>.chat-message-item {
.chat-message-user > .chat-message-container > .chat-message-item {
  background-color: var(--second);

  &:hover {

@ -626,7 +655,8 @@
  min-height: 68px;
}

.chat-input:focus {}
.chat-input:focus {
}

.chat-input-send {
  background-color: var(--primary);

@ -694,3 +724,30 @@
  font-size: 12px;
  color: var(--black);
}

.chat-main {
  display: flex;
  height: 100%;
  width: 100%;
  position: relative;
  overflow: hidden;
  .chat-body-container {
    height: 100%;
    display: flex;
    flex-direction: column;
    flex: 1;
    width: 100%;
  }
  .chat-side-panel {
    position: absolute;
    inset: 0;
    background: var(--white);
    overflow: hidden;
    z-index: 10;
    transform: translateX(100%);
    transition: all ease 0.3s;
    &-show {
      transform: translateX(0);
    }
  }
}

@ -46,7 +46,7 @@ import StyleIcon from "../icons/palette.svg";
|
|||
import PluginIcon from "../icons/plugin.svg";
|
||||
import ShortcutkeyIcon from "../icons/shortcutkey.svg";
|
||||
import ReloadIcon from "../icons/reload.svg";
|
||||
|
||||
import HeadphoneIcon from "../icons/headphone.svg";
|
||||
import {
|
||||
ChatMessage,
|
||||
SubmitKey,
|
||||
|
@ -72,6 +72,8 @@ import {
|
|||
isDalle3,
|
||||
showPlugins,
|
||||
safeLocalStorage,
|
||||
getModelSizes,
|
||||
supportsCustomSize,
|
||||
} from "../utils";
|
||||
|
||||
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
|
||||
|
@ -79,7 +81,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
|
|||
import dynamic from "next/dynamic";
|
||||
|
||||
import { ChatControllerPool } from "../client/controller";
|
||||
import { DalleSize, DalleQuality, DalleStyle } from "../typing";
|
||||
import { DalleQuality, DalleStyle, ModelSize } from "../typing";
|
||||
import { Prompt, usePromptStore } from "../store/prompt";
|
||||
import Locale from "../locales";
|
||||
|
||||
|
@ -115,11 +117,17 @@ import { getClientConfig } from "../config/client";
|
|||
import { useAllModels } from "../utils/hooks";
|
||||
import { MultimodalContent } from "../client/api";
|
||||
|
||||
const localStorage = safeLocalStorage();
|
||||
import { ClientApi } from "../client/api";
|
||||
import { createTTSPlayer } from "../utils/audio";
|
||||
import { MsEdgeTTS, OUTPUT_FORMAT } from "../utils/ms_edge_tts";
|
||||
|
||||
import { isEmpty } from "lodash-es";
|
||||
import { getModelProvider } from "../utils/model";
|
||||
import { RealtimeChat } from "@/app/components/realtime-chat";
|
||||
import clsx from "clsx";
|
||||
|
||||
const localStorage = safeLocalStorage();
|
||||
|
||||
const ttsPlayer = createTTSPlayer();
|
||||
|
||||
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
|
||||
|
@ -145,7 +153,8 @@ export function SessionConfigModel(props: { onClose: () => void }) {
|
|||
text={Locale.Chat.Config.Reset}
|
||||
onClick={async () => {
|
||||
if (await showConfirm(Locale.Memory.ResetConfirm)) {
|
||||
chatStore.updateCurrentSession(
|
||||
chatStore.updateTargetSession(
|
||||
session,
|
||||
(session) => (session.memoryPrompt = ""),
|
||||
);
|
||||
}
|
||||
|
@ -170,7 +179,10 @@ export function SessionConfigModel(props: { onClose: () => void }) {
|
|||
updateMask={(updater) => {
|
||||
const mask = { ...session.mask };
|
||||
updater(mask);
|
||||
chatStore.updateCurrentSession((session) => (session.mask = mask));
|
||||
chatStore.updateTargetSession(
|
||||
session,
|
||||
(session) => (session.mask = mask),
|
||||
);
|
||||
}}
|
||||
shouldSyncFromGlobal
|
||||
extraListItems={
|
||||
|
@ -203,7 +215,7 @@ function PromptToast(props: {
|
|||
<div className={styles["prompt-toast"]} key="prompt-toast">
|
||||
{props.showToast && context.length > 0 && (
|
||||
<div
|
||||
className={styles["prompt-toast-inner"] + " clickable"}
|
||||
className={clsx(styles["prompt-toast-inner"], "clickable")}
|
||||
role="button"
|
||||
onClick={() => props.setShowModal(true)}
|
||||
>
|
||||
|
@ -324,10 +336,9 @@ export function PromptHints(props: {
|
|||
{props.prompts.map((prompt, i) => (
|
||||
<div
|
||||
ref={i === selectIndex ? selectedRef : null}
|
||||
className={
|
||||
styles["prompt-hint"] +
|
||||
` ${i === selectIndex ? styles["prompt-hint-selected"] : ""}`
|
||||
}
|
||||
className={clsx(styles["prompt-hint"], {
|
||||
[styles["prompt-hint-selected"]]: i === selectIndex,
|
||||
})}
|
||||
key={prompt.title + i.toString()}
|
||||
onClick={() => props.onPromptSelect(prompt)}
|
||||
onMouseEnter={() => setSelectIndex(i)}
|
||||
|
@ -342,12 +353,14 @@ export function PromptHints(props: {
|
|||
|
||||
function ClearContextDivider() {
|
||||
const chatStore = useChatStore();
|
||||
const session = chatStore.currentSession();
|
||||
|
||||
return (
|
||||
<div
|
||||
className={styles["clear-context"]}
|
||||
onClick={() =>
|
||||
chatStore.updateCurrentSession(
|
||||
chatStore.updateTargetSession(
|
||||
session,
|
||||
(session) => (session.clearContextIndex = undefined),
|
||||
)
|
||||
}
|
||||
|
@ -385,7 +398,7 @@ export function ChatAction(props: {
|
|||
|
||||
return (
|
||||
<div
|
||||
className={`${styles["chat-input-action"]} clickable`}
|
||||
className={clsx(styles["chat-input-action"], "clickable")}
|
||||
onClick={() => {
|
||||
props.onClick();
|
||||
setTimeout(updateWidth, 1);
|
||||
|
@ -452,11 +465,13 @@ export function ChatActions(props: {
|
|||
uploading: boolean;
|
||||
setShowShortcutKeyModal: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
setUserInput: (input: string) => void;
|
||||
setShowChatSidePanel: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
}) {
|
||||
const config = useAppConfig();
|
||||
const navigate = useNavigate();
|
||||
const chatStore = useChatStore();
|
||||
const pluginStore = usePluginStore();
|
||||
const session = chatStore.currentSession();
|
||||
|
||||
// switch themes
|
||||
const theme = config.theme;
|
||||
|
@ -473,10 +488,9 @@ export function ChatActions(props: {
|
|||
const stopAll = () => ChatControllerPool.stopAll();
|
||||
|
||||
// switch model
|
||||
const currentModel = chatStore.currentSession().mask.modelConfig.model;
|
||||
const currentModel = session.mask.modelConfig.model;
|
||||
const currentProviderName =
|
||||
chatStore.currentSession().mask.modelConfig?.providerName ||
|
||||
ServiceProvider.OpenAI;
|
||||
session.mask.modelConfig?.providerName || ServiceProvider.OpenAI;
|
||||
const allModels = useAllModels();
|
||||
const models = useMemo(() => {
|
||||
const filteredModels = allModels.filter((m) => m.available);
|
||||
|
@ -507,15 +521,13 @@ export function ChatActions(props: {
|
|||
const [showSizeSelector, setShowSizeSelector] = useState(false);
|
||||
const [showQualitySelector, setShowQualitySelector] = useState(false);
|
||||
const [showStyleSelector, setShowStyleSelector] = useState(false);
|
||||
const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"];
|
||||
const modelSizes = getModelSizes(currentModel);
|
||||
const dalle3Qualitys: DalleQuality[] = ["standard", "hd"];
|
||||
const dalle3Styles: DalleStyle[] = ["vivid", "natural"];
|
||||
const currentSize =
|
||||
chatStore.currentSession().mask.modelConfig?.size ?? "1024x1024";
|
||||
const currentQuality =
|
||||
chatStore.currentSession().mask.modelConfig?.quality ?? "standard";
|
||||
const currentStyle =
|
||||
chatStore.currentSession().mask.modelConfig?.style ?? "vivid";
|
||||
session.mask.modelConfig?.size ?? ("1024x1024" as ModelSize);
|
||||
const currentQuality = session.mask.modelConfig?.quality ?? "standard";
|
||||
const currentStyle = session.mask.modelConfig?.style ?? "vivid";
|
||||
|
||||
const isMobileScreen = useMobileScreen();
|
||||
|
||||
|
@ -533,7 +545,7 @@ export function ChatActions(props: {
|
|||
if (isUnavailableModel && models.length > 0) {
|
||||
// show next model to default model if exist
|
||||
let nextModel = models.find((model) => model.isDefault) || models[0];
|
||||
chatStore.updateCurrentSession((session) => {
|
||||
chatStore.updateTargetSession(session, (session) => {
|
||||
session.mask.modelConfig.model = nextModel.name;
|
||||
session.mask.modelConfig.providerName = nextModel?.provider
|
||||
?.providerName as ServiceProvider;
|
||||
|
@ -544,10 +556,11 @@ export function ChatActions(props: {
|
|||
: nextModel.name,
|
||||
);
|
||||
}
|
||||
}, [chatStore, currentModel, models]);
|
||||
}, [chatStore, currentModel, models, session]);
|
||||
|
||||
return (
|
||||
<div className={styles["chat-input-actions"]}>
|
||||
<>
|
||||
{couldStop && (
|
||||
<ChatAction
|
||||
onClick={stopAll}
|
||||
|
@ -611,7 +624,7 @@ export function ChatActions(props: {
|
|||
text={Locale.Chat.InputActions.Clear}
|
||||
icon={<BreakIcon />}
|
||||
onClick={() => {
|
||||
chatStore.updateCurrentSession((session) => {
|
||||
chatStore.updateTargetSession(session, (session) => {
|
||||
if (session.clearContextIndex === session.messages.length) {
|
||||
session.clearContextIndex = undefined;
|
||||
} else {
|
||||
|
@ -642,8 +655,8 @@ export function ChatActions(props: {
|
|||
onClose={() => setShowModelSelector(false)}
|
||||
onSelection={(s) => {
|
||||
if (s.length === 0) return;
|
||||
const [model, providerName] = s[0].split("@");
|
||||
chatStore.updateCurrentSession((session) => {
|
||||
const [model, providerName] = getModelProvider(s[0]);
|
||||
chatStore.updateTargetSession(session, (session) => {
|
||||
session.mask.modelConfig.model = model as ModelType;
|
||||
session.mask.modelConfig.providerName =
|
||||
providerName as ServiceProvider;
|
||||
|
@ -652,7 +665,8 @@ export function ChatActions(props: {
|
|||
if (providerName == "ByteDance") {
|
||||
const selectedModel = models.find(
|
||||
(m) =>
|
||||
m.name == model && m?.provider?.providerName == providerName,
|
||||
m.name == model &&
|
||||
m?.provider?.providerName == providerName,
|
||||
);
|
||||
showToast(selectedModel?.displayName ?? "");
|
||||
} else {
|
||||
|
@ -662,7 +676,7 @@ export function ChatActions(props: {
|
|||
/>
|
||||
)}
|
||||
|
||||
{isDalle3(currentModel) && (
|
||||
{supportsCustomSize(currentModel) && (
|
||||
<ChatAction
|
||||
onClick={() => setShowSizeSelector(true)}
|
||||
text={currentSize}
|
||||
|
@ -673,7 +687,7 @@ export function ChatActions(props: {
|
|||
{showSizeSelector && (
|
||||
<Selector
|
||||
defaultSelectedValue={currentSize}
|
||||
items={dalle3Sizes.map((m) => ({
|
||||
items={modelSizes.map((m) => ({
|
||||
title: m,
|
||||
value: m,
|
||||
}))}
|
||||
|
@ -681,7 +695,7 @@ export function ChatActions(props: {
|
|||
onSelection={(s) => {
|
||||
if (s.length === 0) return;
|
||||
const size = s[0];
|
||||
chatStore.updateCurrentSession((session) => {
|
||||
chatStore.updateTargetSession(session, (session) => {
|
||||
session.mask.modelConfig.size = size;
|
||||
});
|
||||
showToast(size);
|
||||
|
@ -708,7 +722,7 @@ export function ChatActions(props: {
|
|||
onSelection={(q) => {
|
||||
if (q.length === 0) return;
|
||||
const quality = q[0];
|
||||
chatStore.updateCurrentSession((session) => {
|
||||
chatStore.updateTargetSession(session, (session) => {
|
||||
session.mask.modelConfig.quality = quality;
|
||||
});
|
||||
showToast(quality);
|
||||
|
@ -735,7 +749,7 @@ export function ChatActions(props: {
|
|||
onSelection={(s) => {
|
||||
if (s.length === 0) return;
|
||||
const style = s[0];
|
||||
chatStore.updateCurrentSession((session) => {
|
||||
chatStore.updateTargetSession(session, (session) => {
|
||||
session.mask.modelConfig.style = style;
|
||||
});
|
||||
showToast(style);
|
||||
|
@ -766,7 +780,7 @@ export function ChatActions(props: {
|
|||
}))}
|
||||
onClose={() => setShowPluginSelector(false)}
|
||||
onSelection={(s) => {
|
||||
chatStore.updateCurrentSession((session) => {
|
||||
chatStore.updateTargetSession(session, (session) => {
|
||||
session.mask.plugin = s as string[];
|
||||
});
|
||||
}}
|
||||
|
@ -780,6 +794,16 @@ export function ChatActions(props: {
|
|||
icon={<ShortcutkeyIcon />}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
<div className={styles["chat-input-actions-end"]}>
|
||||
{config.realtimeConfig.enable && (
|
||||
<ChatAction
|
||||
onClick={() => props.setShowChatSidePanel(true)}
|
||||
text={"Realtime Chat"}
|
||||
icon={<HeadphoneIcon />}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
@ -809,7 +833,8 @@ export function EditMessageModal(props: { onClose: () => void }) {
|
|||
icon={<ConfirmIcon />}
|
||||
key="ok"
|
||||
onClick={() => {
|
||||
chatStore.updateCurrentSession(
|
||||
chatStore.updateTargetSession(
|
||||
session,
|
||||
(session) => (session.messages = messages),
|
||||
);
|
||||
props.onClose();
|
||||
|
@@ -826,7 +851,8 @@ export function EditMessageModal(props: { onClose: () => void }) {
        type="text"
        value={session.topic}
        onInput={(e) =>
-         chatStore.updateCurrentSession(
+         chatStore.updateTargetSession(
+           session,
            (session) => (session.topic = e.currentTarget.value),
          )
        }

@@ -874,6 +900,12 @@ export function ShortcutKeyModal(props: { onClose: () => void }) {
      title: Locale.Chat.ShortcutKey.showShortcutKey,
      keys: isMac ? ["⌘", "/"] : ["Ctrl", "/"],
    },
+   {
+     title: Locale.Chat.ShortcutKey.clearContext,
+     keys: isMac
+       ? ["⌘", "Shift", "backspace"]
+       : ["Ctrl", "Shift", "backspace"],
+   },
  ];
  return (
    <div className="modal-mask">
@@ -937,9 +969,24 @@ function _Chat() {
            (scrollRef.current.scrollTop + scrollRef.current.clientHeight),
        ) <= 1
      : false;
+ const isAttachWithTop = useMemo(() => {
+   const lastMessage = scrollRef.current?.lastElementChild as HTMLElement;
+   // if scrollRef is not ready or there is no message, return false
+   if (!scrollRef?.current || !lastMessage) return false;
+   const topDistance =
+     lastMessage!.getBoundingClientRect().top -
+     scrollRef.current.getBoundingClientRect().top;
+   // leave some space for the user question
+   return topDistance < 100;
+ }, [scrollRef?.current?.scrollHeight]);
+
+ const isTyping = userInput !== "";
+
+ // suppress auto-scroll while the user is typing; otherwise follow output
+ // when the view is at the bottom or the last message is still near the top
  const { setAutoScroll, scrollDomToBottom } = useScrollToBottom(
    scrollRef,
-   isScrolledToBottom,
+   (isScrolledToBottom || isAttachWithTop) && !isTyping,
  );
  const [hitBottom, setHitBottom] = useState(true);
  const isMobileScreen = useMobileScreen();
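The `<= 1` tolerance above absorbs fractional `scrollTop` values on HiDPI screens, and `isAttachWithTop` keeps the view following output while the newest message is still within 100px of the viewport top. The resulting policy, restated as a pure predicate (a sketch; names mirror the hook's inputs):

```ts
// Sketch of the auto-scroll policy wired into useScrollToBottom above.
function shouldAutoFollow(opts: {
  isScrolledToBottom: boolean; // |scrollHeight - (scrollTop + clientHeight)| <= 1
  isAttachWithTop: boolean; // last message within 100px of the viewport top
  isTyping: boolean; // the input box is non-empty
}): boolean {
  return (opts.isScrolledToBottom || opts.isAttachWithTop) && !opts.isTyping;
}
```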
@@ -987,7 +1034,8 @@ function _Chat() {
    prev: () => chatStore.nextSession(-1),
    next: () => chatStore.nextSession(1),
    clear: () =>
-     chatStore.updateCurrentSession(
+     chatStore.updateTargetSession(
+       session,
        (session) => (session.clearContextIndex = session.messages.length),
      ),
    fork: () => chatStore.forkSession(),

@@ -1015,7 +1063,7 @@ function _Chat() {
  };

  const doSubmit = (userInput: string) => {
-   if (userInput.trim() === "") return;
+   if (userInput.trim() === "" && isEmpty(attachImages)) return;
    const matchCommand = chatCommands.match(userInput);
    if (matchCommand.matched) {
      setUserInput("");

@@ -1058,7 +1106,7 @@ function _Chat() {
  };

  useEffect(() => {
-   chatStore.updateCurrentSession((session) => {
+   chatStore.updateTargetSession(session, (session) => {
      const stopTiming = Date.now() - REQUEST_TIMEOUT_MS;
      session.messages.forEach((m) => {
        // check if should stop all stale messages

@@ -1084,7 +1132,7 @@ function _Chat() {
      }
    });
    // eslint-disable-next-line react-hooks/exhaustive-deps
- }, []);
+ }, [session]);

  // check if should send message
  const onInputKeyDown = (e: React.KeyboardEvent<HTMLTextAreaElement>) => {

@@ -1115,7 +1163,8 @@ function _Chat() {
  };

  const deleteMessage = (msgId?: string) => {
-   chatStore.updateCurrentSession(
+   chatStore.updateTargetSession(
+     session,
      (session) =>
        (session.messages = session.messages.filter((m) => m.id !== msgId)),
    );

@@ -1182,7 +1231,7 @@ function _Chat() {
  };

  const onPinMessage = (message: ChatMessage) => {
-   chatStore.updateCurrentSession((session) =>
+   chatStore.updateTargetSession(session, (session) =>
      session.mask.context.push(message),
    );

@@ -1509,7 +1558,7 @@ function _Chat() {
  const [showShortcutKeyModal, setShowShortcutKeyModal] = useState(false);

  useEffect(() => {
-   const handleKeyDown = (event: any) => {
+   const handleKeyDown = (event: KeyboardEvent) => {
      // open a new chat: command + shift + o
      if (
        (event.metaKey || event.ctrlKey) &&

@@ -1560,16 +1609,35 @@ function _Chat() {
        event.preventDefault();
        setShowShortcutKeyModal(true);
      }
+     // clear context: command + shift + backspace
+     else if (
+       (event.metaKey || event.ctrlKey) &&
+       event.shiftKey &&
+       event.key.toLowerCase() === "backspace"
+     ) {
+       event.preventDefault();
+       chatStore.updateTargetSession(session, (session) => {
+         if (session.clearContextIndex === session.messages.length) {
+           session.clearContextIndex = undefined;
+         } else {
+           session.clearContextIndex = session.messages.length;
+           session.memoryPrompt = ""; // will clear memory
+         }
+       });
+     }
    };

-   window.addEventListener("keydown", handleKeyDown);
+   document.addEventListener("keydown", handleKeyDown);

    return () => {
-     window.removeEventListener("keydown", handleKeyDown);
+     document.removeEventListener("keydown", handleKeyDown);
    };
- }, [messages, chatStore, navigate]);
+ }, [messages, chatStore, navigate, session]);

+ const [showChatSidePanel, setShowChatSidePanel] = useState(false);
+
  return (
+   <>
    <div className={styles.chat} key={session.id}>
      <div className="window-header" data-tauri-drag-region>
        {isMobileScreen && (
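Two details in the hunk above are easy to miss. The keydown listener moves from `window` to `document`; both receive bubbled keyboard events, `document` just sees them one step earlier in the propagation path, so behavior is effectively unchanged. And the new `⌘/Ctrl+Shift+Backspace` handler is a toggle rather than a one-way action: restated as a standalone function (field names taken from the hunk):

```ts
interface SessionLike {
  clearContextIndex?: number;
  messages: unknown[];
  memoryPrompt: string;
}

// Toggle semantics of the clear-context shortcut above.
function toggleClearContext(session: SessionLike) {
  if (session.clearContextIndex === session.messages.length) {
    session.clearContextIndex = undefined; // second press: restore context
  } else {
    session.clearContextIndex = session.messages.length; // first press: cut here
    session.memoryPrompt = ""; // also drop the summarized memory
  }
}
```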
@@ -1585,9 +1653,14 @@ function _Chat() {
          </div>
        )}

-       <div className={`window-header-title ${styles["chat-body-title"]}`}>
-         <div
-           className={`window-header-main-title ${styles["chat-body-main-title"]}`}
+       <div
+         className={clsx("window-header-title", styles["chat-body-title"])}
+       >
+         <div
+           className={clsx(
+             "window-header-main-title",
+             styles["chat-body-main-title"],
+           )}
            onClickCapture={() => setIsEditingMessage(true)}
          >
            {!session.topic ? DEFAULT_TOPIC : session.topic}

@@ -1604,7 +1677,7 @@ function _Chat() {
            title={Locale.Chat.Actions.RefreshTitle}
            onClick={() => {
              showToast(Locale.Chat.Actions.RefreshToast);
-             chatStore.summarizeSession(true);
+             chatStore.summarizeSession(true, session);
            }}
          />
        </div>
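From this point on, the diff systematically replaces hand-built template-string class names with `clsx`. The two spellings are not equivalent: a `${cond && cls}` interpolation stringifies `false` (or `undefined`) straight into the class attribute, while `clsx` drops falsy entries. A minimal illustration with hypothetical class names:

```ts
import clsx from "clsx";

const active = false;
const broken = `btn ${active && "btn-active"}`; // -> "btn false"
const clean = clsx("btn", { "btn-active": active }); // -> "btn"
```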
@@ -1652,7 +1725,8 @@ function _Chat() {
        setShowModal={setShowPromptModal}
      />
    </div>

+   <div className={styles["chat-main"]}>
+     <div className={styles["chat-body-container"]}>
        <div
          className={styles["chat-body"]}
          ref={scrollRef}

@@ -1672,13 +1746,16 @@ function _Chat() {
            !isContext;
          const showTyping = message.preview || message.streaming;

-         const shouldShowClearContextDivider = i === clearContextIndex - 1;
+         const shouldShowClearContextDivider =
+           i === clearContextIndex - 1;

          return (
            <Fragment key={message.id}>
              <div
                className={
-                 isUser ? styles["chat-message-user"] : styles["chat-message"]
+                 isUser
+                   ? styles["chat-message-user"]
+                   : styles["chat-message"]
                }
              >
                <div className={styles["chat-message-container"]}>

@@ -1698,7 +1775,9 @@ function _Chat() {
                  newMessage;
                const images = getMessageImages(message);
                if (images.length > 0) {
-                 newContent = [{ type: "text", text: newMessage }];
+                 newContent = [
+                   { type: "text", text: newMessage },
+                 ];
                  for (let i = 0; i < images.length; i++) {
                    newContent.push({
                      type: "image_url",

@@ -1708,7 +1787,17 @@ function _Chat() {
                  });
                }
              }
-             chatStore.updateCurrentSession((session) => {
+             chatStore.updateTargetSession(
+               session,
+               (session) => {
                  const m = session.mask.context
                    .concat(session.messages)
                    .find((m) => m.id === message.id);
                  if (m) {
                    m.content = newContent;
                  }
-             });
+               },
+             );
            }}
          ></IconButton>
        </div>

@@ -1729,7 +1811,8 @@ function _Chat() {
          <MaskAvatar
            avatar={session.mask.avatar}
            model={
-             message.model || session.mask.modelConfig.model
+             message.model ||
+             session.mask.modelConfig.model
            }
          />
        )}

@@ -1794,7 +1877,9 @@ function _Chat() {
              )
            }
            onClick={() =>
-             openaiSpeech(getMessageTextContent(message))
+             openaiSpeech(
+               getMessageTextContent(message),
+             )
            }
          />
        )}

@@ -1815,6 +1900,7 @@ function _Chat() {
        {message?.tools?.map((tool) => (
          <div
+           key={tool.id}
            title={tool?.errorMsg}
            className={styles["chat-message-tool"]}
          >
            {tool.isError === false ? (

@@ -1860,7 +1946,8 @@ function _Chat() {
          className={styles["chat-message-item-images"]}
          style={
            {
-             "--image-count": getMessageImages(message).length,
+             "--image-count":
+               getMessageImages(message).length,
            } as React.CSSProperties
          }
        >

@@ -1879,6 +1966,11 @@ function _Chat() {
            </div>
          )}
        </div>
+       {message?.audio_url && (
+         <div className={styles["chat-message-audio"]}>
+           <audio src={message.audio_url} controls />
+         </div>
+       )}

        <div className={styles["chat-message-action-date"]}>
          {isContext

@@ -1892,9 +1984,11 @@ function _Chat() {
          );
        })}
      </div>

      <div className={styles["chat-input-panel"]}>
-       <PromptHints prompts={promptHints} onPromptSelect={onPromptSelect} />
+       <PromptHints
+         prompts={promptHints}
+         onPromptSelect={onPromptSelect}
+       />

        <ChatActions
          uploadImage={uploadImage}

@@ -1917,13 +2011,13 @@ function _Chat() {
          }}
          setShowShortcutKeyModal={setShowShortcutKeyModal}
          setUserInput={setUserInput}
+         setShowChatSidePanel={setShowChatSidePanel}
        />
        <label
-         className={`${styles["chat-input-panel-inner"]} ${
-           attachImages.length != 0
-             ? styles["chat-input-panel-inner-attach"]
-             : ""
-         }`}
+         className={clsx(styles["chat-input-panel-inner"], {
+           [styles["chat-input-panel-inner-attach"]]:
+             attachImages.length !== 0,
+         })}
          htmlFor="chat-input"
        >
          <textarea

@@ -1976,7 +2070,26 @@ function _Chat() {
            />
          </label>
        </div>

      </div>
+     <div
+       className={clsx(styles["chat-side-panel"], {
+         [styles["mobile"]]: isMobileScreen,
+         [styles["chat-side-panel-show"]]: showChatSidePanel,
+       })}
+     >
+       {showChatSidePanel && (
+         <RealtimeChat
+           onClose={() => {
+             setShowChatSidePanel(false);
+           }}
+           onStartVoice={async () => {
+             console.log("start voice");
+           }}
+         />
+       )}
+     </div>
+   </div>
+ </div>
  {showExport && (
    <ExportMessageModal onClose={() => setShowExport(false)} />
  )}
@@ -1992,12 +2105,12 @@ function _Chat() {
      {showShortcutKeyModal && (
        <ShortcutKeyModal onClose={() => setShowShortcutKeyModal(false)} />
      )}
    </div>
+   </>
  );
}

export function Chat() {
  const chatStore = useChatStore();
- const sessionIndex = chatStore.currentSessionIndex;
- return <_Chat key={sessionIndex}></_Chat>;
+ const session = chatStore.currentSession();
+ return <_Chat key={session.id}></_Chat>;
}

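The `Chat` wrapper now keys `_Chat` by the stable `session.id` instead of the list index. With an index key, deleting a session could hand another session's position to the mounted component and silently reuse its local state; an id key remounts exactly when the session identity changes. The general rule, illustrated with a hypothetical list:

```tsx
import React from "react";

// Keys should track identity, not position.
function SessionList({ sessions }: { sessions: { id: string; topic: string }[] }) {
  return (
    <ul>
      {sessions.map((s) => (
        <li key={s.id}>{s.topic}</li> /* key={i} would reuse state across deletions */
      ))}
    </ul>
  );
}
```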
@@ -37,7 +37,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
  return (
    <div className="no-dark">
      {props.model?.startsWith("gpt-4") ||
-     props.model?.startsWith("chatgpt-4o") ? (
+     props.model?.startsWith("chatgpt-4o") ||
+     props.model?.startsWith("o1") ? (
        <BlackBotIcon className="user-avatar" />
      ) : (
        <BotIcon className="user-avatar" />

@@ -40,6 +40,7 @@ import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
import { getClientConfig } from "../config/client";
import { type ClientApi, getClientApi } from "../client/api";
import { getMessageTextContent } from "../utils";
+import clsx from "clsx";

const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
  loading: () => <LoadingIcon />,

@@ -118,9 +119,10 @@ function Steps<
  return (
    <div
      key={i}
-     className={`${styles["step"]} ${
-       styles[i <= props.index ? "step-finished" : ""]
-     } ${i === props.index && styles["step-current"]} clickable`}
+     className={clsx("clickable", styles["step"], {
+       [styles["step-finished"]]: i <= props.index,
+       [styles["step-current"]]: i === props.index,
+     })}
      onClick={() => {
        props.onStepChange?.(i);
      }}

@@ -525,11 +527,11 @@ export function ImagePreviewer(props: {
        messages={props.messages}
      />
      <div
-       className={`${styles["preview-body"]} ${styles["default-theme"]}`}
+       className={clsx(styles["preview-body"], styles["default-theme"])}
        ref={previewRef}
      >
        <div className={styles["chat-info"]}>
-         <div className={styles["logo"] + " no-dark"}>
+         <div className={clsx(styles["logo"], "no-dark")}>
            <NextImage
              src={ChatGptIcon.src}
              alt="logo"

@@ -570,7 +572,7 @@ export function ImagePreviewer(props: {
        {props.messages.map((m, i) => {
          return (
            <div
-             className={styles["message"] + " " + styles["message-" + m.role]}
+             className={clsx(styles["message"], styles["message-" + m.role])}
              key={i}
            >
              <div className={styles["avatar"]}>

@@ -140,6 +140,9 @@
  display: flex;
  justify-content: space-between;
  align-items: center;
+ &-narrow {
+   justify-content: center;
+ }
}

.sidebar-logo {

@@ -3,7 +3,6 @@
require("../polyfill");

import { useState, useEffect } from "react";

import styles from "./home.module.scss";

import BotIcon from "../icons/bot.svg";

@@ -29,10 +28,11 @@ import { AuthPage } from "./auth";
import { getClientConfig } from "../config/client";
import { type ClientApi, getClientApi } from "../client/api";
import { useAccessStore } from "../store";
+import clsx from "clsx";

export function Loading(props: { noLogo?: boolean }) {
  return (
-   <div className={styles["loading-content"] + " no-dark"}>
+   <div className={clsx("no-dark", styles["loading-content"])}>
      {!props.noLogo && <BotIcon />}
      <LoadingIcon />
    </div>

@@ -179,7 +179,11 @@ function Screen() {
  if (isSdNew) return <Sd />;
  return (
    <>
-     <SideBar className={isHome ? styles["sidebar-show"] : ""} />
+     <SideBar
+       className={clsx({
+         [styles["sidebar-show"]]: isHome,
+       })}
+     />
      <WindowContent>
        <Routes>
          <Route path={Path.Home} element={<Chat />} />

@@ -197,9 +201,10 @@ function Screen() {

  return (
    <div
-     className={`${styles.container} ${
-       shouldTightBorder ? styles["tight-container"] : styles.container
-     } ${getLang() === "ar" ? styles["rtl-screen"] : ""}`}
+     className={clsx(styles.container, {
+       [styles["tight-container"]]: shouldTightBorder,
+       [styles["rtl-screen"]]: getLang() === "ar",
+     })}
    >
      {renderContent()}
    </div>

@@ -1,5 +1,6 @@
import * as React from "react";
import styles from "./input-range.module.scss";
+import clsx from "clsx";

interface InputRangeProps {
  onChange: React.ChangeEventHandler<HTMLInputElement>;

@@ -23,7 +24,7 @@ export function InputRange({
  aria,
}: InputRangeProps) {
  return (
-   <div className={styles["input-range"] + ` ${className ?? ""}`}>
+   <div className={clsx(styles["input-range"], className)}>
      {title || value}
      <input
        aria-label={aria}

@@ -24,6 +24,7 @@ import { useChatStore } from "../store";
import { IconButton } from "./button";

import { useAppConfig } from "../store/config";
+import clsx from "clsx";

export function Mermaid(props: { code: string }) {
  const ref = useRef<HTMLDivElement>(null);

@@ -58,7 +59,7 @@ export function Mermaid(props: { code: string }) {

  return (
    <div
-     className="no-dark mermaid"
+     className={clsx("no-dark", "mermaid")}
      style={{
        cursor: "pointer",
        overflow: "auto",

@@ -90,7 +91,11 @@ export function PreCode(props: { children: any }) {
      const refText = ref.current.querySelector("code")?.innerText;
      if (htmlDom) {
        setHtmlCode((htmlDom as HTMLElement).innerText);
-     } else if (refText?.startsWith("<!DOCTYPE")) {
+     } else if (
+       refText?.startsWith("<!DOCTYPE") ||
+       refText?.startsWith("<svg") ||
+       refText?.startsWith("<?xml")
+     ) {
        setHtmlCode(refText);
      }
    }, 600);

@@ -170,6 +175,12 @@ export function PreCode(props: { children: any }) {
}

function CustomCode(props: { children: any; className?: string }) {
+ const chatStore = useChatStore();
+ const session = chatStore.currentSession();
+ const config = useAppConfig();
+ const enableCodeFold =
+   session.mask?.enableCodeFold !== false && config.enableCodeFold;
+
  const ref = useRef<HTMLPreElement>(null);
  const [collapsed, setCollapsed] = useState(true);
  const [showToggle, setShowToggle] = useState(false);
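`enableCodeFold` above combines a global toggle with a per-mask override; the mask can only opt out, never force folding on while the global switch is off. The precedence, restated as a small grounded example:

```ts
// Precedence of the two toggles introduced above.
function codeFoldEnabled(
  maskFlag: boolean | undefined,
  globalFlag: boolean,
): boolean {
  // undefined on the mask means "follow the global setting"
  return maskFlag !== false && globalFlag;
}

codeFoldEnabled(undefined, true); // true  (follows global)
codeFoldEnabled(false, true); // false (mask opts out)
codeFoldEnabled(true, false); // false (global gate wins)
```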
@@ -185,46 +196,39 @@ function CustomCode(props: { children: any; className?: string }) {
  const toggleCollapsed = () => {
    setCollapsed((collapsed) => !collapsed);
  };
+ const renderShowMoreButton = () => {
+   if (showToggle && enableCodeFold && collapsed) {
+     return (
+       <div
+         className={clsx("show-hide-button", {
+           collapsed,
+           expanded: !collapsed,
+         })}
+       >
+         <button onClick={toggleCollapsed}>{Locale.NewChat.More}</button>
+       </div>
+     );
+   }
+   return null;
+ };
  return (
    <>
      <code
-       className={props?.className}
+       className={clsx(props?.className)}
        ref={ref}
        style={{
-         maxHeight: collapsed ? "400px" : "none",
+         maxHeight: enableCodeFold && collapsed ? "400px" : "none",
          overflowY: "hidden",
        }}
      >
        {props.children}
      </code>
-     {showToggle && collapsed && (
-       <div
-         className={`show-hide-button ${collapsed ? "collapsed" : "expanded"}`}
-       >
-         <button onClick={toggleCollapsed}>{Locale.NewChat.More}</button>
-       </div>
-     )}
+
+     {renderShowMoreButton()}
    </>
  );
}

-function escapeDollarNumber(text: string) {
-  let escapedText = "";
-
-  for (let i = 0; i < text.length; i += 1) {
-    let char = text[i];
-    const nextChar = text[i + 1] || " ";
-
-    if (char === "$" && nextChar >= "0" && nextChar <= "9") {
-      char = "\\$";
-    }
-
-    escapedText += char;
-  }
-
-  return escapedText;
-}
-
function escapeBrackets(text: string) {
  const pattern =
    /(```[\s\S]*?```|`.*?`)|\\\[([\s\S]*?[^\\])\\\]|\\\((.*?)\\\)/g;

@@ -245,6 +249,10 @@ function escapeBrackets(text: string) {

function tryWrapHtmlCode(text: string) {
  // try add wrap html code (fixed: html codeblock include 2 newline)
+ // ignore embed codeblock
+ if (text.includes("```")) {
+   return text;
+ }
  return text
    .replace(
      /([`]*?)(\w*?)([\n\r]*?)(<!DOCTYPE html>)/g,

@@ -253,7 +261,7 @@ function tryWrapHtmlCode(text: string) {
      },
    )
    .replace(
-     /(<\/body>)([\r\n\s]*?)(<\/html>)([\n\r]*?)([`]*?)([\n\r]*?)/g,
+     /(<\/body>)([\r\n\s]*?)(<\/html>)([\n\r]*)([`]*)([\n\r]*?)/g,
      (match, bodyEnd, space, htmlEnd, newLine, quoteEnd) => {
        return !quoteEnd ? bodyEnd + space + htmlEnd + "\n```\n" : match;
      },

@@ -262,7 +270,7 @@ function tryWrapHtmlCode(text: string) {

function _MarkDownContent(props: { content: string }) {
  const escapedContent = useMemo(() => {
-   return tryWrapHtmlCode(escapeBrackets(escapeDollarNumber(props.content)));
+   return tryWrapHtmlCode(escapeBrackets(props.content));
  }, [props.content]);

  return (
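Two notes on the markdown hunks above. First, `escapeDollarNumber` is deleted and dropped from the `_MarkDownContent` pipeline; its job was to keep `$`-followed-by-digit text (prices, shell variables like `$1`) from being misread as inline math:

```ts
// The removed helper, reproduced from the hunk above, with a usage check.
function escapeDollarNumber(text: string) {
  let escapedText = "";
  for (let i = 0; i < text.length; i += 1) {
    let char = text[i];
    const nextChar = text[i + 1] || " ";
    if (char === "$" && nextChar >= "0" && nextChar <= "9") {
      char = "\\$"; // only "$<digit>" is escaped
    }
    escapedText += char;
  }
  return escapedText;
}

console.log(escapeDollarNumber("costs $50, var $x")); // "costs \$50, var $x"
```

Second, the quantifier change in `tryWrapHtmlCode`'s closing-tag regex (`([\n\r]*?)([`]*?)` to `([\n\r]*)([`]*)`) appears to be the actual fix: lazy `*?` groups at the end of a pattern always match empty, so `quoteEnd` could never capture an existing fence and the `!quoteEnd` guard was always true; the greedy form lets trailing backticks suppress double-wrapping.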
@@ -55,6 +55,7 @@ import {
  OnDragEndResponder,
} from "@hello-pangea/dnd";
import { getMessageTextContent } from "../utils";
+import clsx from "clsx";

// drag and drop helper function
function reorder<T>(list: T[], startIndex: number, endIndex: number): T[] {

@@ -183,6 +184,23 @@ export function MaskConfig(props: {
    ></input>
  </ListItem>
)}
+{globalConfig.enableCodeFold && (
+  <ListItem
+    title={Locale.Mask.Config.CodeFold.Title}
+    subTitle={Locale.Mask.Config.CodeFold.SubTitle}
+  >
+    <input
+      aria-label={Locale.Mask.Config.CodeFold.Title}
+      type="checkbox"
+      checked={props.mask.enableCodeFold !== false}
+      onChange={(e) => {
+        props.updateMask((mask) => {
+          mask.enableCodeFold = e.currentTarget.checked;
+        });
+      }}
+    ></input>
+  </ListItem>
+)}

{!props.shouldSyncFromGlobal ? (
  <ListItem

@@ -571,7 +589,7 @@ export function MaskPage() {
  </div>
  <div className={styles["mask-title"]}>
    <div className={styles["mask-name"]}>{m.name}</div>
-   <div className={styles["mask-info"] + " one-line"}>
+   <div className={clsx(styles["mask-info"], "one-line")}>
      {`${Locale.Mask.Item.Info(m.context.length)} / ${
        ALL_LANG_OPTIONS[m.lang]
      } / ${m.modelConfig.model}`}

@@ -8,6 +8,7 @@ import Locale from "../locales";

import styles from "./message-selector.module.scss";
import { getMessageTextContent } from "../utils";
+import clsx from "clsx";

function useShiftRange() {
  const [startIndex, setStartIndex] = useState<number>();

@@ -71,6 +72,7 @@ export function MessageSelector(props: {
  defaultSelectAll?: boolean;
  onSelected?: (messages: ChatMessage[]) => void;
}) {
+ const LATEST_COUNT = 4;
  const chatStore = useChatStore();
  const session = chatStore.currentSession();
  const isValid = (m: ChatMessage) => m.content && !m.isError && !m.streaming;

@@ -141,15 +143,13 @@ export function MessageSelector(props: {
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [startIndex, endIndex]);

- const LATEST_COUNT = 4;
-
  return (
    <div className={styles["message-selector"]}>
      <div className={styles["message-filter"]}>
        <input
          type="text"
          placeholder={Locale.Select.Search}
-         className={styles["filter-item"] + " " + styles["search-bar"]}
+         className={clsx(styles["filter-item"], styles["search-bar"])}
          value={searchInput}
          onInput={(e) => {
            setSearchInput(e.currentTarget.value);

@@ -196,9 +196,9 @@ export function MessageSelector(props: {

  return (
    <div
-     className={`${styles["message"]} ${
-       props.selection.has(m.id!) && styles["message-selected"]
-     }`}
+     className={clsx(styles["message"], {
+       [styles["message-selected"]]: props.selection.has(m.id!),
+     })}
      key={i}
      onClick={() => {
        props.updateSelection((selection) => {

@@ -221,7 +221,7 @@ export function MessageSelector(props: {
      <div className={styles["date"]}>
        {new Date(m.date).toLocaleString()}
      </div>
-     <div className={`${styles["content"]} one-line`}>
+     <div className={clsx(styles["content"], "one-line")}>
        {getMessageTextContent(m)}
      </div>
    </div>

@@ -7,6 +7,7 @@ import { ListItem, Select } from "./ui-lib";
import { useAllModels } from "../utils/hooks";
import { groupBy } from "lodash-es";
import styles from "./model-config.module.scss";
+import { getModelProvider } from "../utils/model";

export function ModelConfigList(props: {
  modelConfig: ModelConfig;

@@ -28,7 +29,9 @@ export function ModelConfigList(props: {
  value={value}
  align="left"
  onChange={(e) => {
-   const [model, providerName] = e.currentTarget.value.split("@");
+   const [model, providerName] = getModelProvider(
+     e.currentTarget.value,
+   );
    props.updateConfig((config) => {
      config.model = ModalConfigValidator.model(model);
      config.providerName = providerName as ServiceProvider;

@@ -247,7 +250,9 @@ export function ModelConfigList(props: {
  aria-label={Locale.Settings.CompressModel.Title}
  value={compressModelValue}
  onChange={(e) => {
-   const [model, providerName] = e.currentTarget.value.split("@");
+   const [model, providerName] = getModelProvider(
+     e.currentTarget.value,
+   );
    props.updateConfig((config) => {
      config.compressModel = ModalConfigValidator.model(model);
      config.compressProviderName = providerName as ServiceProvider;
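`split("@")` breaks when the model name or a custom entry itself contains an `@`, which is presumably why both selectors now go through `getModelProvider`. The real helper lives in `../utils/model` and is not shown in this diff; a sketch of the behavior its call sites imply, splitting on the last `@`:

```ts
// Hypothetical sketch of getModelProvider (actual implementation not in this diff).
function getModelProvider(value: string): [string, string | undefined] {
  const i = value.lastIndexOf("@");
  if (i === -1) return [value, undefined]; // no provider suffix
  return [value.slice(0, i), value.slice(i + 1)];
}

getModelProvider("gpt-4o@OpenAI"); // ["gpt-4o", "OpenAI"]
getModelProvider("my@model@Azure"); // ["my@model", "Azure"]
```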
@@ -16,6 +16,7 @@ import { MaskAvatar } from "./mask";
import { useCommand } from "../command";
import { showConfirm } from "./ui-lib";
import { BUILTIN_MASK_STORE } from "../masks";
+import clsx from "clsx";

function MaskItem(props: { mask: Mask; onClick?: () => void }) {
  return (

@@ -24,7 +25,9 @@ function MaskItem(props: { mask: Mask; onClick?: () => void }) {
      avatar={props.mask.avatar}
      model={props.mask.modelConfig.model}
    />
-   <div className={styles["mask-name"] + " one-line"}>{props.mask.name}</div>
+   <div className={clsx(styles["mask-name"], "one-line")}>
+     {props.mask.name}
+   </div>
  </div>
  );
}

@@ -28,6 +28,7 @@ import {
import Locale from "../locales";
import { useNavigate } from "react-router-dom";
import { useState } from "react";
+import clsx from "clsx";

export function PluginPage() {
  const navigate = useNavigate();

@@ -199,7 +200,7 @@ export function PluginPage() {
  <div className={styles["mask-name"]}>
    {m.title}@<small>{m.version}</small>
  </div>
- <div className={styles["mask-info"] + " one-line"}>
+ <div className={clsx(styles["mask-info"], "one-line")}>
    {Locale.Plugin.Item.Info(
      FunctionToolService.add(m).length,
    )}

@@ -335,7 +336,10 @@ export function PluginPage() {
  <ListItem
    subTitle={
      <div
-       className={`markdown-body ${pluginStyles["plugin-content"]}`}
+       className={clsx(
+         "markdown-body",
+         pluginStyles["plugin-content"],
+       )}
        dir="auto"
      >
        <pre>
@@ -0,0 +1 @@
export * from "./realtime-chat";

@@ -0,0 +1,74 @@
.realtime-chat {
  width: 100%;
  justify-content: center;
  align-items: center;
  position: relative;
  display: flex;
  flex-direction: column;
  height: 100%;
  padding: 20px;
  box-sizing: border-box;
  .circle-mic {
    width: 150px;
    height: 150px;
    border-radius: 50%;
    background: linear-gradient(to bottom right, #a0d8ef, #f0f8ff);
    display: flex;
    justify-content: center;
    align-items: center;
  }
  .icon-center {
    font-size: 24px;
  }

  .bottom-icons {
    display: flex;
    justify-content: space-between;
    align-items: center;
    width: 100%;
    position: absolute;
    bottom: 20px;
    box-sizing: border-box;
    padding: 0 20px;
  }

  .icon-left,
  .icon-right {
    width: 46px;
    height: 46px;
    font-size: 36px;
    background: var(--second);
    border-radius: 50%;
    padding: 2px;
    display: flex;
    justify-content: center;
    align-items: center;
    cursor: pointer;
    &:hover {
      opacity: 0.8;
    }
  }

  &.mobile {
    display: none;
  }
}

.pulse {
  animation: pulse 1.5s infinite;
}

@keyframes pulse {
  0% {
    transform: scale(1);
    opacity: 0.7;
  }
  50% {
    transform: scale(1.1);
    opacity: 1;
  }
  100% {
    transform: scale(1);
    opacity: 0.7;
  }
}

@@ -0,0 +1,359 @@
import VoiceIcon from "@/app/icons/voice.svg";
import VoiceOffIcon from "@/app/icons/voice-off.svg";
import PowerIcon from "@/app/icons/power.svg";

import styles from "./realtime-chat.module.scss";
import clsx from "clsx";

import { useState, useRef, useEffect } from "react";

import { useChatStore, createMessage, useAppConfig } from "@/app/store";

import { IconButton } from "@/app/components/button";

import {
  Modality,
  RTClient,
  RTInputAudioItem,
  RTResponse,
  TurnDetection,
} from "rt-client";
import { AudioHandler } from "@/app/lib/audio";
import { uploadImage } from "@/app/utils/chat";
import { VoicePrint } from "@/app/components/voice-print";

interface RealtimeChatProps {
  onClose?: () => void;
  onStartVoice?: () => void;
  onPausedVoice?: () => void;
}

export function RealtimeChat({
  onClose,
  onStartVoice,
  onPausedVoice,
}: RealtimeChatProps) {
  const chatStore = useChatStore();
  const session = chatStore.currentSession();
  const config = useAppConfig();
  const [status, setStatus] = useState("");
  const [isRecording, setIsRecording] = useState(false);
  const [isConnected, setIsConnected] = useState(false);
  const [isConnecting, setIsConnecting] = useState(false);
  const [modality, setModality] = useState("audio");
  const [useVAD, setUseVAD] = useState(true);
  const [frequencies, setFrequencies] = useState<Uint8Array | undefined>();

  const clientRef = useRef<RTClient | null>(null);
  const audioHandlerRef = useRef<AudioHandler | null>(null);
  const initRef = useRef(false);

  const temperature = config.realtimeConfig.temperature;
  const apiKey = config.realtimeConfig.apiKey;
  const model = config.realtimeConfig.model;
  const azure = config.realtimeConfig.provider === "Azure";
  const azureEndpoint = config.realtimeConfig.azure.endpoint;
  const azureDeployment = config.realtimeConfig.azure.deployment;
  const voice = config.realtimeConfig.voice;

  const handleConnect = async () => {
    if (isConnecting) return;
    if (!isConnected) {
      try {
        setIsConnecting(true);
        clientRef.current = azure
          ? new RTClient(
              new URL(azureEndpoint),
              { key: apiKey },
              { deployment: azureDeployment },
            )
          : new RTClient({ key: apiKey }, { model });
        const modalities: Modality[] =
          modality === "audio" ? ["text", "audio"] : ["text"];
        const turnDetection: TurnDetection = useVAD
          ? { type: "server_vad" }
          : null;
        await clientRef.current.configure({
          instructions: "",
          voice,
          input_audio_transcription: { model: "whisper-1" },
          turn_detection: turnDetection,
          tools: [],
          temperature,
          modalities,
        });
        startResponseListener();

        setIsConnected(true);
        // TODO
        // try {
        //   const recentMessages = chatStore.getMessagesWithMemory();
        //   for (const message of recentMessages) {
        //     const { role, content } = message;
        //     if (typeof content === "string") {
        //       await clientRef.current.sendItem({
        //         type: "message",
        //         role: role as any,
        //         content: [
        //           {
        //             type: (role === "assistant" ? "text" : "input_text") as any,
        //             text: content as string,
        //           },
        //         ],
        //       });
        //     }
        //   }
        //   // await clientRef.current.generateResponse();
        // } catch (error) {
        //   console.error("Set message failed:", error);
        // }
      } catch (error) {
        console.error("Connection failed:", error);
        setStatus("Connection failed");
      } finally {
        setIsConnecting(false);
      }
    } else {
      await disconnect();
    }
  };

  const disconnect = async () => {
    if (clientRef.current) {
      try {
        await clientRef.current.close();
        clientRef.current = null;
        setIsConnected(false);
      } catch (error) {
        console.error("Disconnect failed:", error);
      }
    }
  };

  const startResponseListener = async () => {
    if (!clientRef.current) return;

    try {
      for await (const serverEvent of clientRef.current.events()) {
        if (serverEvent.type === "response") {
          await handleResponse(serverEvent);
        } else if (serverEvent.type === "input_audio") {
          await handleInputAudio(serverEvent);
        }
      }
    } catch (error) {
      if (clientRef.current) {
        console.error("Response iteration error:", error);
      }
    }
  };

  const handleResponse = async (response: RTResponse) => {
    for await (const item of response) {
      if (item.type === "message" && item.role === "assistant") {
        const botMessage = createMessage({
          role: item.role,
          content: "",
        });
        // add bot message first
        chatStore.updateTargetSession(session, (session) => {
          session.messages = session.messages.concat([botMessage]);
        });
        let hasAudio = false;
        for await (const content of item) {
          if (content.type === "text") {
            for await (const text of content.textChunks()) {
              botMessage.content += text;
            }
          } else if (content.type === "audio") {
            const textTask = async () => {
              for await (const text of content.transcriptChunks()) {
                botMessage.content += text;
              }
            };
            const audioTask = async () => {
              audioHandlerRef.current?.startStreamingPlayback();
              for await (const audio of content.audioChunks()) {
                hasAudio = true;
                audioHandlerRef.current?.playChunk(audio);
              }
            };
            await Promise.all([textTask(), audioTask()]);
          }
          // update message.content
          chatStore.updateTargetSession(session, (session) => {
            session.messages = session.messages.concat();
          });
        }
        if (hasAudio) {
          // upload audio to get an audio_url
          const blob = audioHandlerRef.current?.savePlayFile();
          uploadImage(blob!).then((audio_url) => {
            botMessage.audio_url = audio_url;
            // update text and audio_url
            chatStore.updateTargetSession(session, (session) => {
              session.messages = session.messages.concat();
            });
          });
        }
      }
    }
  };

  const handleInputAudio = async (item: RTInputAudioItem) => {
    await item.waitForCompletion();
    if (item.transcription) {
      const userMessage = createMessage({
        role: "user",
        content: item.transcription,
      });
      chatStore.updateTargetSession(session, (session) => {
        session.messages = session.messages.concat([userMessage]);
      });
      // save input audio_url, and update session
      const { audioStartMillis, audioEndMillis } = item;
      // upload audio to get an audio_url
      const blob = audioHandlerRef.current?.saveRecordFile(
        audioStartMillis,
        audioEndMillis,
      );
      uploadImage(blob!).then((audio_url) => {
        userMessage.audio_url = audio_url;
        chatStore.updateTargetSession(session, (session) => {
          session.messages = session.messages.concat();
        });
      });
    }
    // stop streaming playback after input audio arrives.
    audioHandlerRef.current?.stopStreamingPlayback();
  };

  const toggleRecording = async () => {
    if (!isRecording && clientRef.current) {
      try {
        if (!audioHandlerRef.current) {
          audioHandlerRef.current = new AudioHandler();
          await audioHandlerRef.current.initialize();
        }
        await audioHandlerRef.current.startRecording(async (chunk) => {
          await clientRef.current?.sendAudio(chunk);
        });
        setIsRecording(true);
      } catch (error) {
        console.error("Failed to start recording:", error);
      }
    } else if (audioHandlerRef.current) {
      try {
        audioHandlerRef.current.stopRecording();
        if (!useVAD) {
          const inputAudio = await clientRef.current?.commitAudio();
          await handleInputAudio(inputAudio!);
          await clientRef.current?.generateResponse();
        }
        setIsRecording(false);
      } catch (error) {
        console.error("Failed to stop recording:", error);
      }
    }
  };

  useEffect(() => {
    // prevent double initialization
    if (initRef.current) return;
    initRef.current = true;

    const initAudioHandler = async () => {
      const handler = new AudioHandler();
      await handler.initialize();
      audioHandlerRef.current = handler;
      await handleConnect();
      await toggleRecording();
    };

    initAudioHandler().catch((error) => {
      setStatus(error);
      console.error(error);
    });

    return () => {
      if (isRecording) {
        toggleRecording();
      }
      audioHandlerRef.current?.close().catch(console.error);
      disconnect();
    };
  }, []);

  useEffect(() => {
    let animationFrameId: number;

    if (isConnected && isRecording) {
      const animationFrame = () => {
        if (audioHandlerRef.current) {
          const freqData = audioHandlerRef.current.getByteFrequencyData();
          setFrequencies(freqData);
        }
        animationFrameId = requestAnimationFrame(animationFrame);
      };

      animationFrameId = requestAnimationFrame(animationFrame);
    } else {
      setFrequencies(undefined);
    }

    return () => {
      if (animationFrameId) {
        cancelAnimationFrame(animationFrameId);
      }
    };
  }, [isConnected, isRecording]);

  // update session params
  useEffect(() => {
    clientRef.current?.configure({ voice });
  }, [voice]);
  useEffect(() => {
    clientRef.current?.configure({ temperature });
  }, [temperature]);

  const handleClose = async () => {
    onClose?.();
    if (isRecording) {
      await toggleRecording();
    }
    disconnect().catch(console.error);
  };

  return (
    <div className={styles["realtime-chat"]}>
      <div
        className={clsx(styles["circle-mic"], {
          [styles["pulse"]]: isRecording,
        })}
      >
        <VoicePrint frequencies={frequencies} isActive={isRecording} />
      </div>

      <div className={styles["bottom-icons"]}>
        <div>
          <IconButton
            icon={isRecording ? <VoiceIcon /> : <VoiceOffIcon />}
            onClick={toggleRecording}
            disabled={!isConnected}
            shadow
            bordered
          />
        </div>
        <div className={styles["icon-center"]}>{status}</div>
        <div>
          <IconButton
            icon={<PowerIcon />}
            onClick={handleClose}
            shadow
            bordered
          />
        </div>
      </div>
    </div>
  );
}
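`startResponseListener` above drains the realtime connection with a single `for await` loop and dispatches on `serverEvent.type`; errors are only logged while a client is still attached, so the loop ends quietly after `disconnect()` nulls the ref. The skeleton of that pattern, lifted out of the component (a sketch; `rt-client`'s real event objects are richer than this):

```ts
// Generic event-pump skeleton mirroring startResponseListener above.
async function pumpEvents<E extends { type: string }>(
  events: AsyncIterable<E>,
  handlers: Partial<Record<string, (event: E) => Promise<void>>>,
): Promise<void> {
  for await (const event of events) {
    await handlers[event.type]?.(event);
  }
}

// Usage shape (assumed):
// await pumpEvents(client.events(), {
//   response: handleResponse,
//   input_audio: handleInputAudio,
// });
```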
@@ -0,0 +1,173 @@
import { RealtimeConfig } from "@/app/store";

import Locale from "@/app/locales";
import { ListItem, Select, PasswordInput } from "@/app/components/ui-lib";

import { InputRange } from "@/app/components/input-range";
import { Voice } from "rt-client";
import { ServiceProvider } from "@/app/constant";

const providers = [ServiceProvider.OpenAI, ServiceProvider.Azure];

const models = ["gpt-4o-realtime-preview-2024-10-01"];

const voice = ["alloy", "shimmer", "echo"];

export function RealtimeConfigList(props: {
  realtimeConfig: RealtimeConfig;
  updateConfig: (updater: (config: RealtimeConfig) => void) => void;
}) {
  const azureConfigComponent = props.realtimeConfig.provider ===
    ServiceProvider.Azure && (
    <>
      <ListItem
        title={Locale.Settings.Realtime.Azure.Endpoint.Title}
        subTitle={Locale.Settings.Realtime.Azure.Endpoint.SubTitle}
      >
        <input
          value={props.realtimeConfig?.azure?.endpoint}
          type="text"
          placeholder={Locale.Settings.Realtime.Azure.Endpoint.Title}
          onChange={(e) => {
            props.updateConfig(
              (config) => (config.azure.endpoint = e.currentTarget.value),
            );
          }}
        />
      </ListItem>
      <ListItem
        title={Locale.Settings.Realtime.Azure.Deployment.Title}
        subTitle={Locale.Settings.Realtime.Azure.Deployment.SubTitle}
      >
        <input
          value={props.realtimeConfig?.azure?.deployment}
          type="text"
          placeholder={Locale.Settings.Realtime.Azure.Deployment.Title}
          onChange={(e) => {
            props.updateConfig(
              (config) => (config.azure.deployment = e.currentTarget.value),
            );
          }}
        />
      </ListItem>
    </>
  );

  return (
    <>
      <ListItem
        title={Locale.Settings.Realtime.Enable.Title}
        subTitle={Locale.Settings.Realtime.Enable.SubTitle}
      >
        <input
          type="checkbox"
          checked={props.realtimeConfig.enable}
          onChange={(e) =>
            props.updateConfig(
              (config) => (config.enable = e.currentTarget.checked),
            )
          }
        ></input>
      </ListItem>

      {props.realtimeConfig.enable && (
        <>
          <ListItem
            title={Locale.Settings.Realtime.Provider.Title}
            subTitle={Locale.Settings.Realtime.Provider.SubTitle}
          >
            <Select
              aria-label={Locale.Settings.Realtime.Provider.Title}
              value={props.realtimeConfig.provider}
              onChange={(e) => {
                props.updateConfig(
                  (config) =>
                    (config.provider = e.target.value as ServiceProvider),
                );
              }}
            >
              {providers.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.Realtime.Model.Title}
            subTitle={Locale.Settings.Realtime.Model.SubTitle}
          >
            <Select
              aria-label={Locale.Settings.Realtime.Model.Title}
              value={props.realtimeConfig.model}
              onChange={(e) => {
                props.updateConfig((config) => (config.model = e.target.value));
              }}
            >
              {models.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.Realtime.ApiKey.Title}
            subTitle={Locale.Settings.Realtime.ApiKey.SubTitle}
          >
            <PasswordInput
              aria={Locale.Settings.ShowPassword}
              aria-label={Locale.Settings.Realtime.ApiKey.Title}
              value={props.realtimeConfig.apiKey}
              type="text"
              placeholder={Locale.Settings.Realtime.ApiKey.Placeholder}
              onChange={(e) => {
                props.updateConfig(
                  (config) => (config.apiKey = e.currentTarget.value),
                );
              }}
            />
          </ListItem>
          {azureConfigComponent}
          <ListItem
            title={Locale.Settings.TTS.Voice.Title}
            subTitle={Locale.Settings.TTS.Voice.SubTitle}
          >
            <Select
              value={props.realtimeConfig.voice}
              onChange={(e) => {
                props.updateConfig(
                  (config) => (config.voice = e.currentTarget.value as Voice),
                );
              }}
            >
              {voice.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.Realtime.Temperature.Title}
            subTitle={Locale.Settings.Realtime.Temperature.SubTitle}
          >
            <InputRange
              aria={Locale.Settings.Temperature.Title}
              value={props.realtimeConfig?.temperature?.toFixed(1)}
              min="0.6"
              max="1"
              step="0.1"
              onChange={(e) => {
                props.updateConfig(
                  (config) =>
                    (config.temperature = e.currentTarget.valueAsNumber),
                );
              }}
            ></InputRange>
          </ListItem>
        </>
      )}
    </>
  );
}
@@ -4,6 +4,7 @@ import { Select } from "@/app/components/ui-lib";
import { IconButton } from "@/app/components/button";
import Locale from "@/app/locales";
import { useSdStore } from "@/app/store/sd";
+import clsx from "clsx";

export const params = [
  {

@@ -136,7 +137,7 @@ export function ControlParamItem(props: {
  className?: string;
}) {
  return (
-   <div className={styles["ctrl-param-item"] + ` ${props.className || ""}`}>
+   <div className={clsx(styles["ctrl-param-item"], props.className)}>
      <div className={styles["ctrl-param-item-header"]}>
        <div className={styles["ctrl-param-item-title"]}>
          <div>

@@ -36,6 +36,7 @@ import { removeImage } from "@/app/utils/chat";
import { SideBar } from "./sd-sidebar";
import { WindowContent } from "@/app/components/home";
import { params } from "./sd-panel";
+import clsx from "clsx";

function getSdTaskStatus(item: any) {
  let s: string;

@@ -104,7 +105,7 @@ export function Sd() {

  return (
    <>
-     <SideBar className={isSd ? homeStyles["sidebar-show"] : ""} />
+     <SideBar className={clsx({ [homeStyles["sidebar-show"]]: isSd })} />
      <WindowContent>
        <div className={chatStyles.chat} key={"1"}>
          <div className="window-header" data-tauri-drag-region>

@@ -121,7 +122,10 @@ export function Sd() {
            </div>
          )}
          <div
-           className={`window-header-title ${chatStyles["chat-body-title"]}`}
+           className={clsx(
+             "window-header-title",
+             chatStyles["chat-body-title"],
+           )}
          >
            <div className={`window-header-main-title`}>Stability AI</div>
            <div className="window-header-sub-title">
@@ -49,7 +49,7 @@ import Locale, {
  changeLang,
  getLang,
} from "../locales";
-import { copyToClipboard } from "../utils";
+import { copyToClipboard, clientUpdate, semverCompare } from "../utils";
import Link from "next/link";
import {
  Anthropic,

@@ -59,6 +59,7 @@ import {
  ByteDance,
  Alibaba,
  Moonshot,
+ XAI,
  Google,
  GoogleSafetySettingsThreshold,
  OPENAI_BASE_URL,

@@ -71,6 +72,8 @@ import {
  Stability,
  Iflytek,
  SAAS_CHAT_URL,
+ ChatGLM,
+ DeepSeek,
} from "../constant";
import { Prompt, SearchService, usePromptStore } from "../store/prompt";
import { ErrorBoundary } from "./error";

@@ -83,6 +86,7 @@ import { nanoid } from "nanoid";
import { useMaskStore } from "../store/mask";
import { ProviderType } from "../utils/cloud";
import { TTSConfigList } from "./tts-config";
+import { RealtimeConfigList } from "./realtime-chat/realtime-config";

function EditPromptModal(props: { id: string; onClose: () => void }) {
  const promptStore = usePromptStore();

@@ -585,7 +589,7 @@ export function Settings() {
  const [checkingUpdate, setCheckingUpdate] = useState(false);
  const currentVersion = updateStore.formatVersion(updateStore.version);
  const remoteId = updateStore.formatVersion(updateStore.remoteVersion);
- const hasNewVersion = currentVersion !== remoteId;
+ const hasNewVersion = semverCompare(currentVersion, remoteId) === -1;
  const updateUrl = getClientConfig()?.isApp ? RELEASE_URL : UPDATE_URL;

  function checkUpdate(force = false) {
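`currentVersion !== remoteId` flagged an update whenever the strings differed, including when the local build was ahead of the remote tag; `semverCompare(current, remote) === -1` only fires when the local version is strictly older. A minimal comparison consistent with that check (a sketch assuming plain `major.minor.patch` tags; the app's real `semverCompare` in `../utils` may handle more):

```ts
// Sketch only; returns -1 / 0 / 1 like a comparator.
function semverCompare(a: string, b: string): number {
  const pa = a.split(".").map(Number);
  const pb = b.split(".").map(Number);
  for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
    const d = (pa[i] ?? 0) - (pb[i] ?? 0);
    if (d !== 0) return d < 0 ? -1 : 1;
  }
  return 0;
}

semverCompare("2.15.8", "2.16.0"); // -1 -> hasNewVersion
semverCompare("2.16.0", "2.15.8"); // 1  -> local build is ahead, no prompt
```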
@@ -1194,6 +1198,127 @@ export function Settings() {
    </>
  );

+ const deepseekConfigComponent = accessStore.provider ===
+   ServiceProvider.DeepSeek && (
+   <>
+     <ListItem
+       title={Locale.Settings.Access.DeepSeek.Endpoint.Title}
+       subTitle={
+         Locale.Settings.Access.DeepSeek.Endpoint.SubTitle +
+         DeepSeek.ExampleEndpoint
+       }
+     >
+       <input
+         aria-label={Locale.Settings.Access.DeepSeek.Endpoint.Title}
+         type="text"
+         value={accessStore.deepseekUrl}
+         placeholder={DeepSeek.ExampleEndpoint}
+         onChange={(e) =>
+           accessStore.update(
+             (access) => (access.deepseekUrl = e.currentTarget.value),
+           )
+         }
+       ></input>
+     </ListItem>
+     <ListItem
+       title={Locale.Settings.Access.DeepSeek.ApiKey.Title}
+       subTitle={Locale.Settings.Access.DeepSeek.ApiKey.SubTitle}
+     >
+       <PasswordInput
+         aria-label={Locale.Settings.Access.DeepSeek.ApiKey.Title}
+         value={accessStore.deepseekApiKey}
+         type="text"
+         placeholder={Locale.Settings.Access.DeepSeek.ApiKey.Placeholder}
+         onChange={(e) => {
+           accessStore.update(
+             (access) => (access.deepseekApiKey = e.currentTarget.value),
+           );
+         }}
+       />
+     </ListItem>
+   </>
+ );
+
+ const XAIConfigComponent = accessStore.provider === ServiceProvider.XAI && (
+   <>
+     <ListItem
+       title={Locale.Settings.Access.XAI.Endpoint.Title}
+       subTitle={
+         Locale.Settings.Access.XAI.Endpoint.SubTitle + XAI.ExampleEndpoint
+       }
+     >
+       <input
+         aria-label={Locale.Settings.Access.XAI.Endpoint.Title}
+         type="text"
+         value={accessStore.xaiUrl}
+         placeholder={XAI.ExampleEndpoint}
+         onChange={(e) =>
+           accessStore.update(
+             (access) => (access.xaiUrl = e.currentTarget.value),
+           )
+         }
+       ></input>
+     </ListItem>
+     <ListItem
+       title={Locale.Settings.Access.XAI.ApiKey.Title}
+       subTitle={Locale.Settings.Access.XAI.ApiKey.SubTitle}
+     >
+       <PasswordInput
+         aria-label={Locale.Settings.Access.XAI.ApiKey.Title}
+         value={accessStore.xaiApiKey}
+         type="text"
+         placeholder={Locale.Settings.Access.XAI.ApiKey.Placeholder}
+         onChange={(e) => {
+           accessStore.update(
+             (access) => (access.xaiApiKey = e.currentTarget.value),
+           );
+         }}
+       />
+     </ListItem>
+   </>
+ );
+
+ const chatglmConfigComponent = accessStore.provider ===
+   ServiceProvider.ChatGLM && (
+   <>
+     <ListItem
+       title={Locale.Settings.Access.ChatGLM.Endpoint.Title}
+       subTitle={
+         Locale.Settings.Access.ChatGLM.Endpoint.SubTitle +
+         ChatGLM.ExampleEndpoint
+       }
+     >
+       <input
+         aria-label={Locale.Settings.Access.ChatGLM.Endpoint.Title}
+         type="text"
+         value={accessStore.chatglmUrl}
+         placeholder={ChatGLM.ExampleEndpoint}
+         onChange={(e) =>
+           accessStore.update(
+             (access) => (access.chatglmUrl = e.currentTarget.value),
+           )
+         }
+       ></input>
+     </ListItem>
+     <ListItem
+       title={Locale.Settings.Access.ChatGLM.ApiKey.Title}
+       subTitle={Locale.Settings.Access.ChatGLM.ApiKey.SubTitle}
+     >
+       <PasswordInput
+         aria-label={Locale.Settings.Access.ChatGLM.ApiKey.Title}
+         value={accessStore.chatglmApiKey}
+         type="text"
+         placeholder={Locale.Settings.Access.ChatGLM.ApiKey.Placeholder}
+         onChange={(e) => {
+           accessStore.update(
+             (access) => (access.chatglmApiKey = e.currentTarget.value),
+           );
+         }}
+       />
+     </ListItem>
+   </>
+ );
+
  const stabilityConfigComponent = accessStore.provider ===
    ServiceProvider.Stability && (
    <>

@@ -1357,9 +1482,17 @@ export function Settings() {
      {checkingUpdate ? (
        <LoadingIcon />
      ) : hasNewVersion ? (
+       clientConfig?.isApp ? (
+         <IconButton
+           icon={<ResetIcon></ResetIcon>}
+           text={Locale.Settings.Update.GoToUpdate}
+           onClick={() => clientUpdate()}
+         />
+       ) : (
          <Link href={updateUrl} target="_blank" className="link">
            {Locale.Settings.Update.GoToUpdate}
          </Link>
+       )
      ) : (
        <IconButton
          icon={<ResetIcon></ResetIcon>}

@@ -1509,6 +1642,22 @@ export function Settings() {
        }
      ></input>
    </ListItem>
+   <ListItem
+     title={Locale.Mask.Config.CodeFold.Title}
+     subTitle={Locale.Mask.Config.CodeFold.SubTitle}
+   >
+     <input
+       aria-label={Locale.Mask.Config.CodeFold.Title}
+       type="checkbox"
+       checked={config.enableCodeFold}
+       data-testid="enable-code-fold-checkbox"
+       onChange={(e) =>
+         updateConfig(
+           (config) => (config.enableCodeFold = e.currentTarget.checked),
+         )
+       }
+     ></input>
+   </ListItem>
  </List>

  <SyncItems />

@@ -1626,8 +1775,11 @@ export function Settings() {
    {alibabaConfigComponent}
    {tencentConfigComponent}
    {moonshotConfigComponent}
+   {deepseekConfigComponent}
    {stabilityConfigComponent}
    {lflytekConfigComponent}
+   {XAIConfigComponent}
+   {chatglmConfigComponent}
  </>
)}
</>

@@ -1662,9 +1814,11 @@ export function Settings() {
  <ListItem
    title={Locale.Settings.Access.CustomModel.Title}
    subTitle={Locale.Settings.Access.CustomModel.SubTitle}
+   vertical={true}
  >
    <input
      aria-label={Locale.Settings.Access.CustomModel.Title}
+     style={{ width: "100%", maxWidth: "unset", textAlign: "left" }}
      type="text"
      value={config.customModels}
      placeholder="model1,model2,model3"

@@ -1691,7 +1845,18 @@ export function Settings() {
  {shouldShowPromptModal && (
    <UserPromptModal onClose={() => setShowPromptModal(false)} />
  )}

+ <List>
+   <RealtimeConfigList
+     realtimeConfig={config.realtimeConfig}
+     updateConfig={(updater) => {
+       const realtimeConfig = { ...config.realtimeConfig };
+       updater(realtimeConfig);
+       config.update(
+         (config) => (config.realtimeConfig = realtimeConfig),
+       );
+     }}
+   />
+ </List>
  <List>
    <TTSConfigList
      ttsConfig={config.ttsConfig}
@@ -22,7 +22,6 @@ import {
MIN_SIDEBAR_WIDTH,
NARROW_SIDEBAR_WIDTH,
Path,
PLUGINS,
REPO_URL,
} from "../constant";

@@ -30,6 +29,13 @@ import { Link, useNavigate } from "react-router-dom";
import { isIOS, useMobileScreen } from "../utils";
import dynamic from "next/dynamic";
import { showConfirm, Selector } from "./ui-lib";
import clsx from "clsx";

const DISCOVERY = [
{ name: Locale.Plugin.Name, path: Path.Plugins },
{ name: "Stable Diffusion", path: Path.Sd },
{ name: Locale.SearchChat.Page.Title, path: Path.SearchChat },
];

const ChatList = dynamic(async () => (await import("./chat-list")).ChatList, {
loading: () => null,

@@ -141,9 +147,9 @@ export function SideBarContainer(props: {
const { children, className, onDragStart, shouldNarrow } = props;
return (
<div
className={`${styles.sidebar} ${className} ${
shouldNarrow && styles["narrow-sidebar"]
}`}
className={clsx(styles.sidebar, className, {
[styles["narrow-sidebar"]]: shouldNarrow,
})}
style={{
// #3016 disable transition on ios mobile screen
transition: isMobileScreen && isIOSMobile ? "none" : undefined,

@@ -165,18 +171,24 @@ export function SideBarHeader(props: {
subTitle?: string | React.ReactNode;
logo?: React.ReactNode;
children?: React.ReactNode;
shouldNarrow?: boolean;
}) {
const { title, subTitle, logo, children } = props;
const { title, subTitle, logo, children, shouldNarrow } = props;
return (
<Fragment>
<div className={styles["sidebar-header"]} data-tauri-drag-region>
<div
className={clsx(styles["sidebar-header"], {
[styles["sidebar-header-narrow"]]: shouldNarrow,
})}
data-tauri-drag-region
>
<div className={styles["sidebar-title-container"]}>
<div className={styles["sidebar-title"]} data-tauri-drag-region>
{title}
</div>
<div className={styles["sidebar-sub-title"]}>{subTitle}</div>
</div>
<div className={styles["sidebar-logo"] + " no-dark"}>{logo}</div>
<div className={clsx(styles["sidebar-logo"], "no-dark")}>{logo}</div>
</div>
{children}
</Fragment>

@@ -212,7 +224,7 @@ export function SideBarTail(props: {
export function SideBar(props: { className?: string }) {
useHotKey();
const { onDragStart, shouldNarrow } = useDragSideBar();
const [showPluginSelector, setShowPluginSelector] = useState(false);
const [showDiscoverySelector, setshowDiscoverySelector] = useState(false);
const navigate = useNavigate();
const config = useAppConfig();
const chatStore = useChatStore();

@@ -227,6 +239,7 @@ export function SideBar(props: { className?: string }) {
title="NextChat"
subTitle="Build your own AI assistant."
logo={<ChatGptIcon />}
shouldNarrow={shouldNarrow}
>
<div className={styles["sidebar-header-bar"]}>
<IconButton

@@ -246,21 +259,21 @@ export function SideBar(props: { className?: string }) {
icon={<DiscoveryIcon />}
text={shouldNarrow ? undefined : Locale.Discovery.Name}
className={styles["sidebar-bar-button"]}
onClick={() => setShowPluginSelector(true)}
onClick={() => setshowDiscoverySelector(true)}
shadow
/>
</div>
{showPluginSelector && (
{showDiscoverySelector && (
<Selector
items={[
...PLUGINS.map((item) => {
...DISCOVERY.map((item) => {
return {
title: item.name,
value: item.path,
};
}),
]}
onClose={() => setShowPluginSelector(false)}
onClose={() => setshowDiscoverySelector(false)}
onSelection={(s) => {
navigate(s[0], { state: { fromHome: true } });
}}

@@ -279,7 +292,7 @@ export function SideBar(props: { className?: string }) {
<SideBarTail
primaryAction={
<>
<div className={styles["sidebar-action"] + " " + styles.mobile}>
<div className={clsx(styles["sidebar-action"], styles.mobile)}>
<IconButton
icon={<DeleteIcon />}
onClick={async () => {
@@ -23,6 +23,7 @@ import React, {
useRef,
} from "react";
import { IconButton } from "./button";
import clsx from "clsx";

export function Popover(props: {
children: JSX.Element;

@@ -45,7 +46,7 @@ export function Popover(props: {

export function Card(props: { children: JSX.Element[]; className?: string }) {
return (
<div className={styles.card + " " + props.className}>{props.children}</div>
<div className={clsx(styles.card, props.className)}>{props.children}</div>
);
}

@@ -60,11 +61,13 @@ export function ListItem(props: {
}) {
return (
<div
className={
styles["list-item"] +
` ${props.vertical ? styles["vertical"] : ""} ` +
` ${props.className || ""}`
}
className={clsx(
styles["list-item"],
{
[styles["vertical"]]: props.vertical,
},
props.className,
)}
onClick={props.onClick}
>
<div className={styles["list-header"]}>

@@ -135,9 +138,9 @@ export function Modal(props: ModalProps) {

return (
<div
className={
styles["modal-container"] + ` ${isMax && styles["modal-container-max"]}`
}
className={clsx(styles["modal-container"], {
[styles["modal-container-max"]]: isMax,
})}
>
<div className={styles["modal-header"]}>
<div className={styles["modal-title"]}>{props.title}</div>

@@ -260,7 +263,7 @@ export function Input(props: InputProps) {
return (
<textarea
{...props}
className={`${styles["input"]} ${props.className}`}
className={clsx(styles["input"], props.className)}
></textarea>
);
}

@@ -301,9 +304,13 @@ export function Select(
const { className, children, align, ...otherProps } = props;
return (
<div
className={`${styles["select-with-icon"]} ${
align === "left" ? styles["left-align-option"] : ""
} ${className}`}
className={clsx(
styles["select-with-icon"],
{
[styles["left-align-option"]]: align === "left",
},
className,
)}
>
<select className={styles["select-with-icon-select"]} {...otherProps}>
{children}

@@ -509,9 +516,9 @@ export function Selector<T>(props: {
const selected = selectedValues.includes(item.value);
return (
<ListItem
className={`${styles["selector-item"]} ${
item.disable && styles["selector-item-disabled"]
}`}
className={clsx(styles["selector-item"], {
[styles["selector-item-disabled"]]: item.disable,
})}
key={i}
title={item.title}
subTitle={item.subTitle}
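Every hunk above replaces hand-rolled class-string concatenation with clsx. A minimal standalone sketch of why the two forms differ, assuming clsx's standard string/object API:

import clsx from "clsx";

const shouldNarrow = false;
// Manual concatenation leaks "false"/"undefined" into the class attribute:
const before = `sidebar ${shouldNarrow && "narrow-sidebar"}`; // "sidebar false"
// clsx drops falsy entries and normalizes spacing:
const after = clsx("sidebar", { "narrow-sidebar": shouldNarrow }); // "sidebar"
console.log(before, after);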
@@ -0,0 +1 @@
export * from "./voice-print";
@@ -0,0 +1,11 @@
.voice-print {
width: 100%;
height: 60px;
margin: 20px 0;

canvas {
width: 100%;
height: 100%;
filter: brightness(1.2); // boost overall brightness
}
}
@@ -0,0 +1,180 @@
import { useEffect, useRef, useCallback } from "react";
import styles from "./voice-print.module.scss";

interface VoicePrintProps {
frequencies?: Uint8Array;
isActive?: boolean;
}

export function VoicePrint({ frequencies, isActive }: VoicePrintProps) {
// canvas ref, used to obtain the drawing context
const canvasRef = useRef<HTMLCanvasElement>(null);
// stores historical frequency data for smoothing
const historyRef = useRef<number[][]>([]);
// number of history frames to keep; controls the amount of smoothing
const historyLengthRef = useRef(10);
// stores the animation frame id for cleanup
const animationFrameRef = useRef<number>();

/**
* Update the frequency history.
* A FIFO queue keeps the history at a fixed length.
*/
const updateHistory = useCallback((freqArray: number[]) => {
historyRef.current.push(freqArray);
if (historyRef.current.length > historyLengthRef.current) {
historyRef.current.shift();
}
}, []);

useEffect(() => {
const canvas = canvasRef.current;
if (!canvas) return;

const ctx = canvas.getContext("2d");
if (!ctx) return;

/**
* Handle high-DPI displays:
* scale the canvas backing resolution by the device pixel ratio.
*/
const dpr = window.devicePixelRatio || 1;
canvas.width = canvas.offsetWidth * dpr;
canvas.height = canvas.offsetHeight * dpr;
ctx.scale(dpr, dpr);

/**
* Main draw function.
* Uses requestAnimationFrame for smooth animation.
* Steps:
* 1. clear the canvas
* 2. update the history
* 3. compute the waveform points
* 4. draw the vertically symmetric voice print
*/
const draw = () => {
// clear the canvas
ctx.clearRect(0, 0, canvas.width, canvas.height);

if (!frequencies || !isActive) {
historyRef.current = [];
return;
}

const freqArray = Array.from(frequencies);
updateHistory(freqArray);

// draw the voice print
const points: [number, number][] = [];
const centerY = canvas.height / 2;
const width = canvas.width;
const sliceWidth = width / (frequencies.length - 1);

// draw the main waveform
ctx.beginPath();
ctx.moveTo(0, centerY);

/**
* Drawing algorithm:
* 1. average against the history for smooth transitions
* 2. add natural movement with a sine function
* 3. connect points with Bézier curves for a smoother line
* 4. mirror the curve to complete the voice print
*/
for (let i = 0; i < frequencies.length; i++) {
const x = i * sliceWidth;
let avgFrequency = frequencies[i];

/**
* Smoothing:
* 1. collect the frequency at this index across the history
* 2. average the current value with the historical values
* 3. derive the displayed height from that average
*/
if (historyRef.current.length > 0) {
const historicalValues = historyRef.current.map((h) => h[i] || 0);
avgFrequency =
(avgFrequency + historicalValues.reduce((a, b) => a + b, 0)) /
(historyRef.current.length + 1);
}

/**
* Waveform transform:
* 1. normalize the frequency to the 0-1 range
* 2. apply a time-based sine modulation
* 3. join points smoothly with Bézier curves
*/
const normalized = avgFrequency / 255.0;
const height = normalized * (canvas.height / 2);
const y = centerY + height * Math.sin(i * 0.2 + Date.now() * 0.002);

points.push([x, y]);

if (i === 0) {
ctx.moveTo(x, y);
} else {
// quadratic curves make the waveform smoother
const prevPoint = points[i - 1];
const midX = (prevPoint[0] + x) / 2;
ctx.quadraticCurveTo(
prevPoint[0],
prevPoint[1],
midX,
(prevPoint[1] + y) / 2,
);
}
}

// draw the mirrored lower half
for (let i = points.length - 1; i >= 0; i--) {
const [x, y] = points[i];
const symmetricY = centerY - (y - centerY);
if (i === points.length - 1) {
ctx.lineTo(x, symmetricY);
} else {
const nextPoint = points[i + 1];
const midX = (nextPoint[0] + x) / 2;
ctx.quadraticCurveTo(
nextPoint[0],
centerY - (nextPoint[1] - centerY),
midX,
centerY - ((nextPoint[1] + y) / 2 - centerY),
);
}
}

ctx.closePath();

/**
* Gradient fill:
* a three-stop left-to-right gradient with transparency,
* in blue tones for a nicer look.
*/
const gradient = ctx.createLinearGradient(0, 0, canvas.width, 0);
gradient.addColorStop(0, "rgba(100, 180, 255, 0.95)");
gradient.addColorStop(0.5, "rgba(140, 200, 255, 0.9)");
gradient.addColorStop(1, "rgba(180, 220, 255, 0.95)");

ctx.fillStyle = gradient;
ctx.fill();

animationFrameRef.current = requestAnimationFrame(draw);
};

// start the animation loop
draw();

// cleanup: cancel the animation when the component unmounts
return () => {
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
}
};
}, [frequencies, isActive, updateHistory]);

return (
<div className={styles["voice-print"]}>
<canvas ref={canvasRef} />
</div>
);
}
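How the component above gets its frequencies is not part of this diff. One plausible wiring, assuming a periodic poll of the analyser-backed getByteFrequencyData exposed by the AudioHandler further below (the polling interval and wrapper component are illustrative):

import { useEffect, useState } from "react";
import { VoicePrint } from "./voice-print";

export function VoicePrintDemo({
  getFrequencies,
}: {
  getFrequencies: () => Uint8Array;
}) {
  const [frequencies, setFrequencies] = useState<Uint8Array>();
  useEffect(() => {
    const timer = setInterval(() => {
      // copy the analyser buffer so React sees a fresh reference each tick
      setFrequencies(new Uint8Array(getFrequencies()));
    }, 100);
    return () => clearInterval(timer);
  }, [getFrequencies]);
  return <VoicePrint frequencies={frequencies} isActive={!!frequencies} />;
}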
@@ -1,5 +1,6 @@
import md5 from "spark-md5";
import { DEFAULT_MODELS, DEFAULT_GA_ID } from "../constant";
import { isGPT4Model } from "../utils/model";

declare global {
namespace NodeJS {

@@ -22,6 +23,7 @@ declare global {
DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
CUSTOM_MODELS?: string; // to control custom models
DEFAULT_MODEL?: string; // to control default model in every new chat window
VISION_MODELS?: string; // to control vision models

// stability only
STABILITY_URL?: string;

@@ -71,6 +73,17 @@ declare global {
IFLYTEK_API_KEY?: string;
IFLYTEK_API_SECRET?: string;

DEEPSEEK_URL?: string;
DEEPSEEK_API_KEY?: string;

// xai only
XAI_URL?: string;
XAI_API_KEY?: string;

// chatglm only
CHATGLM_URL?: string;
CHATGLM_API_KEY?: string;

// custom template for preprocessing user input
DEFAULT_INPUT_TEMPLATE?: string;
}

@@ -116,23 +129,17 @@ export const getServerSideConfig = () => {
const disableGPT4 = !!process.env.DISABLE_GPT4;
let customModels = process.env.CUSTOM_MODELS ?? "";
let defaultModel = process.env.DEFAULT_MODEL ?? "";
let visionModels = process.env.VISION_MODELS ?? "";

if (disableGPT4) {
if (customModels) customModels += ",";
customModels += DEFAULT_MODELS.filter(
(m) =>
(m.name.startsWith("gpt-4") || m.name.startsWith("chatgpt-4o")) &&
!m.name.startsWith("gpt-4o-mini"),
)
customModels += DEFAULT_MODELS.filter((m) => isGPT4Model(m.name))
.map((m) => "-" + m.name)
.join(",");
if (
(defaultModel.startsWith("gpt-4") ||
defaultModel.startsWith("chatgpt-4o")) &&
!defaultModel.startsWith("gpt-4o-mini")
)
if (defaultModel && isGPT4Model(defaultModel)) {
defaultModel = "";
}
}

const isStability = !!process.env.STABILITY_API_KEY;

@@ -146,6 +153,9 @@ export const getServerSideConfig = () => {
const isAlibaba = !!process.env.ALIBABA_API_KEY;
const isMoonshot = !!process.env.MOONSHOT_API_KEY;
const isIflytek = !!process.env.IFLYTEK_API_KEY;
const isDeepSeek = !!process.env.DEEPSEEK_API_KEY;
const isXAI = !!process.env.XAI_API_KEY;
const isChatGLM = !!process.env.CHATGLM_API_KEY;
// const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
// const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
// const randomIndex = Math.floor(Math.random() * apiKeys.length);

@@ -208,6 +218,18 @@ export const getServerSideConfig = () => {
iflytekApiKey: process.env.IFLYTEK_API_KEY,
iflytekApiSecret: process.env.IFLYTEK_API_SECRET,

isDeepSeek,
deepseekUrl: process.env.DEEPSEEK_URL,
deepseekApiKey: getApiKey(process.env.DEEPSEEK_API_KEY),

isXAI,
xaiUrl: process.env.XAI_URL,
xaiApiKey: getApiKey(process.env.XAI_API_KEY),

isChatGLM,
chatglmUrl: process.env.CHATGLM_URL,
chatglmApiKey: getApiKey(process.env.CHATGLM_API_KEY),

cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID,
cloudflareKVNamespaceId: process.env.CLOUDFLARE_KV_NAMESPACE_ID,
cloudflareKVApiKey: getApiKey(process.env.CLOUDFLARE_KV_API_KEY),

@@ -229,6 +251,7 @@ export const getServerSideConfig = () => {
disableFastLink: !!process.env.DISABLE_FAST_LINK,
customModels,
defaultModel,
visionModels,
allowedWebDavEndpoints,
};
};
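isGPT4Model is imported from ../utils/model, but its body is not part of this diff. A sketch that reproduces exactly the inline conditions it replaces above (the real helper may cover additional model families):

export function isGPT4Model(model: string): boolean {
  return (
    (model.startsWith("gpt-4") || model.startsWith("chatgpt-4o")) &&
    !model.startsWith("gpt-4o-mini")
  );
}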
app/constant.ts

@@ -11,7 +11,6 @@ export const RUNTIME_CONFIG_DOM = "danger-runtime-config";

export const STABILITY_BASE_URL = "https://api.stability.ai";

export const DEFAULT_API_HOST = "https://api.nextchat.dev";
export const OPENAI_BASE_URL = "https://api.openai.com";
export const ANTHROPIC_BASE_URL = "https://api.anthropic.com";

@@ -29,6 +28,12 @@ export const TENCENT_BASE_URL = "https://hunyuan.tencentcloudapi.com";
export const MOONSHOT_BASE_URL = "https://api.moonshot.cn";
export const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com";

export const DEEPSEEK_BASE_URL = "https://api.deepseek.com";

export const XAI_BASE_URL = "https://api.x.ai";

export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";

export const CACHE_URL_PREFIX = "/api/cache";
export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`;

@@ -60,6 +65,9 @@ export enum ApiPath {
Iflytek = "/api/iflytek",
Stability = "/api/stability",
Artifacts = "/api/artifacts",
XAI = "/api/xai",
ChatGLM = "/api/chatglm",
DeepSeek = "/api/deepseek",
}

export enum SlotID {

@@ -112,6 +120,9 @@ export enum ServiceProvider {
Moonshot = "Moonshot",
Stability = "Stability",
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
}

// Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings

@@ -134,6 +145,9 @@ export enum ModelProvider {
Hunyuan = "Hunyuan",
Moonshot = "Moonshot",
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
}

export const Stability = {

@@ -216,6 +230,23 @@ export const Iflytek = {
ChatPath: "v1/chat/completions",
};

export const DeepSeek = {
ExampleEndpoint: DEEPSEEK_BASE_URL,
ChatPath: "chat/completions",
};

export const XAI = {
ExampleEndpoint: XAI_BASE_URL,
ChatPath: "v1/chat/completions",
};

export const ChatGLM = {
ExampleEndpoint: CHATGLM_BASE_URL,
ChatPath: "api/paas/v4/chat/completions",
ImagePath: "api/paas/v4/images/generations",
VideoPath: "api/paas/v4/videos/generations",
};

export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
// export const DEFAULT_SYSTEM_TEMPLATE = `
// You are ChatGPT, a large language model trained by {{ServiceProvider}}.

@@ -245,6 +276,7 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"gpt-4o": "2023-10",
"gpt-4o-2024-05-13": "2023-10",
"gpt-4o-2024-08-06": "2023-10",
"gpt-4o-2024-11-20": "2023-10",
"chatgpt-4o-latest": "2023-10",
"gpt-4o-mini": "2023-10",
"gpt-4o-mini-2024-07-18": "2023-10",

@@ -255,6 +287,8 @@ export const KnowledgeCutOffDate: Record<string, string> = {
// it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
"gemini-pro": "2023-12",
"gemini-pro-vision": "2023-12",
"deepseek-chat": "2024-07",
"deepseek-coder": "2024-07",
};

export const DEFAULT_TTS_ENGINE = "OpenAI-TTS";

@@ -271,6 +305,23 @@ export const DEFAULT_TTS_VOICES = [
"shimmer",
];

export const VISION_MODEL_REGEXES = [
/vision/,
/gpt-4o/,
/claude-3/,
/gemini-1\.5/,
/gemini-exp/,
/gemini-2\.0/,
/learnlm/,
/qwen-vl/,
/qwen2-vl/,
/gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
/^dall-e-3$/, // Matches exactly "dall-e-3"
/glm-4v/,
];

export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];

const openaiModels = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",

@@ -284,6 +335,7 @@ const openaiModels = [
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"chatgpt-4o-latest",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",

@@ -296,10 +348,23 @@ const openaiModels = [
];

const googleModels = [
"gemini-1.0-pro",
"gemini-1.0-pro", // Deprecated on 2/15/2025
"gemini-1.5-pro-latest",
"gemini-1.5-pro",
"gemini-1.5-pro-002",
"gemini-1.5-pro-exp-0827",
"gemini-1.5-flash-latest",
"gemini-pro-vision",
"gemini-1.5-flash-8b-latest",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-1.5-flash-002",
"gemini-1.5-flash-exp-0827",
"learnlm-1.5-pro-experimental",
"gemini-exp-1114",
"gemini-exp-1121",
"gemini-exp-1206",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-thinking-exp-1219",
];

const anthropicModels = [

@@ -308,8 +373,13 @@ const anthropicModels = [
"claude-2.1",
"claude-3-sonnet-20240229",
"claude-3-opus-20240229",
"claude-3-opus-latest",
"claude-3-haiku-20240307",
"claude-3-5-haiku-20241022",
"claude-3-5-haiku-latest",
"claude-3-5-sonnet-20240620",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-latest",
];

const baiduModels = [

@@ -365,6 +435,30 @@ const iflytekModels = [
"4.0Ultra",
];

const deepseekModels = ["deepseek-chat", "deepseek-coder"];

const xAIModes = ["grok-beta"];

const chatglmModels = [
"glm-4-plus",
"glm-4-0520",
"glm-4",
"glm-4-air",
"glm-4-airx",
"glm-4-long",
"glm-4-flashx",
"glm-4-flash",
"glm-4v-plus",
"glm-4v",
"glm-4v-flash", // free
"cogview-3-plus",
"cogview-3",
"cogview-3-flash", // free
// not compatible with the polling task flow yet
// "cogvideox",
// "cogvideox-flash", // free
];

let seq = 1000; // the built-in model sort sequence starts at 1000
export const DEFAULT_MODELS = [
...openaiModels.map((name) => ({

@@ -477,6 +571,39 @@ export const DEFAULT_MODELS = [
sorted: 10,
},
})),
...xAIModes.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "xai",
providerName: "XAI",
providerType: "xai",
sorted: 11,
},
})),
...chatglmModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "chatglm",
providerName: "ChatGLM",
providerType: "chatglm",
sorted: 12,
},
})),
...deepseekModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "deepseek",
providerName: "DeepSeek",
providerType: "deepseek",
sorted: 13,
},
})),
] as const;

export const CHAT_PAGE_SIZE = 15;

@@ -496,11 +623,6 @@ export const internalAllowedWebDavEndpoints = [
];

export const DEFAULT_GA_ID = "G-89WN60ZK2E";
export const PLUGINS = [
{ name: "Plugins", path: Path.Plugins },
{ name: "Stable Diffusion", path: Path.Sd },
{ name: "Search Chat", path: Path.SearchChat },
];

export const SAAS_CHAT_URL = "https://nextchat.dev/chat";
export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github";
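The two regex lists above are presumably consumed together; a hedged sketch of the combination (the function name is illustrative, and the real check likely also honors the VISION_MODELS override added to the server config in this same change):

export function matchesVisionModel(model: string): boolean {
  // the exclude list wins over the include list
  if (EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))) return false;
  return VISION_MODEL_REGEXES.some((re) => re.test(model));
}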
@@ -26,6 +26,13 @@ declare interface Window {
isPermissionGranted(): Promise<boolean>;
sendNotification(options: string | Options): void;
};
updater: {
checkUpdate(): Promise<UpdateResult>;
installUpdate(): Promise<void>;
onUpdaterEvent(
handler: (status: UpdateStatusResult) => void,
): Promise<UnlistenFn>;
};
http: {
fetch<T>(
url: string,
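Usage implied by the declaration above, assuming these members hang off window.__TAURI__ as in the rest of the app and that UpdateResult carries Tauri's usual shouldUpdate flag (both assumptions, since neither is shown in this hunk):

async function maybeInstallUpdate() {
  const updater = window.__TAURI__?.updater;
  if (!updater) return; // web build: no Tauri runtime available
  const result = await updater.checkUpdate();
  if (result.shouldUpdate) {
    await updater.installUpdate();
  }
}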
@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="16" height="16" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4 28C4 26.8954 4.89543 26 6 26H10V38H6C4.89543 38 4 37.1046 4 36V28Z" fill="none" />
<path d="M38 26H42C43.1046 26 44 26.8954 44 28V36C44 37.1046 43.1046 38 42 38H38V26Z"
fill="none" />
<path
d="M10 36V24C10 16.268 16.268 10 24 10C31.732 10 38 16.268 38 24V36M10 26H6C4.89543 26 4 26.8954 4 28V36C4 37.1046 4.89543 38 6 38H10V26ZM38 26H42C43.1046 26 44 26.8954 44 28V36C44 37.1046 43.1046 38 42 38H38V26Z"
stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M16 32H20L22 26L26 38L28 32H32" stroke="#333" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
</svg>

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="24" height="24" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
d="M14.5 8C13.8406 8.37652 13.2062 8.79103 12.6 9.24051C11.5625 10.0097 10.6074 10.8814 9.75 11.8402C6.79377 15.1463 5 19.4891 5 24.2455C5 34.6033 13.5066 43 24 43C34.4934 43 43 34.6033 43 24.2455C43 19.4891 41.2062 15.1463 38.25 11.8402C37.3926 10.8814 36.4375 10.0097 35.4 9.24051C34.7938 8.79103 34.1594 8.37652 33.5 8"
stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M24 4V24" stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
</svg>

@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="24" height="24" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
d="M31 24V11C31 7.13401 27.866 4 24 4C20.134 4 17 7.13401 17 11V24C17 27.866 20.134 31 24 31C27.866 31 31 27.866 31 24Z"
stroke="#d0021b" stroke-width="4" stroke-linejoin="round" />
<path
d="M9 23C9 31.2843 15.7157 38 24 38C25.7532 38 27.4361 37.6992 29 37.1465M39 23C39 25.1333 38.5547 27.1626 37.7519 29"
stroke="#d0021b" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M24 38V44" stroke="#d0021b" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
<path d="M42 42L6 6" stroke="#d0021b" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
</svg>

@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="24" height="24" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect x="17" y="4" width="14" height="27" rx="7" fill="none" stroke="#333" stroke-width="4"
stroke-linejoin="round" />
<path d="M9 23C9 31.2843 15.7157 38 24 38C32.2843 38 39 31.2843 39 23" stroke="#333"
stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M24 38V44" stroke="#333" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
</svg>
@@ -0,0 +1,200 @@
export class AudioHandler {
private context: AudioContext;
private mergeNode: ChannelMergerNode;
private analyserData: Uint8Array;
public analyser: AnalyserNode;
private workletNode: AudioWorkletNode | null = null;
private stream: MediaStream | null = null;
private source: MediaStreamAudioSourceNode | null = null;
private recordBuffer: Int16Array[] = [];
private readonly sampleRate = 24000;

private nextPlayTime: number = 0;
private isPlaying: boolean = false;
private playbackQueue: AudioBufferSourceNode[] = [];
private playBuffer: Int16Array[] = [];

constructor() {
this.context = new AudioContext({ sampleRate: this.sampleRate });
// using ChannelMergerNode to get merged audio data, and then get analyser data.
this.mergeNode = new ChannelMergerNode(this.context, { numberOfInputs: 2 });
this.analyser = new AnalyserNode(this.context, { fftSize: 256 });
this.analyserData = new Uint8Array(this.analyser.frequencyBinCount);
this.mergeNode.connect(this.analyser);
}

getByteFrequencyData() {
this.analyser.getByteFrequencyData(this.analyserData);
return this.analyserData;
}

async initialize() {
await this.context.audioWorklet.addModule("/audio-processor.js");
}

async startRecording(onChunk: (chunk: Uint8Array) => void) {
try {
if (!this.workletNode) {
await this.initialize();
}

this.stream = await navigator.mediaDevices.getUserMedia({
audio: {
channelCount: 1,
sampleRate: this.sampleRate,
echoCancellation: true,
noiseSuppression: true,
},
});

await this.context.resume();
this.source = this.context.createMediaStreamSource(this.stream);
this.workletNode = new AudioWorkletNode(
this.context,
"audio-recorder-processor",
);

this.workletNode.port.onmessage = (event) => {
if (event.data.eventType === "audio") {
const float32Data = event.data.audioData;
const int16Data = new Int16Array(float32Data.length);

for (let i = 0; i < float32Data.length; i++) {
const s = Math.max(-1, Math.min(1, float32Data[i]));
int16Data[i] = s < 0 ? s * 0x8000 : s * 0x7fff;
}

const uint8Data = new Uint8Array(int16Data.buffer);
onChunk(uint8Data);
// save recordBuffer
// @ts-ignore
this.recordBuffer.push.apply(this.recordBuffer, int16Data);
}
};

this.source.connect(this.workletNode);
this.source.connect(this.mergeNode, 0, 0);
this.workletNode.connect(this.context.destination);

this.workletNode.port.postMessage({ command: "START_RECORDING" });
} catch (error) {
console.error("Error starting recording:", error);
throw error;
}
}

stopRecording() {
if (!this.workletNode || !this.source || !this.stream) {
throw new Error("Recording not started");
}

this.workletNode.port.postMessage({ command: "STOP_RECORDING" });

this.workletNode.disconnect();
this.source.disconnect();
this.stream.getTracks().forEach((track) => track.stop());
}
startStreamingPlayback() {
this.isPlaying = true;
this.nextPlayTime = this.context.currentTime;
}

stopStreamingPlayback() {
this.isPlaying = false;
this.playbackQueue.forEach((source) => source.stop());
this.playbackQueue = [];
this.playBuffer = [];
}

playChunk(chunk: Uint8Array) {
if (!this.isPlaying) return;

const int16Data = new Int16Array(chunk.buffer);
// @ts-ignore
this.playBuffer.push.apply(this.playBuffer, int16Data); // save playBuffer

const float32Data = new Float32Array(int16Data.length);
for (let i = 0; i < int16Data.length; i++) {
float32Data[i] = int16Data[i] / (int16Data[i] < 0 ? 0x8000 : 0x7fff);
}

const audioBuffer = this.context.createBuffer(
1,
float32Data.length,
this.sampleRate,
);
audioBuffer.getChannelData(0).set(float32Data);

const source = this.context.createBufferSource();
source.buffer = audioBuffer;
source.connect(this.context.destination);
source.connect(this.mergeNode, 0, 1);

const chunkDuration = audioBuffer.length / this.sampleRate;

source.start(this.nextPlayTime);

this.playbackQueue.push(source);
source.onended = () => {
const index = this.playbackQueue.indexOf(source);
if (index > -1) {
this.playbackQueue.splice(index, 1);
}
};

this.nextPlayTime += chunkDuration;

if (this.nextPlayTime < this.context.currentTime) {
this.nextPlayTime = this.context.currentTime;
}
}
_saveData(data: Int16Array, bytesPerSample = 16): Blob {
const headerLength = 44;
const numberOfChannels = 1;
const byteLength = data.buffer.byteLength;
const header = new Uint8Array(headerLength);
const view = new DataView(header.buffer);
view.setUint32(0, 1380533830, false); // RIFF identifier 'RIFF'
view.setUint32(4, 36 + byteLength, true); // file length minus RIFF identifier length and file description length
view.setUint32(8, 1463899717, false); // RIFF type 'WAVE'
view.setUint32(12, 1718449184, false); // format chunk identifier 'fmt '
view.setUint32(16, 16, true); // format chunk length
view.setUint16(20, 1, true); // sample format (raw)
view.setUint16(22, numberOfChannels, true); // channel count
view.setUint32(24, this.sampleRate, true); // sample rate
view.setUint32(28, this.sampleRate * 4, true); // byte rate (sample rate * block align)
view.setUint16(32, numberOfChannels * 2, true); // block align (channel count * bytes per sample)
view.setUint16(34, bytesPerSample, true); // bits per sample
view.setUint32(36, 1684108385, false); // data chunk identifier 'data'
view.setUint32(40, byteLength, true); // data chunk length

// using data.buffer, so no need to setUint16 to view.
return new Blob([view, data.buffer], { type: "audio/mpeg" });
}
savePlayFile() {
// @ts-ignore
return this._saveData(new Int16Array(this.playBuffer));
}
saveRecordFile(
audioStartMillis: number | undefined,
audioEndMillis: number | undefined,
) {
const startIndex = audioStartMillis
? Math.floor((audioStartMillis * this.sampleRate) / 1000)
: 0;
const endIndex = audioEndMillis
? Math.floor((audioEndMillis * this.sampleRate) / 1000)
: this.recordBuffer.length;
return this._saveData(
// @ts-ignore
new Int16Array(this.recordBuffer.slice(startIndex, endIndex)),
);
}
async close() {
this.recordBuffer = [];
this.workletNode?.disconnect();
this.source?.disconnect();
this.stream?.getTracks().forEach((track) => track.stop());
await this.context.close();
}
}
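A minimal usage sketch for the class above, using only the methods it defines (error handling and the consumer of the chunks are omitted):

const handler = new AudioHandler();

async function demo() {
  await handler.initialize(); // loads /audio-processor.js into the worklet
  await handler.startRecording((chunk) => {
    // each chunk is mono 16-bit PCM at 24 kHz, little-endian
    console.log("chunk:", chunk.byteLength, "bytes");
  });

  // ...some time later
  handler.stopRecording();
  const wav: Blob = handler.saveRecordFile(undefined, undefined);
  console.log("recording size:", wav.size);
  await handler.close();
}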
@@ -106,6 +106,7 @@ const cn = {
copyLastMessage: "复制最后一个回复",
copyLastCode: "复制最后一个代码块",
showShortcutKey: "显示快捷方式",
clearContext: "清除上下文",
},
},
Export: {

@@ -176,7 +177,7 @@ const cn = {
},
},
Lang: {
Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language`
Name: "Language", // 注意:如果要添加新的翻译,请不要翻译此值,将它保留为 `Language`
All: "所有语言",
},
Avatar: "头像",

@@ -205,6 +206,8 @@ const cn = {
IsChecking: "正在检查更新...",
FoundUpdate: (x: string) => `发现新版本:${x}`,
GoToUpdate: "前往更新",
Success: "更新成功!",
Failed: "更新失败",
},
SendKey: "发送键",
Theme: "主题",

@@ -460,6 +463,39 @@ const cn = {
SubTitle: "样例:",
},
},
DeepSeek: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义DeepSeek API Key",
Placeholder: "DeepSeek API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
XAI: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义XAI API Key",
Placeholder: "XAI API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
ChatGLM: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义 ChatGLM API Key",
Placeholder: "ChatGLM API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
Stability: {
ApiKey: {
Title: "接口密钥",

@@ -495,8 +531,8 @@ const cn = {

Model: "模型 (model)",
CompressModel: {
Title: "压缩模型",
SubTitle: "用于压缩历史记录的模型",
Title: "对话摘要模型",
SubTitle: "用于压缩历史记录、生成对话标题的模型",
},
Temperature: {
Title: "随机性 (temperature)",

@@ -538,6 +574,39 @@ const cn = {
SubTitle: "生成语音的速度",
},
},
Realtime: {
Enable: {
Title: "实时聊天",
SubTitle: "开启实时聊天功能",
},
Provider: {
Title: "模型服务商",
SubTitle: "切换不同的服务商",
},
Model: {
Title: "模型",
SubTitle: "选择一个模型",
},
ApiKey: {
Title: "API Key",
SubTitle: "API Key",
Placeholder: "API Key",
},
Azure: {
Endpoint: {
Title: "接口地址",
SubTitle: "接口地址",
},
Deployment: {
Title: "部署名称",
SubTitle: "部署名称",
},
},
Temperature: {
Title: "随机性 (temperature)",
SubTitle: "值越大,回复越随机",
},
},
},
Store: {
DefaultTopic: "新的聊天",

@@ -573,7 +642,7 @@ const cn = {
Sysmessage: "你是一个助手",
},
SearchChat: {
Name: "搜索",
Name: "搜索聊天记录",
Page: {
Title: "搜索聊天记录",
Search: "输入搜索关键词",

@@ -665,6 +734,10 @@ const cn = {
Title: "启用Artifacts",
SubTitle: "启用之后可以直接渲染HTML页面",
},
CodeFold: {
Title: "启用代码折叠",
SubTitle: "启用之后可以自动折叠/展开过长的代码块",
},
Share: {
Title: "分享此面具",
SubTitle: "生成此面具的直达链接",
@@ -107,6 +107,7 @@ const en: LocaleType = {
copyLastMessage: "Copy Last Reply",
copyLastCode: "Copy Last Code Block",
showShortcutKey: "Show Shortcuts",
clearContext: "Clear Context",
},
},
Export: {

@@ -207,6 +208,8 @@ const en: LocaleType = {
IsChecking: "Checking update...",
FoundUpdate: (x: string) => `Found new version: ${x}`,
GoToUpdate: "Update",
Success: "Update Successful.",
Failed: "Update Failed.",
},
SendKey: "Send Key",
Theme: "Theme",

@@ -444,6 +447,39 @@ const en: LocaleType = {
SubTitle: "Example: ",
},
},
DeepSeek: {
ApiKey: {
Title: "DeepSeek API Key",
SubTitle: "Use a custom DeepSeek API Key",
Placeholder: "DeepSeek API Key",
},
Endpoint: {
Title: "Endpoint Address",
SubTitle: "Example: ",
},
},
XAI: {
ApiKey: {
Title: "XAI API Key",
SubTitle: "Use a custom XAI API Key",
Placeholder: "XAI API Key",
},
Endpoint: {
Title: "Endpoint Address",
SubTitle: "Example: ",
},
},
ChatGLM: {
ApiKey: {
Title: "ChatGLM API Key",
SubTitle: "Use a custom ChatGLM API Key",
Placeholder: "ChatGLM API Key",
},
Endpoint: {
Title: "Endpoint Address",
SubTitle: "Example: ",
},
},
Stability: {
ApiKey: {
Title: "Stability API Key",

@@ -500,8 +536,8 @@ const en: LocaleType = {

Model: "Model",
CompressModel: {
Title: "Compression Model",
SubTitle: "Model used to compress history",
Title: "Summary Model",
SubTitle: "Model used to compress history and generate title",
},
Temperature: {
Title: "Temperature",

@@ -546,6 +582,39 @@ const en: LocaleType = {
},
Engine: "TTS Engine",
},
Realtime: {
Enable: {
Title: "Realtime Chat",
SubTitle: "Enable realtime chat feature",
},
Provider: {
Title: "Model Provider",
SubTitle: "Switch between different providers",
},
Model: {
Title: "Model",
SubTitle: "Select a model",
},
ApiKey: {
Title: "API Key",
SubTitle: "API Key",
Placeholder: "API Key",
},
Azure: {
Endpoint: {
Title: "Endpoint",
SubTitle: "Endpoint",
},
Deployment: {
Title: "Deployment Name",
SubTitle: "Deployment Name",
},
},
Temperature: {
Title: "Randomness (temperature)",
SubTitle: "Higher values result in more random responses",
},
},
},
Store: {
DefaultTopic: "New Conversation",

@@ -675,6 +744,11 @@ const en: LocaleType = {
Title: "Enable Artifacts",
SubTitle: "Can render HTML page when enable artifacts.",
},
CodeFold: {
Title: "Enable CodeFold",
SubTitle:
"Automatically collapse/expand overly long code blocks when CodeFold is enabled",
},
Share: {
Title: "Share This Mask",
SubTitle: "Generate a link to this mask",
@@ -8,12 +8,12 @@ const tw = {
Error: {
Unauthorized: isApp
? `😆 對話遇到了一些問題,不用慌:
\\ 1️⃣ 想要零配置開箱即用,[點擊這裡立刻開啟對話 🚀](${SAAS_CHAT_UTM_URL})
\\ 2️⃣ 如果你想消耗自己的 OpenAI 資源,點擊[這裡](/#/settings)修改設定 ⚙️`
\\ 1️⃣ 想要無須設定開箱即用,[點選這裡立刻開啟對話 🚀](${SAAS_CHAT_UTM_URL})
\\ 2️⃣ 如果你想消耗自己的 OpenAI 資源,點選[這裡](/#/settings)修改設定 ⚙️`
: `😆 對話遇到了一些問題,不用慌:
\ 1️⃣ 想要零配置開箱即用,[點擊這裡立刻開啟對話 🚀](${SAAS_CHAT_UTM_URL})
\ 2️⃣ 如果你正在使用私有部署版本,點擊[這裡](/#/auth)輸入訪問秘鑰 🔑
\ 3️⃣ 如果你想消耗自己的 OpenAI 資源,點擊[這裡](/#/settings)修改設定 ⚙️
\ 1️⃣ 想要無須設定開箱即用,[點選這裡立刻開啟對話 🚀](${SAAS_CHAT_UTM_URL})
\ 2️⃣ 如果你正在使用私有部署版本,點選[這裡](/#/auth)輸入存取金鑰 🔑
\ 3️⃣ 如果你想消耗自己的 OpenAI 資源,點選[這裡](/#/settings)修改設定 ⚙️
`,
},

@@ -25,9 +25,9 @@ const tw = {
Confirm: "確認",
Later: "稍候再說",
Return: "返回",
SaasTips: "配置太麻煩,想要立即使用",
SaasTips: "設定太麻煩,想要立即使用",
TopTips:
"🥳 NextChat AI 首發優惠,立刻解鎖 OpenAI o1, GPT-4o, Claude-3.5 等最新大模型",
"🥳 NextChat AI 首發優惠,立刻解鎖 OpenAI o1, GPT-4o, Claude-3.5 等最新的大型語言模型",
},
ChatItem: {
ChatItemCount: (count: number) => `${count} 則對話`,

@@ -53,8 +53,8 @@ const tw = {
PinToastAction: "檢視",
Delete: "刪除",
Edit: "編輯",
RefreshTitle: "刷新標題",
RefreshToast: "已發送刷新標題請求",
RefreshTitle: "重新整理標題",
RefreshToast: "已傳送重新整理標題請求",
},
Commands: {
new: "新建聊天",

@@ -95,11 +95,12 @@ const tw = {
IsContext: "預設提示詞",
ShortcutKey: {
Title: "鍵盤快捷方式",
newChat: "打開新聊天",
newChat: "開啟新聊天",
focusInput: "聚焦輸入框",
copyLastMessage: "複製最後一個回覆",
copyLastCode: "複製最後一個代碼塊",
copyLastCode: "複製最後一個程式碼區塊",
showShortcutKey: "顯示快捷方式",
clearContext: "清除上下文",
},
},
Export: {

@@ -174,9 +175,9 @@ const tw = {
SubTitle: "聊天內容的字型大小",
},
FontFamily: {
Title: "聊天字體",
SubTitle: "聊天內容的字體,若置空則應用全局默認字體",
Placeholder: "字體名稱",
Title: "聊天字型",
SubTitle: "聊天內容的字型,若留空則套用全域預設字型",
Placeholder: "字型名稱",
},
InjectSystemPrompts: {
Title: "匯入系統提示",

@@ -301,8 +302,8 @@ const tw = {
Title: "使用 NextChat AI",
Label: "(性價比最高的方案)",
SubTitle:
"由 NextChat 官方維護,零配置開箱即用,支持 OpenAI o1、GPT-4o、Claude-3.5 等最新大模型",
ChatNow: "立刻對話",
"由 NextChat 官方維護,無須設定開箱即用,支援 OpenAI o1、GPT-4o、Claude-3.5 等最新的大型語言模型",
ChatNow: "立刻開始對話",
},

AccessCode: {

@@ -485,18 +486,18 @@ const tw = {
},
},
SearchChat: {
Name: "搜索",
Name: "搜尋聊天記錄",
Page: {
Title: "搜索聊天記錄",
Search: "輸入搜索關鍵詞",
Title: "搜尋聊天記錄",
Search: "輸入搜尋關鍵詞",
NoResult: "沒有找到結果",
NoData: "沒有數據",
Loading: "加載中",
NoData: "沒有資料",
Loading: "載入中",

SubTitle: (count: number) => `找到 ${count} 條結果`,
},
Item: {
View: "查看",
View: "檢視",
},
},
NewChat: {
@@ -3,7 +3,7 @@ import { BuiltinMask } from "./typing";
export const CN_MASKS: BuiltinMask[] = [
{
avatar: "1f5bc-fe0f",
name: "以文搜图",
name: "AI文生图",
context: [
{
id: "text-to-pic-0",

@@ -28,7 +28,7 @@ export const CN_MASKS: BuiltinMask[] = [
id: "text-to-pic-3",
role: "system",
content:
"助手善于判断用户意图,当确定需要提供图片时,助手会变得沉默寡言,只使用以下格式输出markdown图片:,因为这个语法可以自动按照提示生成并渲染图片。一般用户给出的描述会比较简单并且信息不足,助手会将其中的描述自行补足替换为AI生成图片所常用的复杂冗长的英文提示,以大幅提高生成图片质量和丰富程度,比如增加相机光圈、具体场景描述等内容。助手会避免用代码块或原始块包围markdown标记,因为那样只会渲染出代码块或原始块而不是图片。",
"助手善于判断用户意图,当确定需要提供图片时,助手会变得沉默寡言,只使用以下格式输出markdown图片:,因为这个语法可以自动按照提示生成并渲染图片。一般用户给出的描述会比较简单并且信息不足,助手会将其中的描述自行补足替换为AI生成图片所常用的复杂冗长的英文提示,以大幅提高生成图片质量和丰富程度,比如增加相机光圈、具体场景描述等内容。助手会避免用代码块或原始块包围markdown标记,因为那样只会渲染出代码块或原始块而不是图片。url中的空格等符号需要转义。",
date: "",
},
],
@@ -1,59 +1,58 @@
import {
ApiPath,
DEFAULT_API_HOST,
GoogleSafetySettingsThreshold,
ServiceProvider,
StoreKey,
ApiPath,
OPENAI_BASE_URL,
ANTHROPIC_BASE_URL,
GEMINI_BASE_URL,
BAIDU_BASE_URL,
BYTEDANCE_BASE_URL,
ALIBABA_BASE_URL,
TENCENT_BASE_URL,
MOONSHOT_BASE_URL,
STABILITY_BASE_URL,
IFLYTEK_BASE_URL,
DEEPSEEK_BASE_URL,
XAI_BASE_URL,
CHATGLM_BASE_URL,
} from "../constant";
import { getHeaders } from "../client/api";
import { getClientConfig } from "../config/client";
import { createPersistStore } from "../utils/store";
import { ensure } from "../utils/clone";
import { DEFAULT_CONFIG } from "./config";
import { getModelProvider } from "../utils/model";

let fetchState = 0; // 0 not fetch, 1 fetching, 2 done

const isApp = getClientConfig()?.buildMode === "export";

const DEFAULT_OPENAI_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/openai"
: ApiPath.OpenAI;
const DEFAULT_OPENAI_URL = isApp ? OPENAI_BASE_URL : ApiPath.OpenAI;

const DEFAULT_GOOGLE_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/google"
: ApiPath.Google;
const DEFAULT_GOOGLE_URL = isApp ? GEMINI_BASE_URL : ApiPath.Google;

const DEFAULT_ANTHROPIC_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/anthropic"
: ApiPath.Anthropic;
const DEFAULT_ANTHROPIC_URL = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;

const DEFAULT_BAIDU_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/baidu"
: ApiPath.Baidu;
const DEFAULT_BAIDU_URL = isApp ? BAIDU_BASE_URL : ApiPath.Baidu;

const DEFAULT_BYTEDANCE_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/bytedance"
: ApiPath.ByteDance;
const DEFAULT_BYTEDANCE_URL = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance;

const DEFAULT_ALIBABA_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/alibaba"
: ApiPath.Alibaba;
const DEFAULT_ALIBABA_URL = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba;

const DEFAULT_TENCENT_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/tencent"
: ApiPath.Tencent;
const DEFAULT_TENCENT_URL = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;

const DEFAULT_MOONSHOT_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/moonshot"
: ApiPath.Moonshot;
const DEFAULT_MOONSHOT_URL = isApp ? MOONSHOT_BASE_URL : ApiPath.Moonshot;

const DEFAULT_STABILITY_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/stability"
: ApiPath.Stability;
const DEFAULT_STABILITY_URL = isApp ? STABILITY_BASE_URL : ApiPath.Stability;

const DEFAULT_IFLYTEK_URL = isApp
? DEFAULT_API_HOST + "/api/proxy/iflytek"
: ApiPath.Iflytek;
const DEFAULT_IFLYTEK_URL = isApp ? IFLYTEK_BASE_URL : ApiPath.Iflytek;

const DEFAULT_DEEPSEEK_URL = isApp ? DEEPSEEK_BASE_URL : ApiPath.DeepSeek;

const DEFAULT_XAI_URL = isApp ? XAI_BASE_URL : ApiPath.XAI;

const DEFAULT_CHATGLM_URL = isApp ? CHATGLM_BASE_URL : ApiPath.ChatGLM;

const DEFAULT_ACCESS_STATE = {
accessCode: "",

@@ -112,6 +111,18 @@ const DEFAULT_ACCESS_STATE = {
iflytekApiKey: "",
iflytekApiSecret: "",

// deepseek
deepseekUrl: DEFAULT_DEEPSEEK_URL,
deepseekApiKey: "",

// xai
xaiUrl: DEFAULT_XAI_URL,
xaiApiKey: "",

// chatglm
chatglmUrl: DEFAULT_CHATGLM_URL,
chatglmApiKey: "",

// server config
needCode: true,
hideUserApiKey: false,

@@ -120,6 +131,7 @@ const DEFAULT_ACCESS_STATE = {
disableFastLink: false,
customModels: "",
defaultModel: "",
visionModels: "",

// tts config
edgeTTSVoiceName: "zh-CN-YunxiNeural",

@@ -134,7 +146,10 @@ export const useAccessStore = createPersistStore(

return get().needCode;
},

getVisionModels() {
this.fetch();
return get().visionModels;
},
edgeVoiceName() {
this.fetch();

@@ -179,6 +194,17 @@ export const useAccessStore = createPersistStore(
isValidIflytek() {
return ensure(get(), ["iflytekApiKey"]);
},
isValidDeepSeek() {
return ensure(get(), ["deepseekApiKey"]);
},

isValidXAI() {
return ensure(get(), ["xaiApiKey"]);
},

isValidChatGLM() {
return ensure(get(), ["chatglmApiKey"]);
},

isAuthorized() {
this.fetch();

@@ -195,6 +221,9 @@ export const useAccessStore = createPersistStore(
this.isValidTencent() ||
this.isValidMoonshot() ||
this.isValidIflytek() ||
this.isValidDeepSeek() ||
this.isValidXAI() ||
this.isValidChatGLM() ||
!this.enabledAccessControl() ||
(this.enabledAccessControl() && ensure(get(), ["accessCode"]))
);

@@ -211,10 +240,13 @@ export const useAccessStore = createPersistStore(
})
.then((res) => res.json())
.then((res) => {
// Set default model from env request
let defaultModel = res.defaultModel ?? "";
if (defaultModel !== "")
DEFAULT_CONFIG.modelConfig.model = defaultModel;
const defaultModel = res.defaultModel ?? "";
if (defaultModel !== "") {
const [model, providerName] = getModelProvider(defaultModel);
DEFAULT_CONFIG.modelConfig.model = model;
DEFAULT_CONFIG.modelConfig.providerName = providerName as any;
}

return res;
})
.then((res: DangerConfig) => {
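getModelProvider is imported from ../utils/model but not shown in this diff; a plausible sketch, assuming the model@provider naming convention that custom model lists use elsewhere in the project:

export function getModelProvider(model: string): [string, string | undefined] {
  // split on the last "@" so model names that themselves contain "@" survive
  const [name, provider] = model.split(/@(?!.*@)/);
  return [name, provider];
}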
@@ -16,6 +16,9 @@ import {
DEFAULT_SYSTEM_TEMPLATE,
KnowledgeCutOffDate,
StoreKey,
SUMMARIZE_MODEL,
GEMINI_SUMMARIZE_MODEL,
ServiceProvider,
} from "../constant";
import Locale, { getLang } from "../locales";
import { isDalle3, safeLocalStorage } from "../utils";

@@ -23,6 +26,8 @@ import { prettyObject } from "../utils/format";
import { createPersistStore } from "../utils/store";
import { estimateTokenLength } from "../utils/token";
import { ModelConfig, ModelType, useAppConfig } from "./config";
import { useAccessStore } from "./access";
import { collectModelsWithDefaultModel } from "../utils/model";
import { createEmptyMask, Mask } from "./mask";

const localStorage = safeLocalStorage();

@@ -37,6 +42,7 @@ export type ChatMessageTool = {
};
content?: string;
isError?: boolean;
errorMsg?: string;
};

export type ChatMessage = RequestMessage & {

@@ -46,6 +52,7 @@ export type ChatMessage = RequestMessage & {
id: string;
model?: ModelType;
tools?: ChatMessageTool[];
audio_url?: string;
};

export function createMessage(override: Partial<ChatMessage>): ChatMessage {

@@ -102,6 +109,35 @@ function createEmptySession(): ChatSession {
};
}

function getSummarizeModel(
currentModel: string,
providerName: string,
): string[] {
// if it is using gpt-* models, force to use 4o-mini to summarize
if (currentModel.startsWith("gpt") || currentModel.startsWith("chatgpt")) {
const configStore = useAppConfig.getState();
const accessStore = useAccessStore.getState();
const allModel = collectModelsWithDefaultModel(
configStore.models,
[configStore.customModels, accessStore.customModels].join(","),
accessStore.defaultModel,
);
const summarizeModel = allModel.find(
(m) => m.name === SUMMARIZE_MODEL && m.available,
);
if (summarizeModel) {
return [
summarizeModel.name,
summarizeModel.provider?.providerName as string,
];
}
}
if (currentModel.startsWith("gemini")) {
return [GEMINI_SUMMARIZE_MODEL, ServiceProvider.Google];
}
return [currentModel, providerName];
}

function countMessages(msgs: ChatMessage[]) {
return msgs.reduce(
(pre, cur) => pre + estimateTokenLength(getMessageTextContent(cur)),
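A few illustrative calls for the fallback above (the expected outputs assume SUMMARIZE_MODEL resolves to an available model in the merged list; the model names are examples only):

getSummarizeModel("gpt-4o", "OpenAI"); // -> [SUMMARIZE_MODEL, its provider]
getSummarizeModel("gemini-1.5-pro", "Google"); // -> [GEMINI_SUMMARIZE_MODEL, ServiceProvider.Google]
getSummarizeModel("claude-3-5-sonnet-latest", "Anthropic"); // -> unchanged: ["claude-3-5-sonnet-latest", "Anthropic"]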
@@ -317,13 +353,13 @@ export const useChatStore = createPersistStore(
return session;
},

onNewMessage(message: ChatMessage) {
get().updateCurrentSession((session) => {
onNewMessage(message: ChatMessage, targetSession: ChatSession) {
get().updateTargetSession(targetSession, (session) => {
session.messages = session.messages.concat();
session.lastUpdate = Date.now();
});
get().updateStat(message);
get().summarizeSession();
get().updateStat(message, targetSession);
get().summarizeSession(false, targetSession);
},

async onUserInput(content: string, attachImages?: string[]) {

@@ -337,22 +373,16 @@ export const useChatStore = createPersistStore(

if (attachImages && attachImages.length > 0) {
mContent = [
{
type: "text",
text: userContent,
},
...(userContent
? [{ type: "text" as const, text: userContent }]
: []),
...attachImages.map((url) => ({
type: "image_url" as const,
image_url: { url },
})),
];
mContent = mContent.concat(
attachImages.map((url) => {
return {
type: "image_url",
image_url: {
url: url,
},
};
}),
);
}

let userMessage: ChatMessage = createMessage({
role: "user",
content: mContent,

@@ -367,10 +397,10 @@ export const useChatStore = createPersistStore(
// get recent messages
const recentMessages = get().getMessagesWithMemory();
const sendMessages = recentMessages.concat(userMessage);
const messageIndex = get().currentSession().messages.length + 1;
const messageIndex = session.messages.length + 1;

// save user's and bot's message
get().updateCurrentSession((session) => {
get().updateTargetSession(session, (session) => {
const savedUserMessage = {
...userMessage,
content: mContent,

@@ -391,7 +421,7 @@ export const useChatStore = createPersistStore(
if (message) {
botMessage.content = message;
}
get().updateCurrentSession((session) => {
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
},

@@ -399,13 +429,14 @@ export const useChatStore = createPersistStore(
botMessage.streaming = false;
if (message) {
botMessage.content = message;
get().onNewMessage(botMessage);
botMessage.date = new Date().toLocaleString();
get().onNewMessage(botMessage, session);
}
ChatControllerPool.remove(session.id, botMessage.id);
},
onBeforeTool(tool: ChatMessageTool) {
(botMessage.tools = botMessage?.tools || []).push(tool);
get().updateCurrentSession((session) => {
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
},

@@ -415,7 +446,7 @@ export const useChatStore = createPersistStore(
tools[i] = { ...tool };
}
});
get().updateCurrentSession((session) => {
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
},

@@ -430,7 +461,7 @@ export const useChatStore = createPersistStore(
botMessage.streaming = false;
userMessage.isError = !isAborted;
botMessage.isError = !isAborted;
get().updateCurrentSession((session) => {
get().updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
ChatControllerPool.remove(

@@ -562,24 +593,33 @@ export const useChatStore = createPersistStore(
set(() => ({ sessions }));
},

resetSession() {
get().updateCurrentSession((session) => {
resetSession(session: ChatSession) {
get().updateTargetSession(session, (session) => {
session.messages = [];
session.memoryPrompt = "";
});
},

summarizeSession(refreshTitle: boolean = false) {
summarizeSession(
refreshTitle: boolean = false,
targetSession: ChatSession,
) {
const config = useAppConfig.getState();
const session = get().currentSession();
const session = targetSession;
const modelConfig = session.mask.modelConfig;
// skip summarize when using dalle3?
if (isDalle3(modelConfig.model)) {
return;
}

const providerName = modelConfig.compressProviderName;
const api: ClientApi = getClientApi(providerName);
// if compressModel is not configured, fall back to getSummarizeModel
const [model, providerName] = modelConfig.compressModel
? [modelConfig.compressModel, modelConfig.compressProviderName]
: getSummarizeModel(
session.mask.modelConfig.model,
session.mask.modelConfig.providerName,
);
const api: ClientApi = getClientApi(providerName as ServiceProvider);

// remove error messages if any
const messages = session.messages;

@@ -610,17 +650,19 @@ export const useChatStore = createPersistStore(
api.llm.chat({
messages: topicMessages,
config: {
model: modelConfig.compressModel,
model,
stream: false,
providerName,
},
onFinish(message) {
if (!isValidMessage(message)) return;
get().updateCurrentSession(
onFinish(message, responseRes) {
if (responseRes?.status === 200) {
get().updateTargetSession(
session,
(session) =>
(session.topic =
message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
);
}
},
});
}

@@ -634,7 +676,7 @@ export const useChatStore = createPersistStore(

const historyMsgLength = countMessages(toBeSummarizedMsgs);

if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
if (historyMsgLength > (modelConfig?.max_tokens || 4000)) {
const n = toBeSummarizedMsgs.length;
toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
Math.max(0, n - modelConfig.historyMessageCount),

@@ -674,43 +716,44 @@ export const useChatStore = createPersistStore(
config: {
...modelcfg,
stream: true,
model: modelConfig.compressModel,
model,
providerName,
},
onUpdate(message) {
session.memoryPrompt = message;
},
onFinish(message) {
onFinish(message, responseRes) {
if (responseRes?.status === 200) {
console.log("[Memory] ", message);
get().updateCurrentSession((session) => {
get().updateTargetSession(session, (session) => {
session.lastSummarizeIndex = lastSummarizeIndex;
session.memoryPrompt = message; // update the memory prompt so it is stored in local storage
});
}
},
onError(err) {
console.error("[Summarize] ", err);
},
});
}

function isValidMessage(message: any): boolean {
return typeof message === "string" && !message.startsWith("```json");
}
},

updateStat(message: ChatMessage) {
get().updateCurrentSession((session) => {
updateStat(message: ChatMessage, session: ChatSession) {
get().updateTargetSession(session, (session) => {
session.stat.charCount += message.content.length;
// TODO: should update chat count and word count
});
},

updateCurrentSession(updater: (session: ChatSession) => void) {
updateTargetSession(
targetSession: ChatSession,
updater: (session: ChatSession) => void,
) {
const sessions = get().sessions;
const index = get().currentSessionIndex;
const index = sessions.findIndex((s) => s.id === targetSession.id);
if (index < 0) return;
updater(sessions[index]);
set(() => ({ sessions }));
},

async clearAllData() {
await indexedDBStorage.clear();
localStorage.clear();

@@ -727,7 +770,7 @@ export const useChatStore = createPersistStore(
},
{
name: StoreKey.Chat,
version: 3.2,
version: 3.3,
migrate(persistedState, version) {
const state = persistedState as any;
const newState = JSON.parse(

@@ -783,6 +826,14 @@ export const useChatStore = createPersistStore(
config.modelConfig.compressProviderName;
});
}
// revert default summarize model for every session
|
||||
if (version < 3.3) {
|
||||
newState.sessions.forEach((s) => {
|
||||
const config = useAppConfig.getState();
|
||||
s.mask.modelConfig.compressModel = "";
|
||||
s.mask.modelConfig.compressProviderName = "";
|
||||
});
|
||||
}
|
||||
|
||||
return newState as any;
|
||||
},
|
||||
|
|
|
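The recurring edit in this store is the swap from `updateCurrentSession` to `updateTargetSession`: async callbacks no longer mutate whatever session happens to be selected, but resolve the session they were started for by id. A minimal sketch of the pattern, with names simplified from the store above:

```ts
// Minimal sketch of the lookup-by-id pattern, simplified from the diff above.
type Session = { id: string; messages: string[] };

function updateTargetSession(
  sessions: Session[],
  target: Session,
  updater: (s: Session) => void,
) {
  // Resolve the session by id instead of trusting the "current" index,
  // so async callbacks survive the user switching chats mid-stream.
  const index = sessions.findIndex((s) => s.id === target.id);
  if (index < 0) return; // session was deleted; drop the update
  updater(sessions[index]);
}
```

This is why the streaming callbacks (`onFinish`, `onBeforeTool`, `onError`) now receive or close over `session`: a reply that arrives after the user switches chats still lands in the conversation it belongs to.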
@@ -1,5 +1,5 @@
 import { LLMModel } from "../client/api";
-import { DalleSize, DalleQuality, DalleStyle } from "../typing";
+import { DalleQuality, DalleStyle, ModelSize } from "../typing";
 import { getClientConfig } from "../config/client";
 import {
   DEFAULT_INPUT_TEMPLATE,

@@ -15,6 +15,7 @@ import {
   ServiceProvider,
 } from "../constant";
 import { createPersistStore } from "../utils/store";
+import type { Voice } from "rt-client";

 export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
 export type TTSModelType = (typeof DEFAULT_TTS_MODELS)[number];

@@ -52,6 +53,8 @@ export const DEFAULT_CONFIG = {

   enableArtifacts: true, // show artifacts config

+  enableCodeFold: true, // code fold config
+
   disablePromptHint: false,

   dontShowMaskSplashScreen: false, // dont show splash screen when create chat

@@ -71,11 +74,11 @@ export const DEFAULT_CONFIG = {
     sendMemory: true,
     historyMessageCount: 4,
     compressMessageLengthThreshold: 1000,
-    compressModel: "gpt-4o-mini" as ModelType,
-    compressProviderName: "OpenAI" as ServiceProvider,
+    compressModel: "",
+    compressProviderName: "",
     enableInjectSystemPrompts: true,
     template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
-    size: "1024x1024" as DalleSize,
+    size: "1024x1024" as ModelSize,
     quality: "standard" as DalleQuality,
     style: "vivid" as DalleStyle,
   },

@@ -88,12 +91,26 @@ export const DEFAULT_CONFIG = {
     voice: DEFAULT_TTS_VOICE,
     speed: 1.0,
   },
+
+  realtimeConfig: {
+    enable: false,
+    provider: "OpenAI" as ServiceProvider,
+    model: "gpt-4o-realtime-preview-2024-10-01",
+    apiKey: "",
+    azure: {
+      endpoint: "",
+      deployment: "",
+    },
+    temperature: 0.9,
+    voice: "alloy" as Voice,
+  },
 };

 export type ChatConfig = typeof DEFAULT_CONFIG;

 export type ModelConfig = ChatConfig["modelConfig"];
 export type TTSConfig = ChatConfig["ttsConfig"];
+export type RealtimeConfig = ChatConfig["realtimeConfig"];

 export function limitNumber(
   x: number,

@@ -178,7 +195,7 @@ export const useAppConfig = createPersistStore(
   }),
   {
     name: StoreKey.Config,
-    version: 4,
+    version: 4.1,

     merge(persistedState, currentState) {
       const state = persistedState as ChatConfig | undefined;

@@ -231,7 +248,7 @@ export const useAppConfig = createPersistStore(
         : config?.template ?? DEFAULT_INPUT_TEMPLATE;
       }

-      if (version < 4) {
+      if (version < 4.1) {
         state.modelConfig.compressModel =
           DEFAULT_CONFIG.modelConfig.compressModel;
         state.modelConfig.compressProviderName =
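The new `realtimeConfig` block is ordinary persisted state, so it is read the same way as the rest of this zustand store. A hedged sketch (the import path is an assumption; `useAppConfig` and the field names come from the diff above):

```ts
// Sketch only: reading the new realtime settings from the config store.
import { useAppConfig } from "./app/store/config"; // path assumed

const realtime = useAppConfig.getState().realtimeConfig;
if (realtime.enable) {
  // model and voice default to the values declared in DEFAULT_CONFIG
  console.log(`realtime model: ${realtime.model}, voice: ${realtime.voice}`);
}
```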
@@ -19,6 +19,7 @@ export type Mask = {
   builtin: boolean;
   plugin?: string[];
   enableArtifacts?: boolean;
+  enableCodeFold?: boolean;
 };

 export const DEFAULT_MASK_STATE = {
@@ -4,10 +4,10 @@ import { nanoid } from "nanoid";
 import { createPersistStore } from "../utils/store";
 import { getClientConfig } from "../config/client";
 import yaml from "js-yaml";
-import { adapter } from "../utils";
+import { adapter, getOperationId } from "../utils";
 import { useAccessStore } from "./access";

-const isApp = getClientConfig()?.isApp;
+const isApp = getClientConfig()?.isApp !== false;

 export type Plugin = {
   id: string;

@@ -116,7 +116,7 @@ export const FunctionToolService = {
         return {
           type: "function",
           function: {
-            name: o.operationId,
+            name: getOperationId(o),
             description: o.description || o.summary,
             parameters: parameters,
           },

@@ -124,7 +124,7 @@ export const FunctionToolService = {
       }),
       funcs: operations.reduce((s, o) => {
         // @ts-ignore
-        s[o.operationId] = function (args) {
+        s[getOperationId(o)] = function (args) {
           const parameters: Record<string, any> = {};
           if (o.parameters instanceof Array) {
             o.parameters.forEach((p) => {

@@ -139,8 +139,8 @@ export const FunctionToolService = {
           } else if (authLocation == "body") {
             args[headerName] = tokenValue;
           }
-          // @ts-ignore
-          return api.client[o.operationId](
+          // @ts-ignore if o.operationId is null, then using o.path and o.method
+          return api.client.paths[o.path][o.method](
             parameters,
             args,
             api.axiosConfigDefaults,
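`getOperationId` (defined in app/utils.ts further down) gives every OpenAPI operation a tool-safe name even when the spec omits `operationId`. A short illustration of its behavior, with made-up operations:

```ts
// Fallback naming per getOperationId: method + path with slashes replaced.
// The operations below are illustrative, not from a real plugin spec.
getOperationId({ method: "get", path: "/repos/list" });
// -> "GET_repos_list" (matches the pattern '^[a-zA-Z0-9_-]+$')

getOperationId({ operationId: "listRepos", method: "get", path: "/repos" });
// -> "listRepos" (an explicit operationId always wins)
```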
@@ -12,7 +12,6 @@ import { downloadAs, readFromFile } from "../utils";
 import { showToast } from "../components/ui-lib";
 import Locale from "../locales";
 import { createSyncClient, ProviderType } from "../utils/cloud";
-import { corsPath } from "../utils/cors";

 export interface WebDavConfig {
   server: string;

@@ -26,7 +25,7 @@ export type SyncStore = GetStoreState<typeof useSyncStore>;
 const DEFAULT_SYNC_STATE = {
   provider: ProviderType.WebDAV,
   useProxy: true,
-  proxyUrl: corsPath(ApiPath.Cors),
+  proxyUrl: ApiPath.Cors as string,

   webdav: {
     endpoint: "",
@@ -6,6 +6,7 @@ import {
 } from "../constant";
 import { getClientConfig } from "../config/client";
 import { createPersistStore } from "../utils/store";
+import { clientUpdate } from "../utils";
 import ChatGptIcon from "../icons/chatgpt.png";
 import Locale from "../locales";
 import { ClientApi } from "../client/api";

@@ -119,6 +120,7 @@ export const useUpdateStore = createPersistStore(
             icon: `${ChatGptIcon.src}`,
             sound: "Default",
           });
+          clientUpdate();
         }
       }
     });
@@ -11,3 +11,14 @@ export interface RequestMessage {
 export type DalleSize = "1024x1024" | "1792x1024" | "1024x1792";
 export type DalleQuality = "standard" | "hd";
 export type DalleStyle = "vivid" | "natural";
+
+export type ModelSize =
+  | "1024x1024"
+  | "1792x1024"
+  | "1024x1792"
+  | "768x1344"
+  | "864x1152"
+  | "1344x768"
+  | "1152x864"
+  | "1440x720"
+  | "720x1440";
app/utils.ts

@@ -2,8 +2,12 @@ import { useEffect, useState } from "react";
 import { showToast } from "./components/ui-lib";
 import Locale from "./locales";
 import { RequestMessage } from "./client/api";
-import { ServiceProvider, REQUEST_TIMEOUT_MS } from "./constant";
-import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
+import { ServiceProvider } from "./constant";
+// import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
+import { fetch as tauriStreamFetch } from "./utils/stream";
+import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
+import { useAccessStore } from "./store";
+import { ModelSize } from "./typing";

 export function trimTopic(topic: string) {
   // Fix an issue where double quotes still show in the Indonesian language

@@ -251,21 +255,14 @@ export function getMessageImages(message: RequestMessage): string[] {
 }

 export function isVisionModel(model: string) {
-  // Note: This is a better way using the TypeScript feature instead of `&&` or `||` (ts v5.5.0-dev.20240314 I've been using)
-
-  const visionKeywords = [
-    "vision",
-    "claude-3",
-    "gemini-1.5-pro",
-    "gemini-1.5-flash",
-    "gpt-4o",
-    "gpt-4o-mini",
-  ];
-  const isGpt4Turbo =
-    model.includes("gpt-4-turbo") && !model.includes("preview");
-
+  const visionModels = useAccessStore.getState().visionModels;
+  const envVisionModels = visionModels?.split(",").map((m) => m.trim());
+  if (envVisionModels?.includes(model)) {
+    return true;
+  }
   return (
-    visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo
+    !EXCLUDE_VISION_MODEL_REGEXES.some((regex) => regex.test(model)) &&
+    VISION_MODEL_REGEXES.some((regex) => regex.test(model))
   );
 }

@@ -273,17 +270,43 @@ export function isDalle3(model: string) {
   return "dall-e-3" === model;
 }

+export function getModelSizes(model: string): ModelSize[] {
+  if (isDalle3(model)) {
+    return ["1024x1024", "1792x1024", "1024x1792"];
+  }
+  if (model.toLowerCase().includes("cogview")) {
+    return [
+      "1024x1024",
+      "768x1344",
+      "864x1152",
+      "1344x768",
+      "1152x864",
+      "1440x720",
+      "720x1440",
+    ];
+  }
+  return [];
+}
+
+export function supportsCustomSize(model: string): boolean {
+  return getModelSizes(model).length > 0;
+}
+
 export function showPlugins(provider: ServiceProvider, model: string) {
   if (
     provider == ServiceProvider.OpenAI ||
     provider == ServiceProvider.Azure ||
-    provider == ServiceProvider.Moonshot
+    provider == ServiceProvider.Moonshot ||
+    provider == ServiceProvider.ChatGLM
   ) {
     return true;
   }
   if (provider == ServiceProvider.Anthropic && !model.includes("claude-2")) {
     return true;
   }
   if (provider == ServiceProvider.Google && !model.includes("vision")) {
     return true;
   }
   return false;
 }

@@ -292,30 +315,23 @@ export function fetch(
   options?: Record<string, unknown>,
 ): Promise<any> {
   if (window.__TAURI__) {
-    const payload = options?.body || options?.data;
-    return tauriFetch(url, {
-      ...options,
-      body:
-        payload &&
-        ({
-          type: "Text",
-          payload,
-        } as any),
-      timeout: ((options?.timeout as number) || REQUEST_TIMEOUT_MS) / 1000,
-      responseType:
-        options?.responseType == "text" ? ResponseType.Text : ResponseType.JSON,
-    } as any);
+    return tauriStreamFetch(url, options);
   }
   return window.fetch(url, options);
 }

 export function adapter(config: Record<string, unknown>) {
-  const { baseURL, url, params, ...rest } = config;
+  const { baseURL, url, params, data: body, ...rest } = config;
   const path = baseURL ? `${baseURL}${url}` : url;
   const fetchUrl = params
     ? `${path}?${new URLSearchParams(params as any).toString()}`
     : path;
-  return fetch(fetchUrl as string, { ...rest, responseType: "text" });
+  return fetch(fetchUrl as string, { ...rest, body }).then((res) => {
+    const { status, headers, statusText } = res;
+    return res
+      .text()
+      .then((data: string) => ({ status, statusText, headers, data }));
+  });
 }

 export function safeLocalStorage(): {

@@ -377,3 +393,49 @@ export function safeLocalStorage(): {
     },
   };
 }
+
+export function getOperationId(operation: {
+  operationId?: string;
+  method: string;
+  path: string;
+}) {
+  // pattern '^[a-zA-Z0-9_-]+$'
+  return (
+    operation?.operationId ||
+    `${operation.method.toUpperCase()}${operation.path.replaceAll("/", "_")}`
+  );
+}
+
+export function clientUpdate() {
+  // this a wild for updating client app
+  return window.__TAURI__?.updater
+    .checkUpdate()
+    .then((updateResult) => {
+      if (updateResult.shouldUpdate) {
+        window.__TAURI__?.updater
+          .installUpdate()
+          .then((result) => {
+            showToast(Locale.Settings.Update.Success);
+          })
+          .catch((e) => {
+            console.error("[Install Update Error]", e);
+            showToast(Locale.Settings.Update.Failed);
+          });
+      }
+    })
+    .catch((e) => {
+      console.error("[Check Update Error]", e);
+      showToast(Locale.Settings.Update.Failed);
+    });
+}
+
+// https://gist.github.com/iwill/a83038623ba4fef6abb9efca87ae9ccb
+export function semverCompare(a: string, b: string) {
+  if (a.startsWith(b + "-")) return -1;
+  if (b.startsWith(a + "-")) return 1;
+  return a.localeCompare(b, undefined, {
+    numeric: true,
+    sensitivity: "case",
+    caseFirst: "upper",
+  });
+}
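The new size helpers make image-size support data-driven rather than hard-coded in the UI. An illustrative use, with the return values taken from the diff above (the "cogview" model name is a made-up example matching the substring check):

```ts
// Illustrative calls against the helpers added above.
getModelSizes("dall-e-3");       // ["1024x1024", "1792x1024", "1024x1792"]
getModelSizes("cogview-3-plus"); // the seven CogView sizes listed above
getModelSizes("gpt-4o");         // [] – not an image model

supportsCustomSize("dall-e-3");  // true, since the size list is non-empty
supportsCustomSize("gpt-4o");    // false
```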
@@ -10,6 +10,7 @@ import {
   fetchEventSource,
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "./format";
+import { fetch as tauriFetch } from "./stream";

 export function compressImage(file: Blob, maxSize: number): Promise<string> {
   return new Promise((resolve, reject) => {

@@ -137,7 +138,7 @@ export function uploadImage(file: Blob): Promise<string> {
     })
     .then((res) => res.json())
     .then((res) => {
-      console.log("res", res);
+      // console.log("res", res);
       if (res?.code == 0 && res?.data) {
         return res?.data;
       }

@@ -173,6 +174,7 @@ export function stream(
   let finished = false;
   let running = false;
   let runTools: any[] = [];
+  let responseRes: Response;

   // animate response to make it looks smooth
   function animateResponseText() {

@@ -221,7 +223,12 @@ export function stream(
             ),
           )
           .then((res) => {
-            const content = JSON.stringify(res.data);
+            let content = res.data || res?.statusText;
+            // hotfix #5614
+            content =
+              typeof content === "string"
+                ? content
+                : JSON.stringify(content);
             if (res.status >= 300) {
               return Promise.reject(content);
             }

@@ -236,10 +243,15 @@ export function stream(
             return content;
           })
           .catch((e) => {
-            options?.onAfterTool?.({ ...tool, isError: true });
+            options?.onAfterTool?.({
+              ...tool,
+              isError: true,
+              errorMsg: e.toString(),
+            });
+            return e.toString();
           })
           .then((content) => ({
             name: tool.function.name,
             role: "tool",
             content,
             tool_call_id: tool.id,

@@ -261,7 +273,7 @@ export function stream(
     }
     console.debug("[ChatAPI] end");
     finished = true;
-    options.onFinish(responseText + remainText);
+    options.onFinish(responseText + remainText, responseRes); // pass the response to onFinish
   }
 };

@@ -287,11 +299,13 @@ export function stream(
     REQUEST_TIMEOUT_MS,
   );
   fetchEventSource(chatPath, {
+    fetch: tauriFetch as any,
     ...chatPayload,
     async onopen(res) {
       clearTimeout(requestTimeoutId);
       const contentType = res.headers.get("content-type");
       console.log("[Request] response content type: ", contentType);
+      responseRes = res;

       if (contentType?.startsWith("text/plain")) {
         responseText = await res.clone().text();
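With `responseRes` captured in `onopen` and forwarded through `onFinish`, callers can gate persistence on the HTTP status instead of treating every completed stream as a valid reply. A hedged sketch of the consumer side (`render` and `commit` are hypothetical app hooks, not from this repo):

```ts
// Sketch of the new onFinish(message, responseRes) shape from the diff above.
const render = (partial: string) => console.log("partial:", partial);
const commit = (message: string) => console.log("final:", message);

const options = {
  onUpdate(partial: string) {
    render(partial);
  },
  onFinish(message: string, res: Response) {
    // A non-200 reply can now be rejected instead of stored as chat content.
    if (res?.status === 200) {
      commit(message);
    }
  },
};
```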
@@ -1,19 +0,0 @@
-import { getClientConfig } from "../config/client";
-import { DEFAULT_API_HOST } from "../constant";
-
-export function corsPath(path: string) {
-  const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_API_HOST}` : "";
-
-  if (baseUrl === "" && path === "") {
-    return "";
-  }
-  if (!path.startsWith("/")) {
-    path = "/" + path;
-  }
-
-  if (!path.endsWith("/")) {
-    path += "/";
-  }
-
-  return `${baseUrl}${path}`;
-}
@@ -37,6 +37,17 @@ const sortModelTable = (models: ReturnType<typeof collectModels>) =>
   }
 });

+/**
+ * get model name and provider from a formatted string,
+ * e.g. `gpt-4@OpenAi` or `claude-3-5-sonnet@20240620@Google`
+ * @param modelWithProvider model name with provider separated by last `@` char,
+ * @returns [model, provider] tuple, if no `@` char found, provider is undefined
+ */
+export function getModelProvider(modelWithProvider: string): [string, string?] {
+  const [model, provider] = modelWithProvider.split(/@(?!.*@)/);
+  return [model, provider];
+}
+
 export function collectModelTable(
   models: readonly LLMModel[],
   customModels: string,

@@ -79,10 +90,10 @@ export function collectModelTable(
       );
     } else {
       // 1. find model by name, and set available value
-      const [customModelName, customProviderName] = name.split("@");
+      const [customModelName, customProviderName] = getModelProvider(name);
       let count = 0;
       for (const fullName in modelTable) {
-        const [modelName, providerName] = fullName.split("@");
+        const [modelName, providerName] = getModelProvider(fullName);
         if (
           customModelName == modelName &&
           (customProviderName === undefined ||

@@ -102,7 +113,7 @@ export function collectModelTable(
       }
       // 2. if model not exists, create new model with available value
       if (count === 0) {
-        let [customModelName, customProviderName] = name.split("@");
+        let [customModelName, customProviderName] = getModelProvider(name);
         const provider = customProvider(
           customProviderName || customModelName,
         );

@@ -139,7 +150,7 @@ export function collectModelTableWithDefaultModel(
   for (const key of Object.keys(modelTable)) {
     if (
       modelTable[key].available &&
-      key.split("@").shift() == defaultModel
+      getModelProvider(key)[0] == defaultModel
     ) {
       modelTable[key].isDefault = true;
       break;

@@ -191,3 +202,52 @@ export function isModelAvailableInServer(
   const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
   return modelTable[fullName]?.available === false;
 }
+
+/**
+ * Check if the model name is a GPT-4 related model
+ *
+ * @param modelName The name of the model to check
+ * @returns True if the model is a GPT-4 related model (excluding gpt-4o-mini)
+ */
+export function isGPT4Model(modelName: string): boolean {
+  return (
+    (modelName.startsWith("gpt-4") ||
+      modelName.startsWith("chatgpt-4o") ||
+      modelName.startsWith("o1")) &&
+    !modelName.startsWith("gpt-4o-mini")
+  );
+}
+
+/**
+ * Checks if a model is not available on any of the specified providers in the server.
+ *
+ * @param {string} customModels - A string of custom models, comma-separated.
+ * @param {string} modelName - The name of the model to check.
+ * @param {string|string[]} providerNames - A string or array of provider names to check against.
+ *
+ * @returns {boolean} True if the model is not available on any of the specified providers, false otherwise.
+ */
+export function isModelNotavailableInServer(
+  customModels: string,
+  modelName: string,
+  providerNames: string | string[],
+): boolean {
+  // Check DISABLE_GPT4 environment variable
+  if (
+    process.env.DISABLE_GPT4 === "1" &&
+    isGPT4Model(modelName.toLowerCase())
+  ) {
+    return true;
+  }
+
+  const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
+
+  const providerNamesArray = Array.isArray(providerNames)
+    ? providerNames
+    : [providerNames];
+  for (const providerName of providerNamesArray) {
+    const fullName = `${modelName}@${providerName.toLowerCase()}`;
+    if (modelTable?.[fullName]?.available === true) return false;
+  }
+  return true;
+}
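The point of `getModelProvider` is the regex `/@(?!.*@)/`, which matches only the final `@`; a plain `split("@")` would mangle model names that themselves contain `@`. Concrete behavior, straight from the JSDoc examples above:

```ts
// Last-`@` split behavior of getModelProvider's regex.
"gpt-4@OpenAI".split(/@(?!.*@)/);
// -> ["gpt-4", "OpenAI"]

"claude-3-5-sonnet@20240620@Google".split(/@(?!.*@)/);
// -> ["claude-3-5-sonnet@20240620", "Google"] – the date suffix survives

"llama3".split(/@(?!.*@)/);
// -> ["llama3"] – provider is undefined after destructuring
```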
@@ -0,0 +1,108 @@
+// using tauri command to send request
+// see src-tauri/src/stream.rs, and src-tauri/src/main.rs
+// 1. invoke('stream_fetch', {url, method, headers, body}), get response with headers.
+// 2. listen event: `stream-response` multi times to get body
+
+type ResponseEvent = {
+  id: number;
+  payload: {
+    request_id: number;
+    status?: number;
+    chunk?: number[];
+  };
+};
+
+type StreamResponse = {
+  request_id: number;
+  status: number;
+  status_text: string;
+  headers: Record<string, string>;
+};
+
+export function fetch(url: string, options?: RequestInit): Promise<Response> {
+  if (window.__TAURI__) {
+    const {
+      signal,
+      method = "GET",
+      headers: _headers = {},
+      body = [],
+    } = options || {};
+    let unlisten: Function | undefined;
+    let setRequestId: Function | undefined;
+    const requestIdPromise = new Promise((resolve) => (setRequestId = resolve));
+    const ts = new TransformStream();
+    const writer = ts.writable.getWriter();
+
+    let closed = false;
+    const close = () => {
+      if (closed) return;
+      closed = true;
+      unlisten && unlisten();
+      writer.ready.then(() => {
+        writer.close().catch((e) => console.error(e));
+      });
+    };
+
+    if (signal) {
+      signal.addEventListener("abort", () => close());
+    }
+    // @ts-ignore 2. listen response multi times, and write to Response.body
+    window.__TAURI__.event
+      .listen("stream-response", (e: ResponseEvent) =>
+        requestIdPromise.then((request_id) => {
+          const { request_id: rid, chunk, status } = e?.payload || {};
+          if (request_id != rid) {
+            return;
+          }
+          if (chunk) {
+            writer.ready.then(() => {
+              writer.write(new Uint8Array(chunk));
+            });
+          } else if (status === 0) {
+            // end of body
+            close();
+          }
+        }),
+      )
+      .then((u: Function) => (unlisten = u));
+
+    const headers: Record<string, string> = {
+      Accept: "application/json, text/plain, */*",
+      "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
+      "User-Agent": navigator.userAgent,
+    };
+    for (const item of new Headers(_headers || {})) {
+      headers[item[0]] = item[1];
+    }
+    return window.__TAURI__
+      .invoke("stream_fetch", {
+        method: method.toUpperCase(),
+        url,
+        headers,
+        // TODO FormData
+        body:
+          typeof body === "string"
+            ? Array.from(new TextEncoder().encode(body))
+            : [],
+      })
+      .then((res: StreamResponse) => {
+        const { request_id, status, status_text: statusText, headers } = res;
+        setRequestId?.(request_id);
+        const response = new Response(ts.readable, {
+          status,
+          statusText,
+          headers,
+        });
+        if (status >= 300) {
+          setTimeout(close, 100);
+        }
+        return response;
+      })
+      .catch((e) => {
+        console.error("stream error", e);
+        // throw e;
+        return new Response("", { status: 599 });
+      });
+  }
+  return window.fetch(url, options);
+}
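Because this wrapper pipes the `stream-response` chunks into a `TransformStream` and hands back a standard `Response`, callers can consume it with an ordinary body-reader loop; nothing downstream needs to know Tauri is involved. A hedged usage sketch (the import path is an assumption):

```ts
// Sketch: reading the Tauri-backed fetch above like any streaming Response.
import { fetch as tauriFetch } from "./app/utils/stream"; // path assumed

async function readAll(url: string) {
  const res = await tauriFetch(url, { method: "GET" });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break; // the Rust side emits status 0 to close the stream
    console.log(decoder.decode(value, { stream: true }));
  }
}
```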
@@ -0,0 +1,29 @@
+# Deploying with Baota Panel (宝塔面板)
+
+## Set up your own Baota Panel
+Before deploying this project through the Baota Panel, the panel itself must be installed on your server; all of the steps below assume an existing installation. For installation instructions, see the [Baota official site](https://www.bt.cn/new/download.html).
+
+> Note: this project requires Baota Panel version 9.2.0 or above.
+
+## One-click install
+
+1. In Baota Panel, open Docker -> App Store and search for ChatGPT-Next-Web to find this project's Docker app;
+2. Click Install to start the deployment.
+
+
+1. On the project configuration page, set the environment variables as required;
+2. If you tick the "allow external access" option, remember to open the configured web port in your server's security group;
+3. Make sure you provide a valid OpenAI API Key, otherwise the app cannot be used; if the key comes directly from OpenAI (not reachable from mainland China), also configure a proxy address;
+4. Setting an access password is recommended; otherwise, once external access is allowed, anyone can use your configured API key;
+5. Click Confirm to start the automatic deployment.
+
+## How to access
+
+Open ChatGPT-Next-Web in a browser using the server IP address and the configured web port: http://$(host):$(port).
+
+
+If an access password was configured, you must log in before using the model; click Login to obtain access.
+
+
+
+
@@ -0,0 +1,21 @@
+import type { Config } from "jest";
+import nextJest from "next/jest.js";
+
+const createJestConfig = nextJest({
+  // Provide the path to your Next.js app to load next.config.js and .env files in your test environment
+  dir: "./",
+});
+
+// Add any custom config to be passed to Jest
+const config: Config = {
+  coverageProvider: "v8",
+  testEnvironment: "jsdom",
+  testMatch: ["**/*.test.js", "**/*.test.ts", "**/*.test.jsx", "**/*.test.tsx"],
+  setupFilesAfterEnv: ["<rootDir>/jest.setup.ts"],
+  moduleNameMapper: {
+    "^@/(.*)$": "<rootDir>/$1",
+  },
+};
+
+// createJestConfig is exported this way to ensure that next/jest can load the Next.js config which is async
+export default createJestConfig(config);
@@ -0,0 +1,24 @@
+// Learn more: https://github.com/testing-library/jest-dom
+import "@testing-library/jest-dom";
+
+global.fetch = jest.fn(() =>
+  Promise.resolve({
+    ok: true,
+    status: 200,
+    json: () => Promise.resolve({}),
+    headers: new Headers(),
+    redirected: false,
+    statusText: "OK",
+    type: "basic",
+    url: "",
+    clone: function () {
+      return this;
+    },
+    body: null,
+    bodyUsed: false,
+    arrayBuffer: () => Promise.resolve(new ArrayBuffer(0)),
+    blob: () => Promise.resolve(new Blob()),
+    formData: () => Promise.resolve(new FormData()),
+    text: () => Promise.resolve(""),
+  }),
+);
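Together the two files above give tests a jsdom environment, the jest-dom matchers, and a stubbed global `fetch` that always resolves with an empty 200 response. A hypothetical smoke test exercising that setup (the file name `app/utils.test.ts` is an assumption matching the `testMatch` globs):

```ts
// Hypothetical test relying on the Jest setup above; run via `yarn test:ci`.
import "@testing-library/jest-dom";

test("global fetch is stubbed to an empty 200 response", async () => {
  const res = await fetch("https://example.com");
  expect(res.status).toBe(200);
  await expect(res.json()).resolves.toEqual({});
});
```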
@@ -94,6 +94,10 @@ if (mode !== "export") {
       source: "/sharegpt",
       destination: "https://sharegpt.com/api/conversations",
     },
+    {
+      source: "/api/proxy/alibaba/:path*",
+      destination: "https://dashscope.aliyuncs.com/api/:path*",
+    },
   ];

   return {
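The added rewrite keeps Alibaba calls same-origin in the browser while Next.js forwards them to DashScope. A hedged sketch of what that enables (the subpath and payload below are illustrative, not taken from this repo):

```ts
// Sketch only: /api/proxy/alibaba/v1/... is rewritten server-side to
// https://dashscope.aliyuncs.com/api/v1/... per the config above.
async function callAlibaba() {
  const res = await fetch(
    "/api/proxy/alibaba/v1/services/aigc/text-generation/generation",
    {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ model: "qwen-turbo", input: { prompt: "hi" } }),
    },
  );
  return res.json();
}
```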
package.json

@@ -15,7 +15,9 @@
     "app:build": "yarn mask && yarn tauri build",
     "prompts": "node ./scripts/fetch-prompts.mjs",
     "prepare": "husky install",
-    "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev"
+    "proxy-dev": "sh ./scripts/init-proxy.sh && proxychains -f ./scripts/proxychains.conf yarn dev",
+    "test": "jest --watch",
+    "test:ci": "jest --ci"
   },
   "dependencies": {
     "@fortaine/fetch-event-source": "^3.0.6",

@@ -25,6 +27,7 @@
     "@vercel/analytics": "^0.1.11",
     "@vercel/speed-insights": "^1.0.2",
     "axios": "^1.7.5",
+    "clsx": "^2.1.1",
     "emoji-picker-react": "^4.9.2",
     "fuse.js": "^7.0.0",
     "heic2any": "^0.0.4",

@@ -32,8 +35,8 @@
     "idb-keyval": "^6.2.1",
     "lodash-es": "^4.17.21",
     "lowlight": "^2.0.0",
-    "mermaid": "^10.6.1",
+    "markdown-to-txt": "^2.0.1",
+    "mermaid": "^10.6.1",
     "nanoid": "^5.0.3",
     "next": "^14.1.1",
     "node-fetch": "^3.3.1",

@@ -50,11 +53,16 @@
     "sass": "^1.59.2",
     "spark-md5": "^3.0.2",
     "use-debounce": "^9.0.4",
-    "zustand": "^4.3.8"
+    "zustand": "^4.3.8",
+    "rt-client": "https://github.com/Azure-Samples/aoai-realtime-audio-sdk/releases/download/js/v0.5.0/rt-client-0.5.0.tgz"
   },
   "devDependencies": {
     "@tauri-apps/api": "^1.6.0",
     "@tauri-apps/cli": "1.5.11",
+    "@testing-library/dom": "^10.4.0",
+    "@testing-library/jest-dom": "^6.6.3",
+    "@testing-library/react": "^16.1.0",
+    "@types/jest": "^29.5.14",
     "@types/js-yaml": "4.0.9",
     "@types/lodash-es": "^4.17.12",
     "@types/node": "^20.11.30",

@@ -70,8 +78,11 @@
     "eslint-plugin-prettier": "^5.1.3",
     "eslint-plugin-unused-imports": "^3.2.0",
     "husky": "^8.0.0",
+    "jest": "^29.7.0",
+    "jest-environment-jsdom": "^29.7.0",
     "lint-staged": "^13.2.2",
     "prettier": "^3.0.2",
+    "ts-node": "^10.9.2",
     "tsx": "^4.16.0",
     "typescript": "5.2.2",
     "watch": "^1.0.2",
|
@ -0,0 +1,48 @@
|
|||
// @ts-nocheck
|
||||
class AudioRecorderProcessor extends AudioWorkletProcessor {
|
||||
constructor() {
|
||||
super();
|
||||
this.isRecording = false;
|
||||
this.bufferSize = 2400; // 100ms at 24kHz
|
||||
this.currentBuffer = [];
|
||||
|
||||
this.port.onmessage = (event) => {
|
||||
if (event.data.command === "START_RECORDING") {
|
||||
this.isRecording = true;
|
||||
} else if (event.data.command === "STOP_RECORDING") {
|
||||
this.isRecording = false;
|
||||
|
||||
if (this.currentBuffer.length > 0) {
|
||||
this.sendBuffer();
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
sendBuffer() {
|
||||
if (this.currentBuffer.length > 0) {
|
||||
const audioData = new Float32Array(this.currentBuffer);
|
||||
this.port.postMessage({
|
||||
eventType: "audio",
|
||||
audioData: audioData,
|
||||
});
|
||||
this.currentBuffer = [];
|
||||
}
|
||||
}
|
||||
|
||||
process(inputs) {
|
||||
const input = inputs[0];
|
||||
if (input.length > 0 && this.isRecording) {
|
||||
const audioData = input[0];
|
||||
|
||||
this.currentBuffer.push(...audioData);
|
||||
|
||||
if (this.currentBuffer.length >= this.bufferSize) {
|
||||
this.sendBuffer();
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
registerProcessor("audio-recorder-processor", AudioRecorderProcessor);
|
|
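The worklet above buffers roughly 100 ms of samples and posts them to the main thread, toggled by `START_RECORDING`/`STOP_RECORDING` messages. A hedged sketch of the main-thread side (the module path `/audio-processor.js` is an assumption about where the file is served from):

```ts
// Sketch: loading the worklet, wiring a mic source, and toggling recording
// via the message commands the processor understands.
async function startRecording(ctx: AudioContext, mic: MediaStream) {
  await ctx.audioWorklet.addModule("/audio-processor.js"); // path assumed
  const source = ctx.createMediaStreamSource(mic);
  const node = new AudioWorkletNode(ctx, "audio-recorder-processor");
  node.port.onmessage = (e) => {
    if (e.data.eventType === "audio") {
      // ~100ms Float32Array chunks at the context sample rate
      console.log("chunk samples:", e.data.audioData.length);
    }
  };
  source.connect(node);
  node.port.postMessage({ command: "START_RECORDING" });
  // Returned closure flushes the partial buffer and stops capture.
  return () => node.port.postMessage({ command: "STOP_RECORDING" });
}
```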
@@ -348,9 +348,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"

 [[package]]
 name = "bytes"
-version = "1.4.0"
+version = "1.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"
+checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
 dependencies = [
  "serde",
 ]

@@ -942,9 +942,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

 [[package]]
 name = "form_urlencoded"
-version = "1.1.0"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
+checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456"
 dependencies = [
  "percent-encoding",
 ]

@@ -970,9 +970,9 @@ dependencies = [

 [[package]]
 name = "futures-core"
-version = "0.3.28"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c"
+checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"

 [[package]]
 name = "futures-executor"

@@ -987,9 +987,9 @@ dependencies = [

 [[package]]
 name = "futures-io"
-version = "0.3.28"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"
+checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"

 [[package]]
 name = "futures-lite"

@@ -1008,9 +1008,9 @@ dependencies = [

 [[package]]
 name = "futures-macro"
-version = "0.3.28"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
+checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
 dependencies = [
  "proc-macro2",
  "quote",

@@ -1019,21 +1019,21 @@ dependencies = [

 [[package]]
 name = "futures-sink"
-version = "0.3.29"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817"
+checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"

 [[package]]
 name = "futures-task"
-version = "0.3.28"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65"
+checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"

 [[package]]
 name = "futures-util"
-version = "0.3.28"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533"
+checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
 dependencies = [
  "futures-core",
  "futures-io",

@@ -1555,9 +1555,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"

 [[package]]
 name = "idna"
-version = "0.3.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
+checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
 dependencies = [
  "unicode-bidi",
  "unicode-normalization",

@@ -1986,6 +1986,10 @@ checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
 name = "nextchat"
 version = "0.1.0"
 dependencies = [
+ "bytes",
+ "futures-util",
+ "percent-encoding",
  "reqwest",
  "serde",
  "serde_json",
  "tauri",

@@ -2281,9 +2285,9 @@ checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd"

 [[package]]
 name = "percent-encoding"
-version = "2.2.0"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
+checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"

 [[package]]
 name = "phf"

@@ -2545,9 +2549,9 @@ checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068"

 [[package]]
 name = "proc-macro2"
-version = "1.0.58"
+version = "1.0.86"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8"
+checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
 dependencies = [
  "unicode-ident",
 ]

@@ -3889,9 +3893,9 @@ checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"

 [[package]]
 name = "url"
-version = "2.3.1"
+version = "2.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643"
+checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c"
 dependencies = [
  "form_urlencoded",
  "idna",