Compare commits

...

464 Commits

Author SHA1 Message Date
RiverRay 48469bd8ca
Merge pull request #6392 from ChatGPTNextWeb/Leizhenpeng-patch-6
Update README.md
2025-03-20 17:52:02 +08:00
RiverRay 5a5e887f2b
Update README.md 2025-03-20 17:51:47 +08:00
RiverRay b6f5d75656
Merge pull request #6344 from vangie/fix/jest-setup-esm
test: fix unit test failures
2025-03-14 20:04:56 +08:00
Vangie Du 0d41a17ef6 test: fix unit test failures 2025-03-07 14:49:17 +08:00
RiverRay f7cde17919
Merge pull request #6292 from Little-LittleProgrammer/feature/alibaba-omni-support
feat(alibaba): Added alibaba vision model and omni model support
2025-03-01 10:25:16 +08:00
RiverRay 570cbb34b6
Merge pull request #6310 from agi-dude/patch-1
Remove duplicate links
2025-03-01 10:24:38 +08:00
RiverRay 7aa9ae0a3e
Merge pull request #6311 from ChatGPTNextWeb/6305-bugthe-first-message-except-the-system-message-of-deepseek-reasoner-must-be-a-user-message-but-an-assistant-message-detected
fix: enforce that the first message (excluding system messages) is a …
2025-02-28 19:48:09 +08:00
Kadxy 2d4180f5be fix: update request payload to use filtered messages in Deepseek API 2025-02-28 13:59:30 +08:00
Kadxy 9f0182b55e fix: enforce that the first message (excluding system messages) is a user message in the Deepseek API 2025-02-28 13:54:58 +08:00
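The rule behind these two fixes is mechanical: after any system messages, the first message sent to the DeepSeek reasoner endpoint must come from the user, so leading assistant turns have to be dropped from the payload. A minimal TypeScript sketch of that filtering, with an assumed message shape (the repo's actual types and filter live elsewhere):

```ts
// Hypothetical message shape for illustration; the app's own type differs.
interface RequestMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

// Ensure the first non-system message is a user message, as the
// DeepSeek reasoner API requires; leading assistant turns are dropped.
function filterForDeepseek(messages: RequestMessage[]): RequestMessage[] {
  const system = messages.filter((m) => m.role === "system");
  const rest = messages.filter((m) => m.role !== "system");
  const firstUser = rest.findIndex((m) => m.role === "user");
  return firstUser === -1 ? system : [...system, ...rest.slice(firstUser)];
}
```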
Mr. AGI ad6666eeaf
Update README.md 2025-02-28 10:47:52 +05:00
EvanWu a2c4e468a0 fix(app/utils/chat.ts): fix type error 2025-02-26 19:58:32 +08:00
RiverRay 2167076652
Merge pull request #6293 from hyiip/main
claude 3.7 support
2025-02-26 18:41:28 +08:00
RiverRay e123076250
Merge pull request #6295 from rexkyng/patch-1
Fix: Improve Mistral icon detection and remove redundant code.
2025-02-26 18:39:59 +08:00
Rex Ng ebcb4db245
Fix: Improve Mistral icon detection and remove redundant code.
- Added "codestral" to the list of acceptable names for the Mistral icon, ensuring proper detection.
- Removed duplicate `toLowerCase()` calls.
2025-02-25 14:30:18 +08:00
EvanWu 0a25a1a8cb refactor(app/utils/chat.ts): optimize function preProcessImageContentBase 2025-02-25 09:22:47 +08:00
hyiip f3154b20a5 claude 3.7 support 2025-02-25 03:55:24 +08:00
EvanWu b709ee3983 feat(alibaba): Added alibaba vision model and omni model support 2025-02-24 20:18:07 +08:00
RiverRay f5f3ce94f6
Update README.md 2025-02-21 08:56:43 +08:00
RiverRay 2b5f600308
Update README.md 2025-02-21 08:55:40 +08:00
RiverRay b966107117
Merge pull request #6235 from DBCDK/danish-locale
Translation to Danish
2025-02-17 22:58:01 +08:00
river 377480b448 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-16 10:50:07 +08:00
river 8bd0d6a1a7 chore: Update NextChatAI domain from nextchat.dev to nextchat.club 2025-02-16 10:48:54 +08:00
Rasmus Erik Voel Jensen 90827fc593 Danish rewording / improved button label 2025-02-15 13:08:58 +01:00
Rasmus Erik Voel Jensen 008e339b6d Danish locale 2025-02-15 12:52:44 +01:00
RiverRay 12863f5213
Merge pull request #6204 from bestsanmao/ali_bytedance_reasoning_content
add 3 types of reasoning_content support (+deepseek-r1@OpenAI, @Alibaba, @ByteDance), parse <think></think> from SSE
2025-02-13 14:53:47 +08:00
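When a model signals reasoning with inline `<think></think>` markers rather than a separate `reasoning_content` field, the client has to split the accumulated SSE text itself. A rough sketch of that split, assuming a single leading think block (the actual parser also has to tolerate tags broken across SSE chunks):

```ts
// Split accumulated streamed text into reasoning and answer parts.
function splitThinking(accumulated: string): { reasoning: string; content: string } {
  const open = "<think>";
  const close = "</think>";
  if (!accumulated.startsWith(open)) {
    return { reasoning: "", content: accumulated };
  }
  const end = accumulated.indexOf(close);
  if (end === -1) {
    // Closing tag not streamed yet: everything so far is reasoning.
    return { reasoning: accumulated.slice(open.length), content: "" };
  }
  return {
    reasoning: accumulated.slice(open.length, end),
    content: accumulated.slice(end + close.length),
  };
}
```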
suruiqiang cf140d4228 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into ali_bytedance_reasoning_content 2025-02-12 17:54:50 +08:00
suruiqiang 476d946f96 fix bug (trim eats space or \n mistakenly), optimize timeout by model 2025-02-12 17:49:54 +08:00
suruiqiang 9714258322 support deepseek-r1@OpenAI's reasoning_content, parse <think></think> from stream 2025-02-11 18:57:16 +08:00
RiverRay 48cd4b11b5
Merge pull request #6190 from siliconflow/refine-emoji-siliconflow
Fix model icon on SiliconFlow
2025-02-11 18:37:47 +08:00
RiverRay 77c78b230a
Merge pull request #6193 from siliconflow/get-models-siliconflow
Model listing of SiliconFlow
2025-02-11 18:37:22 +08:00
RiverRay b44686b887
Merge pull request #6189 from bestsanmao/bug_fix
fix avatar for export message preview and saved image
2025-02-11 18:36:50 +08:00
RiverRay 34bdd4b945
Merge pull request #6194 from siliconflow/vl-support-on-sf
Support VLM on SiliconFlow
2025-02-11 18:35:02 +08:00
suruiqiang b0758cccde optimization 2025-02-11 16:08:30 +08:00
suruiqiang 98a11e56d2 support alibaba and bytedance's reasoning_content 2025-02-11 12:46:46 +08:00
Shenghang Tsai 86f86962fb Support VLM on SiliconFlow 2025-02-10 13:39:06 +08:00
Shenghang Tsai 2137aa65bf Model listing of SiliconFlow 2025-02-10 11:03:49 +08:00
Shenghang Tsai 18fa2cc30d fix model icon on siliconflow 2025-02-09 18:49:26 +08:00
Shenghang Tsai 0bfc648085 fix model icon on siliconflow 2025-02-09 18:47:57 +08:00
suruiqiang 9f91c2d05c fix avatar for export message preview and saved image 2025-02-09 16:52:46 +08:00
RiverRay a029b4330b
Merge pull request #6188 from ChatGPTNextWeb/Leizhenpeng-patch-4
Update LICENSE
2025-02-09 11:05:43 +08:00
RiverRay 2842b264e0
Update LICENSE 2025-02-09 11:05:32 +08:00
RiverRay c2edfec16f
Merge pull request #6172 from bestsanmao/bug_fix
fix several bugs
2025-02-09 11:03:44 +08:00
RiverRay 6406ac99a3
Merge pull request #6175 from itsevin/main
Add other xAI model
2025-02-09 11:02:13 +08:00
suruiqiang 97a4aafc92 Merge remote-tracking branch 'remotes/origin/main' into bug_fix 2025-02-09 09:46:07 +08:00
GH Action - Upstream Sync d8f533e1f3 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-09 01:22:47 +00:00
RiverRay c6199dbf9f
Merge pull request #6186 from siliconflow/fix-truc-of-reasoning-model
Fix formatting of reasoning model on SiliconFlow
2025-02-08 23:40:39 +08:00
RiverRay 4273aa0803
Merge pull request #6185 from siliconflow/larger_timeout_for_siliconflow
Larger timeout for SiliconFlow
2025-02-08 23:39:49 +08:00
Shenghang Tsai acf75ce68f Remove unnecessary trimming 2025-02-08 16:34:17 +08:00
suruiqiang 1ae5fdbf01 mini optimizations 2025-02-08 16:15:10 +08:00
Shenghang Tsai 2a3996e0d6 Update siliconflow.ts 2025-02-08 14:38:12 +08:00
GH Action - Upstream Sync fdbaddde37 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-08 01:16:56 +00:00
suruiqiang d74f79e9c5 Merge remote-tracking branch 'remotes/origin/HEAD' into bug_fix 2025-02-08 08:29:34 +08:00
itsevin c4e9cb03a9 Add xAI model 2025-02-07 20:29:21 +08:00
RiverRay bf265d3375
Merge pull request #6164 from ZhangYichi-ZYc/main
Fix: Set consistent fill color for OpenAI/MoonShot/Grok SVG to prevent color inversion in dark mode
2025-02-07 20:25:20 +08:00
RiverRay 17f391d929
Merge pull request #6158 from dupl/main
update the latest Gemini models
2025-02-07 20:23:47 +08:00
RiverRay 78186c27fb
Merge pull request #6168 from xiexin12138/fix-env
Fix: add the missing SiliconFlow environment variables to env; append 2 paid models supported by SiliconFlow
2025-02-07 20:23:01 +08:00
suruiqiang a5a9768245 change request timeout for thinking mode 2025-02-07 16:34:14 +08:00
suruiqiang 3fe55b4f7f fix bug that gemini has multiple candidates part 2025-02-07 16:20:07 +08:00
suruiqiang f156430cc5 fix emoji issue for doubao and glm's cogview & cogvideox 2025-02-07 16:18:15 +08:00
suruiqiang f30c6a4348 fix doubao and grok not upload image 2025-02-07 16:14:19 +08:00
xiexin12138 a780b39c17 fix: add the paid DeepSeek models supported by SiliconFlow 2025-02-07 15:43:50 +08:00
xiexin12138 1010db834c fix: add the SiliconFlow env variables 2025-02-07 15:41:40 +08:00
ZhangYichi 51384ddc5f Fix: Set consistent fill color for OpenAI/MoonShot/Grok SVG to prevent color inversion in dark mode 2025-02-07 11:13:22 +08:00
dupl e5e5fde924
update the latest Gemini models 2025-02-07 06:50:31 +08:00
RiverRay add9ca200c
Merge pull request #6144 from Eric-2369/add-more-llm-icons
feat: add more llm icons
2025-02-06 18:08:08 +08:00
Eric-2369 5225a6e192
feat: add more llm icons 2025-02-05 12:34:00 +08:00
RiverRay 28cbe56cec
Merge pull request #6141 from siliconflow/provider_silicon
New provider SiliconFlow and Its Latest DeepSeek Models
2025-02-04 21:29:02 +08:00
Shenghang Tsai ad9ab9d45a New provider SiliconFlow and Its Latest DeepSeek Models
Update README.md

Update constant.ts

Update README_CN.md
2025-02-04 16:59:26 +08:00
RiverRay bb4832e6e7
Merge pull request #6129 from MonadMonAmi/update_knowledge_cutoff_date
chore: add knowledge cut off dates for o1 and o3
2025-02-04 09:38:04 +08:00
RiverRay 39b3487ea0
Merge branch 'main' into update_knowledge_cutoff_date 2025-02-04 09:37:55 +08:00
RiverRay 32b60909ae
Merge pull request #6132 from RetiredQQ/main
temporary fix for o3-mini
2025-02-04 09:35:43 +08:00
RiverRay 5db6775cb8
Merge pull request #6134 from zcong1993/main
fix: fix isModelNotavailableInServer logic for bytedance models
2025-02-04 09:34:43 +08:00
RiverRay b6881c7797
Merge pull request #6127 from dupl/main
add gemini-2.0-flash-thinking-exp, gemini-2.0-flash-thinking-exp-01-21
2025-02-04 09:33:13 +08:00
RiverRay 9943a52295
Update README.md 2025-02-04 09:31:16 +08:00
RiverRay 1db4d25370
Update README.md 2025-02-04 09:29:56 +08:00
zcong1993 92f57fb18f
fix: fix isModelNotavailableInServer logic for bytedance models 2025-02-03 16:58:42 +08:00
Sky 4c4d44e2f8 fix 2025-02-02 21:45:30 +00:00
Sky 8f12beb8f0 support o3-mini 2025-02-02 21:43:30 +00:00
AndrewS 2e7cac3218 chore: add knowledge cut off dates for o1 and o3 2025-02-02 19:44:53 +01:00
dupl 60fa358010
typo: OpanAI -> OpenAI 2025-02-02 23:27:45 +08:00
dupl 034b7d4655
add gemini-2.0-flash-thinking-exp, gemini-2.0-flash-thinking-exp-01-21 2025-02-02 23:11:07 +08:00
RiverRay 1e20b64048
Merge pull request #6121 from ChatGPTNextWeb/feat/support-openai-o3-mini
feat(model): add support for OpenAI o3-mini model
2025-02-02 20:57:21 +08:00
Kadxy 4f28fca506 feat: Support OpenAI o3-mini 2025-02-01 15:02:06 +08:00
RiverRay 3ef5993085
Merge pull request #6119 from ChatGPTNextWeb/Leizhenpeng-patch-3
Update README.md
2025-01-31 08:18:47 +08:00
RiverRay 09ad7c1875
Update README.md 2025-01-31 08:18:13 +08:00
RiverRay 31e52cb47e
Update README.md 2025-01-31 06:53:39 +08:00
RiverRay 9a69c5bd7c
Merge pull request #6118 from ChatGPTNextWeb/feat/issue-6104-deepseek-reasoning-content 2025-01-31 06:48:00 +08:00
Kadxy be645aab37 fix: revert unintended changes 2025-01-31 00:59:03 +08:00
RiverRay c41e86faa6
Merge pull request #6116 from ChatGPTNextWeb/feat/issue-6104-deepseek-reasoning-content
Support DeepSeek API streaming reasoning content
2025-01-31 00:52:18 +08:00
river 143be69a7f chore: remove log 2025-01-31 00:50:03 +08:00
river 63b7626656 chore: change md 2025-01-31 00:49:09 +08:00
Kadxy dabb7c70d5 feat: Remove reasoning_content for DeepSeek API messages 2025-01-31 00:30:08 +08:00
Kadxy c449737127 feat: Support DeepSeek API streaming with thinking mode 2025-01-31 00:07:52 +08:00
RiverRay 553b8c9f28
Update .env.template 2025-01-27 13:05:17 +08:00
river 19314793b8 Merge branch 'bestsanmao-bug_fix' 2025-01-27 12:55:31 +08:00
river 8680182921 feat: Add DeepSeek API key and fix MCP environment variable parsing 2025-01-27 12:48:59 +08:00
suruiqiang 2173c82bb5 add deepseek-reasoner, and change deepseek's summary model to deepseek-chat 2025-01-23 18:47:22 +08:00
suruiqiang 0d5e66a9ae not insert mcpSystemPrompt if not ENABLE_MCP 2025-01-23 18:24:38 +08:00
RiverRay 2f9cb5a68f
Merge pull request #6084 from ChatGPTNextWeb/temp-fix
fix: missing mcp_config.json files required for building
2025-01-22 21:40:37 +08:00
Kadxy 55cacfb7e2 fix: missing files required for building 2025-01-22 21:28:29 +08:00
RiverRay 6a862372f7
Merge pull request #6082 from ChatGPTNextWeb/Leizhenpeng-patch-2
Update README_CN.md
2025-01-22 13:11:11 +08:00
RiverRay 81bd83eb44
Update README_CN.md 2025-01-22 13:08:33 +08:00
RiverRay b2b6fd81be
Merge pull request #6075 from Kadxy/main 2025-01-20 10:44:46 +08:00
Kadxy f22cfd7b33
Update chat.tsx 2025-01-20 10:10:52 +08:00
RiverRay 8111acff34
Update README.md 2025-01-20 00:17:47 +08:00
RiverRay 4cad55379d
Merge pull request #5974 from ChatGPTNextWeb/feat-mcp
Support MCP (WIP)
2025-01-20 00:07:41 +08:00
Kadxy a3d3ce3f4c
Merge branch 'main' into feat-mcp 2025-01-19 23:28:12 +08:00
Kadxy 611e97e641 docs: update README.md 2025-01-19 23:20:58 +08:00
Kadxy bfeea4ed49 fix: prevent MCP operations from blocking chat interface 2025-01-19 01:02:01 +08:00
Kadxy bc71ae247b feat: add ENABLE_MCP env var to toggle MCP feature globally and in Docker 2025-01-18 21:19:01 +08:00
Kadxy 0112b54bc7 fix: missing en translation 2025-01-16 22:35:26 +08:00
Kadxy 65810d918b feat: improve async operations and UI feedback 2025-01-16 21:31:19 +08:00
river 4d535b1cd0 chore: enhance mcp prompt 2025-01-16 20:54:24 +08:00
Kadxy 588d81e8f1 feat: remove unused files 2025-01-16 09:17:08 +08:00
Kadxy d4f499ee41 feat: adjust form style 2025-01-16 09:11:53 +08:00
Kadxy 4d63d73b2e feat: load MCP preset data from server 2025-01-16 09:00:57 +08:00
Kadxy 07c63497dc feat: support stop/start MCP servers 2025-01-16 08:52:54 +08:00
Kadxy e440ff56c8 fix: env not work 2025-01-15 18:47:05 +08:00
river c89e4883b2 chore: update icon 2025-01-15 17:31:18 +08:00
river ac3d940de8 Merge branch 'feat-mcp' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into feat-mcp 2025-01-15 17:29:43 +08:00
Kadxy be59de56f0 feat: Display the number of clients instead of the number of available tools. 2025-01-15 17:24:04 +08:00
river a70e9a3c01 chore: update mcp icon 2025-01-15 17:23:10 +08:00
Kadxy 8aa9a500fd feat: Optimize MCP configuration logic 2025-01-15 16:52:54 +08:00
RiverRay 93652db688
Update README.md 2025-01-13 16:57:50 +08:00
RiverRay 8421c483e8
Update README.md 2025-01-12 12:56:13 +08:00
Dogtiti 4ac27fdd4d
Merge pull request #6033 from lvguanjun/fix_fork_session
fix: prevent message sync between forked sessions by generating unique IDs
2025-01-11 16:19:02 +08:00
Dogtiti b6b2c501fd
Merge pull request #6034 from dupl/main
Correct the typos in user-manual-cn.md
2025-01-11 16:17:32 +08:00
Kadxy ce13cf61a7 feat: ignore mcp_config.json 2025-01-09 20:15:47 +08:00
Kadxy a3af563e89 feat: Reset mcp_config.json to empty 2025-01-09 20:13:16 +08:00
Kadxy e95c94d7be fix: inaccurate content 2025-01-09 20:10:10 +08:00
Kadxy 125a71fead fix: unnecessary initialization 2025-01-09 20:07:24 +08:00
Kadxy b410ec399c feat: auto scroll to bottom when MCP response 2025-01-09 20:02:27 +08:00
Kadxy 7d51bfd42e feat: MCP market 2025-01-09 19:51:01 +08:00
Kadxy 0c14ce6417 fix: MCP execution content matching failed. 2025-01-09 13:41:17 +08:00
Kadxy f2a2b40d2c feat: carry mcp primitives content as a system prompt 2025-01-09 10:20:56 +08:00
Kadxy 77be190d76 feat: carry mcp primitives content as a system prompt 2025-01-09 10:09:46 +08:00
dupl c56587c438
Correct the typos in user-manual-cn.md 2025-01-05 20:34:18 +08:00
lvguanjun 840c151ab9 fix: prevent message sync between forked sessions by generating unique IDs 2025-01-05 11:22:53 +08:00
RiverRay 0af04e0f2f
Merge pull request #5468 from DDMeaqua/feat-shortcutkey
feat: #5422 shortcut key to clear context
2024-12-31 16:23:10 +08:00
DDMeaqua d184eb6458 chore: cmd + shift + backspace 2024-12-31 14:50:54 +08:00
DDMeaqua c5d9b1131e fix: merge bug 2024-12-31 14:38:58 +08:00
DDMeaqua e13408dd24 Merge branch 'main' into feat-shortcutkey 2024-12-31 14:30:09 +08:00
DDMeaqua aba4baf384 chore: update 2024-12-31 14:25:43 +08:00
DDMeaqua 6d84f9d3ae chore: update 2024-12-31 13:27:15 +08:00
Dogtiti 63c5baaa80
Merge pull request #6010 from code-october/fix-visionModels
Fix VISION_MODELS not taking effect when running in Docker
2024-12-31 09:56:46 +08:00
Dogtiti defefba925
Merge pull request #6016 from bestsanmao/add_deepseek
fix issue #6009: add setting items for deepseek
2024-12-30 19:27:20 +08:00
suruiqiang 90c531c224 fix issue #6009 add setting items for deepseek 2024-12-30 18:23:18 +08:00
code-october 266e9efd2e rename the function 2024-12-30 09:13:12 +00:00
code-october 57c88c0717 Fix VISION_MODELS not taking effect when running in Docker 2024-12-30 08:58:41 +00:00
DDMeaqua 5b5dea1c59 chore: change the shortcut key 2024-12-30 12:11:50 +08:00
Dogtiti d56566cd73
Merge pull request #6001 from bestsanmao/add_deepseek
docs: add DEEPSEEK_API_KEY and DEEPSEEK_URL in README
2024-12-30 09:42:22 +08:00
suruiqiang b5d104c908 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into add_deepseek 2024-12-30 09:04:40 +08:00
RiverRay f9e9129d52
Update README.md 2024-12-29 19:57:27 +08:00
suruiqiang 2a8a18391e docs: add DEEPSEEK_API_KEY and DEEPSEEK_URL in README 2024-12-29 15:31:50 +08:00
Dogtiti e1cb8e36fa
Merge pull request #5989 from bestsanmao/add_deepseek
since #5984, add DeepSeek as a new ModelProvider (with deepseek-chat & deepseek-coder models), so that users can use OpenAI and DeepSeek at the same time with different API URLs & keys
2024-12-29 12:35:21 +08:00
suruiqiang b948d6bf86 bug fix 2024-12-29 11:24:57 +08:00
Kadxy fe67f79050 feat: MCP message type 2024-12-29 09:24:52 +08:00
suruiqiang 67338ff9b7 add KnowledgeCutOffDate for deepseek 2024-12-29 08:58:45 +08:00
suruiqiang 7380c8a2c1 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into add_deepseek 2024-12-29 08:43:25 +08:00
Kadxy e1ba8f1b0f feat: Send MCP response as a user 2024-12-29 08:29:02 +08:00
Dogtiti c0062ff280
Merge pull request #5998 from dupl/main
Use regular expressions to make the code more concise.
2024-12-29 00:22:13 +08:00
dupl 39e593da48 Use regular expressions to make the code more concise. 2024-12-28 23:49:28 +08:00
Dogtiti f8b10ad8b1
Merge pull request #5997 from ChatGPTNextWeb/feature/glm-4v
feature: support glm-4v
2024-12-28 23:34:44 +08:00
Dogtiti 8a22c9d6db feature: support glm-4v 2024-12-28 23:33:06 +08:00
RiverRay 5f96804f3b
Merge pull request #5920 from fishshi/i18n
Use i18n for DISCOVERY
2024-12-28 22:05:37 +08:00
RiverRay 13430ea3e2
Merge pull request #5965 from zmhuanf/temp
Fix issue #5964: Prevents character loss in gemini-2.0-flash-thinking-exp-1219 responses
2024-12-28 22:02:02 +08:00
Kadxy 664879b9df feat: Create all MCP Servers at startup 2024-12-28 21:06:26 +08:00
Dogtiti 9df24e568b
Merge pull request #5996 from ChatGPTNextWeb/feature/cogview
Feature/cogview
2024-12-28 20:25:25 +08:00
Dogtiti bc322be448 fix: type error 2024-12-28 20:24:08 +08:00
Dogtiti a867adaf04 fix: size 2024-12-28 20:23:51 +08:00
Dogtiti 0cb186846a feature: support glm Cogview 2024-12-28 20:23:44 +08:00
Dogtiti e467ce028d
Merge pull request #5994 from ConnectAI-E/fix/failed-test
fix: failed unit test
2024-12-28 17:55:29 +08:00
Dogtiti cdfe907fb5 fix: failed unit test 2024-12-28 17:54:21 +08:00
Dogtiti d91af7f983
Merge pull request #5883 from code-october/fix/model-leak
fix model leak issue
2024-12-28 14:47:35 +08:00
Kadxy c3108ad333 feat: simple MCP example 2024-12-28 14:31:43 +08:00
suruiqiang 081daf937e since #5984, add DeepSeek as a new ModelProvider (with deepseek-chat & deepseek-coder models), so that users can use OpenAI and DeepSeek at the same time with different API URLs & keys 2024-12-27 16:57:26 +08:00
RiverRay 0c3d4462ca
Merge pull request #5976 from ChatGPTNextWeb/Leizhenpeng-patch-1
Update README.md
2024-12-23 22:47:59 +08:00
RiverRay 3c859fc29f
Update README.md 2024-12-23 22:47:16 +08:00
river e1c7c54dfa chore: change md 2024-12-23 22:32:36 +08:00
zmhuanf 87b5e3bf62 Fix bug 2024-12-22 15:44:47 +08:00
Dogtiti 1d15666713
Merge pull request #5919 from Yiming3/feature/flexible-visual-model
feat: runtime configuration of vision-capable models
2024-12-22 10:37:57 +08:00
Yiming Zhang a127ae1fb4 docs: add VISION_MODELS section to README files 2024-12-21 13:12:41 -05:00
Yiming Zhang ea1329f73e fix: add optional chaining to prevent errors when accessing visionModels 2024-12-21 04:07:58 -05:00
Yiming Zhang 149d732cb7 Merge remote-tracking branch 'upstream/main' into feature/flexible-visual-model 2024-12-21 03:53:05 -05:00
Yiming Zhang 210b29bfbe refactor: remove NEXT_PUBLIC_ prefix from VISION_MODELS env var 2024-12-21 03:51:54 -05:00
Dogtiti acc2e97aab
Merge pull request #5959 from dupl/gemini
add gemini-exp-1206, gemini-2.0-flash-thinking-exp-1219
2024-12-21 16:30:09 +08:00
dupl 93ac0e5017
Reorganized the Gemini model 2024-12-21 15:26:33 +08:00
Yiming Zhang ed8c3580c8 test: add unit tests for isVisionModel utility function 2024-12-20 19:07:00 -05:00
dupl 0a056a7c5c add gemini-exp-1206, gemini-2.0-flash-thinking-exp-1219 2024-12-21 08:00:37 +08:00
Yiming Zhang 74c4711cdd Merge remote-tracking branch 'upstream/main' into feature/flexible-visual-model 2024-12-20 18:34:07 -05:00
Dogtiti eceec092cf
Merge pull request #5932 from fengzai6/update-google-models
Update google models to add gemini-2.0
2024-12-21 00:43:02 +08:00
Dogtiti 42743410a8
Merge pull request #5940 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/react-16.1.0
chore(deps-dev): bump @testing-library/react from 16.0.1 to 16.1.0
2024-12-21 00:41:45 +08:00
Dogtiti 0f04756d4c
Merge pull request #5936 from InitialXKO/main
Change the "search image by text" mask to "AI text-to-image", and fine-tune its prompt so image generation is more stable and watermark-free
2024-12-21 00:40:45 +08:00
dependabot[bot] acdded8161
chore(deps-dev): bump @testing-library/react from 16.0.1 to 16.1.0
Bumps [@testing-library/react](https://github.com/testing-library/react-testing-library) from 16.0.1 to 16.1.0.
- [Release notes](https://github.com/testing-library/react-testing-library/releases)
- [Changelog](https://github.com/testing-library/react-testing-library/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/react-testing-library/compare/v16.0.1...v16.1.0)

---
updated-dependencies:
- dependency-name: "@testing-library/react"
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-16 10:57:34 +00:00
InitialXKO e939ce5a02
Change the "search image by text" mask to "AI text-to-image", and fine-tune its prompt so image generation is more stable and watermark-free 2024-12-13 22:29:14 +08:00
Nacho.L 46a0b100f7 Update versionKeywords 2024-12-13 08:29:43 +08:00
Nacho.L e27e8fb0e1 Update google models 2024-12-13 07:22:16 +08:00
fishshi 93c5320bf2 Use i18n for DISCOVERY 2024-12-10 15:56:04 +08:00
Yiming Zhang a433d1606c feat: use regex patterns for vision models and allow adding capabilities to models through env var NEXT_PUBLIC_VISION_MODELS. 2024-12-10 00:22:45 -05:00
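The feature reduces to matching model names against regex patterns, with extra vision-capable models injectable through the env var. A hedged TypeScript sketch; the default pattern list here is illustrative, not the repo's actual list:

```ts
// Illustrative defaults; the real keyword list lives in the app's constants.
const DEFAULT_VISION_PATTERNS: RegExp[] = [/gpt-4o/, /claude-3/, /gemini-.*vision/, /glm-4v/];

// Extra models can be granted vision capability at runtime, comma-separated,
// e.g. NEXT_PUBLIC_VISION_MODELS="my-custom-vlm,other-model".
function isVisionModel(
  model: string,
  envModels: string = process.env.NEXT_PUBLIC_VISION_MODELS ?? "",
): boolean {
  const extra = envModels.split(",").map((s) => s.trim()).filter(Boolean);
  return extra.includes(model) || DEFAULT_VISION_PATTERNS.some((re) => re.test(model));
}
```

(A later commit in this series drops the NEXT_PUBLIC_ prefix, so the variable becomes VISION_MODELS.)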
code-october cc5e16b045 update unit test 2024-11-30 07:30:52 +00:00
code-october 54f6feb2d7 update unit test 2024-11-30 07:28:38 +00:00
code-october e1ac0538b8 add unit test 2024-11-30 07:22:24 +00:00
code-october 1a678cb4d8 fix model leak issue 2024-11-29 15:47:28 +00:00
Dogtiti 83cea3a90d
Merge pull request #5879 from frostime/textline-custom-model
🎨 style(setting): Place custom-model's input in a separate row.
2024-11-28 12:02:42 +08:00
frostime 759a09a76c 🎨 style(setting): Place custom-model's input in a separate row. 2024-11-27 13:11:18 +08:00
Dogtiti 2623a92763
Merge pull request #5850 from code-october/fix-o1
Fix o1
2024-11-25 12:31:36 +08:00
Dogtiti 3932c594c7
Merge pull request #5861 from code-october/update-model
update new model for gpt-4o and gemini-exp
2024-11-22 20:59:30 +08:00
code-october b7acb89096 update new model for gpt-4o and gemini-exp 2024-11-22 09:48:50 +00:00
code-october ef24d3e633 use stream when request o1 2024-11-21 03:46:10 +00:00
code-october 23350c842b fix o1 in disableGPT4 2024-11-21 03:45:07 +00:00
Dogtiti a2adfbbd32
Merge pull request #5821 from Sherlocksuper/scroll
feat: support more user-friendly scrolling
2024-11-16 15:24:46 +08:00
Lloyd Zhou f22cec1eb4
Merge pull request #5827 from ConnectAI-E/fix/markdown-embed-codeblock
fix: rendering error when a code block embeds a smaller code block
2024-11-15 16:03:27 +08:00
opchips e56216549e fix: rendering error when a code block embeds a smaller code block 2024-11-15 11:56:26 +08:00
Sherlock 19facc7c85 feat: support more user-friendly scrolling 2024-11-14 21:31:45 +08:00
Lloyd Zhou b08ce5630c
Merge pull request #5819 from ConnectAI-E/fix-gemini-summary
Fix gemini summary
2024-11-13 15:17:44 +08:00
DDMeaqua b41c012d27 chore: shouldStream 2024-11-13 15:12:46 +08:00
Lloyd Zhou a392daab71
Merge pull request #5816 from ConnectAI-E/feature/artifacts-svg
artifacts support svg
2024-11-13 14:58:33 +08:00
DDMeaqua 0628ddfc6f chore: update 2024-11-13 14:27:41 +08:00
DDMeaqua 7eda14f138 fix: [#5308] Gemini conversation summarization 2024-11-13 14:24:44 +08:00
opchips 9a86c42c95 update 2024-11-12 16:33:55 +08:00
Lloyd Zhou 819d249a09
Merge pull request #5815 from LovelyGuYiMeng/main
Update vision model matching keywords
2024-11-12 15:04:11 +08:00
LovelyGuYiMeng 8d66fedb1f
Update visionKeywords 2024-11-12 14:28:11 +08:00
Lloyd Zhou 7cf89b53ce
Merge pull request #5812 from ConnectAI-E/fix/rerender-chat
fix: use current session id to trigger rerender
2024-11-12 13:49:51 +08:00
Dogtiti 459c373f13
Merge pull request #5807 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/jest-dom-6.6.3
chore(deps-dev): bump @testing-library/jest-dom from 6.6.2 to 6.6.3
2024-11-11 20:59:56 +08:00
Dogtiti 1d14a991ee fix: use current session id to trigger rerender 2024-11-11 20:30:59 +08:00
dependabot[bot] 05ef5adfa7
chore(deps-dev): bump @testing-library/jest-dom from 6.6.2 to 6.6.3
Bumps [@testing-library/jest-dom](https://github.com/testing-library/jest-dom) from 6.6.2 to 6.6.3.
- [Release notes](https://github.com/testing-library/jest-dom/releases)
- [Changelog](https://github.com/testing-library/jest-dom/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/jest-dom/compare/v6.6.2...v6.6.3)

---
updated-dependencies:
- dependency-name: "@testing-library/jest-dom"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-11 10:53:00 +00:00
lloydzhou 38fa3056df update version 2024-11-11 13:26:08 +08:00
Lloyd Zhou 289aeec8af
Merge pull request #5786 from ConnectAI-E/feature/realtime-chat
Feature/realtime chat
2024-11-11 13:19:26 +08:00
lloydzhou 7d71da938f remove close-24 svg 2024-11-11 13:15:09 +08:00
Lloyd Zhou f8f6954115
Merge pull request #5779 from ConnectAI-E/feature/model/claude35haiku
add claude35haiku (no vision support)
2024-11-11 13:13:09 +08:00
Lloyd Zhou 6e03f32871
Merge pull request #5795 from JingSyue/main
fix: built-in plugin dalle3 error #5787
2024-11-11 13:10:00 +08:00
JingSyue 18a6571883
Update proxy.ts
2024-11-11 12:59:29 +08:00
Dogtiti 14f444e1f0 doc: realtime chat 2024-11-11 11:47:41 +08:00
JingSyue 2b0f2e5f9d fix: built-in plugin dalle3 error #5787 2024-11-10 10:28:25 +08:00
Dogtiti 4629b39c29 chore: comment context history 2024-11-09 16:22:01 +08:00
Dogtiti d33e772fa5 feat: voice print 2024-11-08 22:39:17 +08:00
Dogtiti 89136fba32 feat: voice print 2024-11-08 22:18:39 +08:00
Dogtiti 8b4ca133fd feat: voice print 2024-11-08 22:02:31 +08:00
lloydzhou a4c9eaf6cd do not save empty audio file 2024-11-08 13:43:13 +08:00
lloydzhou 50e63109a3 merge code and get analyser data 2024-11-08 13:21:40 +08:00
Dogtiti 48a1e8a584 chore: i18n 2024-11-07 21:32:47 +08:00
Dogtiti e44ebe3f0e feat: realtime config 2024-11-07 21:28:23 +08:00
Lloyd Zhou 108069a0c6
Merge pull request #5788 from ConnectAI-E/fix-o1-maxtokens
chore: use max_completion_tokens for o1 models
2024-11-07 20:06:30 +08:00
DDMeaqua d5bda2904d chore: use max_completion_tokens for o1 models 2024-11-07 19:45:27 +08:00
lloydzhou 283caba8ce stop streaming play after get input audio. 2024-11-07 18:57:57 +08:00
lloydzhou b78e5db817 add temperature config 2024-11-07 17:55:51 +08:00
lloydzhou 46c469b2d7 add voice config 2024-11-07 17:47:55 +08:00
lloydzhou c00ebbea4f update 2024-11-07 17:40:03 +08:00
lloydzhou c526ff80b5 update 2024-11-07 17:23:20 +08:00
lloydzhou 0037b0c944 ts error 2024-11-07 17:03:04 +08:00
lloydzhou 6f81bb3b8a add context after connected 2024-11-07 16:56:15 +08:00
lloydzhou 7bdc45ed3e connect realtime model when open panel 2024-11-07 16:41:24 +08:00
Dogtiti 88cd3ac122 fix: ts error 2024-11-07 12:16:11 +08:00
Dogtiti 4988d2ee26 fix: ts error 2024-11-07 11:56:58 +08:00
lloydzhou 8deb7a92ee hotfix for update target session 2024-11-07 11:53:01 +08:00
lloydzhou db060d732a upload save record wav file 2024-11-07 11:45:38 +08:00
lloydzhou 522627820a upload save wav file logic 2024-11-07 09:36:22 +08:00
lloydzhou cf46d5ad63 upload response audio, and update audio_url to session message 2024-11-07 01:12:08 +08:00
Dogtiti a4941521d0 feat: audio to message 2024-11-06 22:30:02 +08:00
Dogtiti f6e1f8398b wip 2024-11-06 22:07:33 +08:00
Dogtiti d544eead38 feat: realtime chat ui 2024-11-06 21:14:45 +08:00
Lloyd Zhou fbb9385f23
Merge pull request #5782 from ConnectAI-E/style/classname
style: improve classname by clsx
2024-11-06 20:33:51 +08:00
Dogtiti 18144c3d9c chore: clsx 2024-11-06 20:16:38 +08:00
opchips 64aa760e58 update claude rank 2024-11-06 19:18:05 +08:00
Dogtiti e0bbb8bb68 style: improve classname by clsx 2024-11-06 16:58:26 +08:00
opchips 6667ee1c7f merge main 2024-11-06 15:08:18 +08:00
Lloyd Zhou 6ded4e96e7
Merge pull request #5778 from ConnectAI-E/fix/5436
fix: botMessage reply date
2024-11-06 15:04:46 +08:00
Dogtiti 85cdcab850 fix: botMessage reply date 2024-11-06 14:53:08 +08:00
Lloyd Zhou f4c9410c29
Merge pull request #5776 from ConnectAI-E/feat-glm
fix: glm chatpath
2024-11-06 14:02:20 +08:00
DDMeaqua adf7d8200b fix: glm chatpath 2024-11-06 13:55:57 +08:00
opchips 3086a2fa77 add claude35haiku (no vision support) 2024-11-06 12:56:24 +08:00
Lloyd Zhou f526d6f560
Merge pull request #5774 from ConnectAI-E/feature/update-target-session
fix: updateCurrentSession => updateTargetSession
2024-11-06 11:16:33 +08:00
Dogtiti 106461a1e7 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/update-target-session 2024-11-06 11:08:41 +08:00
Dogtiti c4e19dbc59 fix: updateCurrentSession => updateTargetSession 2024-11-06 11:06:18 +08:00
Dogtiti f3603e59fa
Merge pull request #5769 from ryanhex53/fix-model-multi@
Custom model names can include the `@` symbol by itself.
2024-11-06 10:49:28 +08:00
ryanhex53 8e2484fcdf Refactor: Replace all provider split occurrences with getModelProvider utility method 2024-11-05 13:52:54 +00:00
lloydzhou 00d6cb27f7 update version 2024-11-05 17:42:55 +08:00
ryanhex53 b844045d23 Custom model names can include the `@` symbol by itself.
To specify the model's provider, append it after the model name using `@` as before.

This format supports cases like `google vertex ai` with a model name like `claude-3-5-sonnet@20240620`.

For instance, `claude-3-5-sonnet@20240620@vertex-ai` will be split by `split(/@(?!.*@)/)` into:

`[ 'claude-3-5-sonnet@20240620', 'vertex-ai' ]`, where the former is the model name and the latter is the custom provider.
2024-11-05 07:44:12 +00:00
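A small usage sketch of the behavior described above: the negative lookahead makes `split` match only the last `@`, so date-pinned model names keep their own `@`. The function body here is an assumption; `getModelProvider` is the utility name the follow-up refactor introduces:

```ts
// Split "model[@provider]" on the LAST "@" only, so model names that
// themselves contain "@" (e.g. vertex ai date-pinned models) stay intact.
function getModelProvider(name: string): [string, string | undefined] {
  const [model, provider] = name.split(/@(?!.*@)/);
  return [model, provider];
}

console.log(getModelProvider("claude-3-5-sonnet@20240620@vertex-ai"));
// => [ 'claude-3-5-sonnet@20240620', 'vertex-ai' ]
console.log(getModelProvider("gpt-4o"));
// => [ 'gpt-4o', undefined ]
```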
Lloyd Zhou e49fe976d9
Merge pull request #5765 from ConnectAI-E/feature/onfinish
feat: update real 'currentSession'
2024-11-05 15:07:52 +08:00
Dogtiti 14f751965f
Merge pull request #5767 from ConnectAI-E/feat-glm
chore: update readme
2024-11-05 11:07:52 +08:00
DDMeaqua 0ec423389f chore: update readme 2024-11-05 11:06:20 +08:00
Dogtiti 820ab54e2d
Merge pull request #5766 from ConnectAI-E/feature/add-claude-haiku3.5
Feature/add claude haiku3.5
2024-11-05 10:54:52 +08:00
lloydzhou a6c1eb27a8 add claude 3.5 haiku 2024-11-05 10:23:15 +08:00
Lloyd Zhou 0dc4071ccc
Merge pull request #5464 from endless-learner/main
Added 1-click deployment link for Alibaba Cloud.
2024-11-05 01:10:06 +08:00
Lloyd Zhou 4d3949718a
merge main 2024-11-05 01:09:27 +08:00
Dogtiti aef535f1a7
Merge pull request #5753 from ChatGPTNextWeb/feat-bt-doc
Feat bt doc
2024-11-04 21:41:11 +08:00
Dogtiti 686a80e727
Merge pull request #5764 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/react-16.0.1
chore(deps-dev): bump @testing-library/react from 16.0.0 to 16.0.1
2024-11-04 21:37:34 +08:00
Dogtiti e49466fa05 feat: update real 'currentSession' 2024-11-04 21:25:56 +08:00
dependabot[bot] 4b93370814
chore(deps-dev): bump @testing-library/react from 16.0.0 to 16.0.1
Bumps [@testing-library/react](https://github.com/testing-library/react-testing-library) from 16.0.0 to 16.0.1.
- [Release notes](https://github.com/testing-library/react-testing-library/releases)
- [Changelog](https://github.com/testing-library/react-testing-library/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/react-testing-library/compare/v16.0.0...v16.0.1)

---
updated-dependencies:
- dependency-name: "@testing-library/react"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-04 10:24:30 +00:00
Dogtiti 5733e3c588
Merge pull request #5759 from ConnectAI-E/feature/onfinish
Feature/onfinish
2024-11-04 17:16:44 +08:00
Dogtiti 44fc5b5cbf fix: onfinish responseRes 2024-11-04 17:00:45 +08:00
Dogtiti 2d3f7c922f fix: vision model dalle3 2024-11-04 15:51:04 +08:00
GH Action - Upstream Sync fe8cca3730 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-11-02 01:12:09 +00:00
weige fbb7a1e853 fix 2024-11-01 18:20:16 +08:00
weige fb2c15567d fix 2024-11-01 17:45:50 +08:00
weige c2c52a1f60 fix 2024-11-01 17:35:34 +08:00
weige 106ddc17cd fix 2024-11-01 17:35:09 +08:00
weige 17d5209738 add bt install doc 2024-11-01 17:28:20 +08:00
Dogtiti d66bfc6352
Merge pull request #5752 from ConnectAI-E/feat-glm
fix: ts error
2024-11-01 14:16:50 +08:00
DDMeaqua 4d75b23ed1 fix: ts error 2024-11-01 14:15:12 +08:00
Dogtiti 36bfa2ef7c
Merge pull request #5741 from ConnectAI-E/feat-glm
feat: [#5714] support GLM
2024-11-01 13:57:30 +08:00
DDMeaqua afe12c212e chore: update 2024-11-01 13:53:43 +08:00
GH Action - Upstream Sync adf97c6d8b Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-11-01 01:18:59 +00:00
DDMeaqua 7a8d557ea3 chore: enable plugins 2024-10-31 11:37:19 +08:00
DDMeaqua d3f0a77830 chore: update Provider 2024-10-31 11:23:06 +08:00
Dogtiti 0581e37236
Merge pull request #5744 from mrcore/main
add claude-3-5-sonnet-latest and claude-3-opus-latest
2024-10-31 11:19:34 +08:00
Core 44383a8b33
add claude-3-5-sonnet-latest and claude-3-opus-latest
2024-10-31 11:00:45 +08:00
GH Action - Upstream Sync 7c466c9b9c Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-10-31 01:14:28 +00:00
Dogtiti a0fa4d7e72
Merge pull request #5737 from hyiip/claude3.5
add constant to claude 3.5 sonnet 20241022
2024-10-31 00:13:16 +08:00
DDMeaqua d357b45e84 feat: [#5714] support GLM 2024-10-30 19:24:03 +08:00
Lloyd Zhou d0bd1bf8fd
Merge pull request #5740 from yuxuan-ctrl/main
feat: add code configuration for Alibaba-family models
2024-10-30 16:56:53 +08:00
yuxuan-ctrl 86ffa1e643 feat: add code configuration for Alibaba-family models 2024-10-30 16:30:01 +08:00
endless-learner b0d28eb77e
Merge branch 'main' into main 2024-10-29 14:38:49 -07:00
hyiip 736cbdbdd1 add constant to claude 3.5 sonnet 20241022 2024-10-30 02:18:41 +08:00
Dogtiti 613d67eada
Merge pull request #5729 from ConnectAI-E/feature/jest
chore: improve jest
2024-10-29 19:39:59 +08:00
Dogtiti 89cea18955 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/jest 2024-10-29 19:26:52 +08:00
Dogtiti 56bc77d20b
Merge pull request #5731 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/jest-dom-6.6.2
Bump @testing-library/jest-dom from 6.4.8 to 6.6.2
2024-10-28 21:52:08 +08:00
Dogtiti 6d93d37963
Merge pull request #5732 from ChatGPTNextWeb/dependabot/npm_and_yarn/types/jest-29.5.14
Bump @types/jest from 29.5.13 to 29.5.14
2024-10-28 21:51:59 +08:00
dependabot[bot] 24df85cf9d
Bump @types/jest from 29.5.13 to 29.5.14
Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.13 to 29.5.14.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest)

---
updated-dependencies:
- dependency-name: "@types/jest"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:31:34 +00:00
dependabot[bot] a4d7a2c6e3
Bump @testing-library/jest-dom from 6.4.8 to 6.6.2
Bumps [@testing-library/jest-dom](https://github.com/testing-library/jest-dom) from 6.4.8 to 6.6.2.
- [Release notes](https://github.com/testing-library/jest-dom/releases)
- [Changelog](https://github.com/testing-library/jest-dom/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/jest-dom/compare/v6.4.8...v6.6.2)

---
updated-dependencies:
- dependency-name: "@testing-library/jest-dom"
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:31:27 +00:00
Dogtiti 49d42bb45d chore: improve jest 2024-10-28 16:47:05 +08:00
Lloyd Zhou 4f49626303
Merge pull request #5722 from ElricLiu/main
Update README.md
2024-10-26 12:09:09 +08:00
ElricLiu 45db20c1c3
Update README.md 2024-10-26 11:16:43 +08:00
Lloyd Zhou 82994843f5
Merge pull request #5719 from ConnectAI-E/hotfix/status_text_error
hotfix for statusText is non ISO-8859-1 #5717
2024-10-25 20:34:15 +08:00
Dogtiti 1110a087a0
Merge pull request #5720 from ConnectAI-E/hotfix/gemini_invald_argument
hotfix for gemini invalid argument #5715
2024-10-25 18:25:46 +08:00
lloydzhou f0b3e10a6c hotfix for gemini invalid argument #5715 2024-10-25 18:19:22 +08:00
lloydzhou f89872b833 hotfix for gemini invalid argument #5715 2024-10-25 18:12:09 +08:00
lloydzhou 90ced92876 update 2024-10-25 18:05:29 +08:00
lloydzhou 2c74559010 hotfix 2024-10-25 18:02:51 +08:00
lloydzhou e3ca7e8b44 hotfix for statusText is non ISO-8859-1 #5717 2024-10-25 17:52:08 +08:00
lloydzhou 4745706c42 update version to v2.15.6 2024-10-24 15:32:27 +08:00
lloydzhou 801dc412f9 add claude-3.5-haiku 2024-10-24 15:28:05 +08:00
Dogtiti c7c2c0211a
Merge pull request #5704 from ConnectAI-E/feature/xai
xAi support
2024-10-23 14:13:17 +08:00
lloydzhou 65bb962fc0 hotfix 2024-10-23 12:00:59 +08:00
lloydzhou e791cd441d add xai 2024-10-23 11:55:25 +08:00
lloydzhou 8455fefc8a add xai 2024-10-23 11:40:06 +08:00
Lloyd Zhou 06f897f32f
Merge pull request #5679 from ConnectAI-E/fix/fetch
fix: use tauri fetch
2024-10-16 22:02:16 +08:00
Dogtiti deb1e76c41 fix: use tauri fetch 2024-10-16 21:57:07 +08:00
lloydzhou 463fa743e9 update version 2024-10-15 16:10:44 +08:00
Dogtiti cda4494cec
Merge pull request #5632 from ConnectAI-E/feature/H0llyW00dzZ-updater
Feature/H0llyW00dzZ updater
2024-10-15 14:31:49 +08:00
lloydzhou 87d85c10c3 update 2024-10-14 21:48:36 +08:00
Dogtiti 22f83c9e11
Merge pull request #5666 from ChatGPTNextWeb/dependabot/npm_and_yarn/types/jest-29.5.13
Bump @types/jest from 29.5.12 to 29.5.13
2024-10-14 20:36:53 +08:00
dependabot[bot] 7f454cbcec
Bump @types/jest from 29.5.12 to 29.5.13
Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.12 to 29.5.13.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest)

---
updated-dependencies:
- dependency-name: "@types/jest"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-14 10:49:46 +00:00
lloydzhou 426269d795 Merge remote-tracking branch 'connectai/main' into feature/H0llyW00dzZ-updater 2024-10-14 17:12:08 +08:00
Lloyd Zhou 370f143157
Merge pull request #5661 from ChatGPTNextWeb/remove-pr-preview
update test run target
2024-10-14 17:11:26 +08:00
lloydzhou 103106bb93 update test run target 2024-10-14 17:10:02 +08:00
lloydzhou 2419083adf Merge remote-tracking branch 'connectai/main' into feature/H0llyW00dzZ-updater 2024-10-14 17:04:12 +08:00
Lloyd Zhou c25903bfb4
Merge pull request #5658 from ccq18/main
fix: change the o1-series model timeout to 4 minutes
2024-10-14 16:57:29 +08:00
Lloyd Zhou e34c266438
Merge pull request #5660 from ChatGPTNextWeb/remove-pr-preview
update deploy_preview run target
2024-10-14 16:55:48 +08:00
lloydzhou 8c39a687b5 update deploy_preview run target 2024-10-14 16:53:46 +08:00
ccq18 592f62005b Only change the o1 timeout to 4 minutes, to reduce request failures for o1-series models 2024-10-14 16:31:17 +08:00
ccq18 12e7caa209 fix: change the default timeout to 3 minutes, support o1-mini 2024-10-14 16:03:01 +08:00
Lloyd Zhou b016771555
Merge pull request #5599 from ConnectAI-E/feature/allow-send-image-only
feat: allow send image only
2024-10-14 15:11:28 +08:00
Dogtiti a84383f919
Merge pull request #5647 from code-october/fix/setting-locale
Rename the "compression model" setting and add a feature hint for "generate conversation title"
2024-10-13 01:49:51 +08:00
code-october 7f68fb1ff2 Rename the "compression model" setting and add a feature hint for "generate conversation title" 2024-10-12 16:49:24 +00:00
Dogtiti 8d2003fe68
Merge pull request #5644 from ConnectAI-E/fix/siderbar-style
fix: sidebar style
2024-10-12 14:56:01 +08:00
Dogtiti 9961b513cc fix: sidebar style 2024-10-12 14:54:22 +08:00
Dogtiti 819238acaf fix: i18n 2024-10-11 20:49:43 +08:00
Dogtiti ad49916b1c
Merge pull request #5638 from ConnectAI-E/chore/test-action
chore: improve test
2024-10-11 20:44:20 +08:00
Dogtiti d18bd8a48a
Merge pull request #5640 from code-october/feature/enableCodeFold
Support enabling/disabling code folding from the frontend
2024-10-11 20:43:43 +08:00
code-october 4a1319f2c0 Code security optimization 2024-10-11 11:57:23 +00:00
code-october 8fd843d228 Standardize code per coderabbitai's suggestions 2024-10-11 11:38:52 +00:00
code-october 6792d6e475 Support enabling/disabling code folding from the frontend 2024-10-11 11:19:36 +00:00
Lloyd Zhou c139038e01
Merge pull request #5639 from code-october/fix/auth-ui
Improve the access code input box
2024-10-11 19:11:35 +08:00
code-october 4a7fd3a380 Improve the home page API input box 2024-10-11 10:36:11 +00:00
code-october c98dc31cdf Improve the access code input box 2024-10-11 09:03:20 +00:00
Dogtiti bd43af3a8d chore: cache node_modules 2024-10-11 15:41:46 +08:00
Dogtiti be98aa2078 chore: improve test 2024-10-11 15:17:38 +08:00
lloydzhou a0d4a04192 update 2024-10-11 11:52:24 +08:00
lloydzhou bd9de4dc4d fix version compare 2024-10-11 11:42:36 +08:00
lloydzhou 2eebfcf6fe client app tauri updater #2966 2024-10-11 11:29:22 +08:00
Lloyd Zhou c5074f0aa4
Merge pull request #5581 from ConnectAI-E/feature/gemini-functioncall
google gemini support function call
2024-10-10 21:02:36 +08:00
Lloyd Zhou ba58018a15
Merge pull request #5211 from ConnectAI-E/feature/jest
feat: jest
2024-10-10 21:02:05 +08:00
Lloyd Zhou 63ab83c3c8
Merge pull request #5621 from ConnectAI-E/hotfix/plugin-result
hotfix plugin result is not string #5614
2024-10-10 12:48:55 +08:00
lloydzhou 268cf3b606 hotfix plugin result is not string #5614 2024-10-10 12:47:25 +08:00
Lloyd Zhou fbc68fa776
Merge pull request #5602 from PeterDaveHello/ImproveTwLocale
i18n: improve tw Traditional Chinese locale
2024-10-09 19:38:06 +08:00
lloydzhou 4ae34ea3ee merge main 2024-10-09 18:27:23 +08:00
Lloyd Zhou 96273fd75e
Merge pull request #5611 from ConnectAI-E/feature/tauri-fetch-update
make sure get request_id before body chunk
2024-10-09 16:18:37 +08:00
lloydzhou 3e63d405c1 update 2024-10-09 16:12:01 +08:00
Lloyd Zhou 19b42aac5d
Merge pull request #5608 from ConnectAI-E/fix-readme
fix: [#5574] readme
2024-10-09 14:49:34 +08:00
Lloyd Zhou b67a23200e
Merge pull request #5610 from ChatGPTNextWeb/lloydzhou-patch-1
Update README.md
2024-10-09 14:48:55 +08:00
Lloyd Zhou 1dac02e4d6
Update README.md 2024-10-09 14:48:43 +08:00
Lloyd Zhou acad5b1d08
Merge pull request #5609 from ElricLiu/main
Update README.md
2024-10-09 14:45:27 +08:00
ElricLiu 4e9bb51d2f
Update README.md 2024-10-09 14:43:49 +08:00
DDMeaqua c0c8cdbbf3 fix: [#5574] documentation error 2024-10-09 14:36:58 +08:00
Lloyd Zhou cbdc611b54
Merge pull request #5607 from ConnectAI-E/hotfix/summarize-model
fix compressModel, related #5426, fix #5606 #5603 #5575
2024-10-09 14:08:13 +08:00
lloydzhou 93ca303b6c fix ts error 2024-10-09 13:49:33 +08:00
lloydzhou a925b424a8 fix compressModel, related #5426, fix #5606 #5603 #5575 2024-10-09 13:42:25 +08:00
Lloyd Zhou 5b4d423b58
Merge pull request #5565 from ConnectAI-E/feature/using-tauri-fetch
Feat: using tauri fetch api in App
2024-10-09 13:03:01 +08:00
lloydzhou 6c1cbe120c update 2024-10-09 11:46:49 +08:00
Peter Dave Hello 77a58bc4b0 i18n: improve tw Traditional Chinese locale 2024-10-09 03:14:38 +08:00
Dogtiti 7d55a6d0e4 feat: allow send image only 2024-10-08 21:31:01 +08:00
Dogtiti 8ad63a6c25
Merge pull request #5586 from little-huang/patch-1
fix: correct typo in variable name from ALLOWD_PATH to ALLOWED_PATH
2024-10-08 15:26:41 +08:00
Dogtiti acf9fa36f9 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/jest 2024-10-08 10:30:47 +08:00
Dogtiti 461154bb03 fix: format package 2024-10-08 10:29:42 +08:00
little_huang cd75461f9e
fix: correct typo in variable name from ALLOWD_PATH to ALLOWED_PATH 2024-10-07 10:30:25 +08:00
Dogtiti 2bac174e6f
Merge pull request #4393 from ChatGPTNextWeb/dean-delete-escapeDollarNumber
bugfix: Delete the escapeDollarNumber function, which causes errors i…
2024-10-06 12:41:03 +08:00
Lloyd Zhou 65f80f81ad
Merge branch 'main' into dean-delete-escapeDollarNumber 2024-10-04 14:31:00 +08:00
lloydzhou 450766a44b google gemini support function call 2024-10-03 20:28:15 +08:00
Lloyd Zhou 05e6e4bffb
Merge pull request #5578 from code-october/fix/safe-equal
use safe equal operation
2024-10-03 10:59:32 +08:00
code-october fbb66a4a5d use safe equal operation 2024-10-03 02:08:10 +00:00
lloydzhou d51d31a559 update 2024-10-01 14:40:23 +08:00
lloydzhou 919ee51dca hover show errorMsg when plugin run error 2024-10-01 13:58:50 +08:00
lloydzhou 9c577ad9d5 hotfix for plugin runtime 2024-10-01 12:55:57 +08:00
lloydzhou 953114041b add connect timeout 2024-10-01 12:02:29 +08:00
lloydzhou d830c23dab hotfix for run plugin call post api 2024-09-30 15:38:13 +08:00
lloydzhou fd3568c459 hotfix for run plugin call post api 2024-09-30 15:33:40 +08:00
lloydzhou 3029dcb2f6 hotfix for run plugin call post api 2024-09-30 15:32:47 +08:00
lloydzhou 35e03e1bca remove code 2024-09-30 13:44:01 +08:00
Lloyd Zhou cea5b91f96
Merge pull request #5567 from ChatGPTNextWeb/fix-readme
update readme
2024-09-30 13:31:34 +08:00
lyf d2984db6e7 fix readme 2024-09-30 13:28:14 +08:00
lyf deb215ccd1 fix readme 2024-09-30 13:23:24 +08:00
lloydzhou 7173cf2184 update 2024-09-30 13:07:06 +08:00
Lloyd Zhou 0c697e123d
Merge pull request #5564 from code-october/fix/html-code
fix quoteEnd extract regex
2024-09-30 13:06:52 +08:00
lloydzhou edfa6d14ee update 2024-09-30 10:23:24 +08:00
lloydzhou b6d9ba93fa update 2024-09-30 10:18:30 +08:00
lloydzhou 6293b95a3b update default api base url 2024-09-30 10:13:11 +08:00
lloydzhou ef4665cd8b update 2024-09-30 02:57:51 +08:00
lloydzhou 8030e71a5a update 2024-09-30 02:33:02 +08:00
lloydzhou f42488d4cb using stream fetch replace old tauri http fetch 2024-09-30 02:28:19 +08:00
lloydzhou af49ed4fdc update 2024-09-30 01:51:14 +08:00
lloydzhou b174a40634 update 2024-09-30 01:44:27 +08:00
lloydzhou 3c01738c29 update 2024-09-30 01:37:16 +08:00
lloydzhou 9be58f3eb4 fix ts error 2024-09-30 01:30:20 +08:00
lloydzhou a50c282d01 remove DEFAULT_API_HOST 2024-09-30 01:19:20 +08:00
lloydzhou 5141145e4d revert plugin runtime using tauri/api/http, not using fetch_stream 2024-09-30 00:58:50 +08:00
lloydzhou b5f6e5a598 update 2024-09-30 00:38:30 +08:00
lloydzhou 7df308d655 Merge remote-tracking branch 'connectai/main' into feature/using-tauri-fetch 2024-09-29 23:36:17 +08:00
code-october f5ad51a35e fix quoteEnd extract regex 2024-09-29 14:29:42 +00:00
lloydzhou f9d4105170 stash code 2024-09-29 21:47:38 +08:00
lloydzhou 9e6ee50fa6 using stream_fetch in App 2024-09-29 20:32:36 +08:00
lloydzhou dd77ad5d74 Merge remote-tracking branch 'connectai/main' into feature/using-tauri-fetch 2024-09-29 19:44:28 +08:00
lloydzhou 3898c507c4 using stream_fetch in App 2024-09-29 19:44:09 +08:00
Lloyd Zhou fcba50f041
Merge pull request #5547 from ConnectAI-E/hotfix/plugin-opration-id
Hotfix/plugin operation
2024-09-29 16:15:02 +08:00
Lloyd Zhou 452fc86ad1
Merge pull request #5562 from ChatGPTNextWeb/hotfix-google-api
hotfix for `x-goog-api-key`
2024-09-29 15:57:20 +08:00
lloydzhou 5bdf411399 hotfix for `x-goog-api-key` 2024-09-29 15:51:28 +08:00
lloydzhou 2d920f7ccc using stream: schema to fetch in App 2024-09-28 15:05:41 +08:00
lloydzhou d84d51b475 using sse: schema to fetch in App 2024-09-28 01:19:39 +08:00
Dogtiti f9d6f4f9da
Merge pull request #5553 from ConnectAI-E/fix/default-model
fix: default model
2024-09-27 21:13:26 +08:00
Lloyd Zhou a13bd624e8
Merge pull request #5552 from joetsuihk/hotfix/upstream-sync-doc
docs: Hotfix/upstream sync doc update
2024-09-27 20:36:16 +08:00
Joe 8fb019b2e2
revert, leave sync.yml untouched
revert commit 19c4ed4463
2024-09-27 17:34:38 +08:00
Joe 2f3457e73d
Update correct links to the manual code update section (JP) 2024-09-27 17:33:02 +08:00
Dogtiti c6ebd6e73c fix: default model 2024-09-27 17:00:24 +08:00
Joe 2333a47c55
Update links in doc to manual code update section (CN) 2024-09-27 16:50:51 +08:00
Joe b35895b551
Update correct links to the manual code update section 2024-09-27 16:49:08 +08:00
Joe 19c4ed4463
docs links updated sync.yml
https://github.com/Yidadaa/ChatGPT-Next-Web is renamed to https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/
2024-09-27 16:43:50 +08:00
lloydzhou 22aa1698b4 try using method and path when operationId is undefined #5525 2024-09-27 13:31:49 +08:00
lloydzhou 07d089a2bd try using method and path when operationId is undefined #5525 2024-09-27 13:31:07 +08:00
Dogtiti 870ad913cc
Merge pull request #5545 from ConnectAI-E/hotfix/google-auth-header
fix: build error
2024-09-27 11:43:43 +08:00
Dogtiti 3fb389551b fix: build error 2024-09-27 11:42:16 +08:00
Dogtiti d12a4adfb5
Merge pull request #5541 from ConnectAI-E/hotfix/google-auth-header
google api using `x-google-api-key` header
2024-09-27 11:04:10 +08:00
lloydzhou 702e17c96b google api using `x-google-api-key` header 2024-09-26 23:21:42 +08:00
Lloyd Zhou 93ff7d26cc
Merge pull request #5529 from Leizhenpeng/support-saas-readme
Support saas version in readme
2024-09-25 16:34:25 +08:00
river 13777786c4 chore: ja 2024-09-25 16:30:26 +08:00
river 6655c64e55 chore: cn 2024-09-25 16:29:59 +08:00
endless-learner 064e964d75
Updated the Alibaba Cloud deployment link so it is readable when not logged in and allows choosing a region. 2024-09-24 23:05:32 -07:00
endless-learner 47fb40d572
Merge branch 'ChatGPTNextWeb:main' into main 2024-09-24 23:03:03 -07:00
river b7892b58f5 chore: support saas 2024-09-25 13:34:04 +08:00
endless-learner 9e18cc260b
Update README.md
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2024-09-24 13:55:00 -07:00
DDMeaqua 4c63ee23cd feat: #5422 shortcut key to clear context 2024-09-19 15:13:33 +08:00
endless-learner 03268ce4d8
Added 1-click deployment link for Alibaba Cloud. 2024-09-18 20:38:20 -07:00
Dogtiti 1287e39cc6 feat: run test before build 2024-08-06 19:24:47 +08:00
Dogtiti 1ef2aa35e9 feat: jest 2024-08-06 18:03:27 +08:00
butterfly 4d6b981a54 bugfix: Delete the escapeDollarNumber function, which causes errors in rendering a latex string 2024-03-26 11:43:55 +08:00
150 changed files with 12289 additions and 1762 deletions

.env.template

@@ -1,12 +1,20 @@
 # Your openai api key. (required)
 OPENAI_API_KEY=sk-xxxx

+# DeepSeek Api Key. (Optional)
+DEEPSEEK_API_KEY=
+
 # Access password, separated by comma. (optional)
 CODE=your-password

 # You can start service behind a proxy. (optional)
 PROXY_URL=http://localhost:7890

+# Enable MCP functionality (optional)
+# Default: Empty (disabled)
+# Set to "true" to enable MCP functionality
+ENABLE_MCP=
+
 # (optional)
 # Default: Empty
 # Google Gemini Pro API key, set if you want to use Google Gemini Pro API.

@@ -67,3 +75,9 @@ ANTHROPIC_URL=
 ### (optional)
 WHITE_WEBDAV_ENDPOINTS=

+### siliconflow Api key (optional)
+SILICONFLOW_API_KEY=
+
+### siliconflow Api url (optional)
+SILICONFLOW_URL=
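For illustration, a minimal sketch of how server code might consume the variables this diff adds; the names match the template above, but the config shape is assumed, not the app's actual code:

```ts
// MCP stays disabled unless ENABLE_MCP is explicitly "true" (a sketch).
export const serverConfig = {
  enableMcp: process.env.ENABLE_MCP === "true",
  deepseekApiKey: process.env.DEEPSEEK_API_KEY ?? "",
  siliconflowApiKey: process.env.SILICONFLOW_API_KEY ?? "",
  siliconflowUrl: process.env.SILICONFLOW_URL ?? "",
};
```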

@@ -1 +1,3 @@
 public/serviceWorker.js
+app/mcp/mcp_config.json
+app/mcp/mcp_config.default.json

@@ -3,9 +3,7 @@ name: VercelPreviewDeployment
 on:
   pull_request_target:
     types:
-      - opened
-      - synchronize
-      - reopened
+      - review_requested

 env:
   VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }}

.github/workflows/test.yml (new file, +39 lines)

@@ -0,0 +1,39 @@
+name: Run Tests
+
+on:
+  push:
+    branches:
+      - main
+    tags:
+      - "!*"
+  pull_request:
+    types:
+      - review_requested
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Set up Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: 18
+          cache: "yarn"
+
+      - name: Cache node_modules
+        uses: actions/cache@v4
+        with:
+          path: node_modules
+          key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }}
+          restore-keys: |
+            ${{ runner.os }}-node_modules-
+
+      - name: Install dependencies
+        run: yarn install
+
+      - name: Run Jest tests
+        run: yarn test:ci

.gitignore (+3 lines)

@@ -46,3 +46,6 @@ dev
 *.key.pub
 masks.json

+# mcp config
+app/mcp/mcp_config.json

Dockerfile

@@ -34,12 +34,16 @@ ENV PROXY_URL=""
 ENV OPENAI_API_KEY=""
 ENV GOOGLE_API_KEY=""
 ENV CODE=""
+ENV ENABLE_MCP=""

 COPY --from=builder /app/public ./public
 COPY --from=builder /app/.next/standalone ./
 COPY --from=builder /app/.next/static ./.next/static
 COPY --from=builder /app/.next/server ./.next/server

+RUN mkdir -p /app/app/mcp && chmod 777 /app/app/mcp
+COPY --from=builder /app/app/mcp/mcp_config.default.json /app/app/mcp/mcp_config.json
+
 EXPOSE 3000

 CMD if [ -n "$PROXY_URL" ]; then \

LICENSE

@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2023-2024 Zhang Yifei
+Copyright (c) 2023-2025 NextChat

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

193
README.md
View File

@ -1,26 +1,32 @@
<div align="center"> <div align="center">
<a href='#企业版'> <a href='https://nextchat.club'>
<img src="./docs/images/ent.svg" alt="icon"/> <img src="https://github.com/user-attachments/assets/83bdcc07-ae5e-4954-a53a-ac151ba6ccf3" width="1000" alt="icon"/>
</a> </a>
<h1 align="center">NextChat (ChatGPT Next Web)</h1>
<h1 align="center">NextChat</h1>
English / [简体中文](./README_CN.md) English / [简体中文](./README_CN.md)
One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support. <a href="https://trendshift.io/repositories/5973" target="_blank"><img src="https://trendshift.io/api/badge/repositories/5973" alt="ChatGPTNextWeb%2FChatGPT-Next-Web | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。
✨ Light and Fast AI Assistant,with Claude, DeepSeek, GPT4 & Gemini Pro support.
[![Saas][Saas-image]][saas-url]
[![Web][Web-image]][web-url] [![Web][Web-image]][web-url]
[![Windows][Windows-image]][download-url] [![Windows][Windows-image]][download-url]
[![MacOS][MacOS-image]][download-url] [![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url] [![Linux][Linux-image]][download-url]
[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) [NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
[saas-url]: https://nextchat.club?utm_source=readme
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
[web-url]: https://app.nextchat.dev/ [web-url]: https://app.nextchat.dev/
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
@ -28,12 +34,25 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
[<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://vercel.com/button" alt="Deploy on Vercel" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/ChatGPTNextWeb/NextChat)
[<img src="https://github.com/user-attachments/assets/903482d4-3e87-4134-9af1-f2588fa90659" height="60" width="288" >](https://monica.im/?utm=nxcrp) [<img src="https://github.com/user-attachments/assets/903482d4-3e87-4134-9af1-f2588fa90659" height="50" width="" >](https://monica.im/?utm=nxcrp)
</div>
## 🥳 Cheer for DeepSeek, China's AI star!
> Purpose-Built UI for DeepSeek Reasoner Model
<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
## 🫣 NextChat Supports MCP!

> Before building, set the environment variable `ENABLE_MCP=true`.
<img src="https://github.com/user-attachments/assets/d8851f40-4e36-4335-b1a4-ec1e11488c7e"/>
## Enterprise Edition

Meeting Your Company's Privatization and Customization Deployment Requirements:

For enterprise inquiries, please contact: **business@nextchat.dev**
## Screenshots

![Settings](./docs/images/settings.png)

![More](./docs/images/more.png)
## Features
- [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
  - [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
- [x] Supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- [ ] local knowledge base

## What's New
- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- 🚀 v2.15.4 The app now supports using Tauri to fetch LLM APIs, for better security! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
- 🚀 v2.14.0 Now supports Artifacts & SD
- 🚀 v2.10.1 Now supports the Google Gemini Pro model.
- 🚀 v2.7 lets you share conversations as images, or share them to ShareGPT!
- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
## Get Started
1. Get [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. Click
   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web), remember that `CODE` is your page password;
## FAQ
[English > FAQ](./docs/faq-en.md)
## Keep Updated
If you have deployed your own project with just one click following the steps above, you may encounter the issue of "Updates Available" constantly showing up. This is because Vercel will create a new project for you by default instead of forking this project, resulting in the inability to detect updates correctly.

We recommend that you follow the steps below to re-deploy:
### Enable Automatic Updates
> If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code).
After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour:
You can star or watch this project or follow the author to get release notifications in time.
## Access Password
This project provides limited access control. Please add an environment variable named `CODE` on the vercel environment variables page. The value should be passwords separated by comma like this:
```
code1,code2,code3
```
After adding or modifying this environment variable, please redeploy the project for the changes to take effect.
## Environment Variables
### `CODE` (optional)

Access password, separated by comma.
### `IFLYTEK_API_SECRET` (optional)

iflytek Api Secret.
### `CHATGLM_API_KEY` (optional)
ChatGLM Api Key.
### `CHATGLM_URL` (optional)
ChatGLM Api Url.
### `DEEPSEEK_API_KEY` (optional)
DeepSeek Api Key.
### `DEEPSEEK_URL` (optional)
DeepSeek Api Url.
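These keys can be set like any other variable, e.g. in `.env.local` or as `-e` flags for Docker. A minimal sketch (all values below are placeholders; the URLs are only needed if you want to override the default endpoints):

```shell
CHATGLM_API_KEY=your-chatglm-key
DEEPSEEK_API_KEY=sk-xxxx
# Optional: route DeepSeek traffic through your own proxy endpoint
DEEPSEEK_URL=https://your-deepseek-proxy.example.com
```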
### `HIDE_USER_API_KEY` (optional)

> Default: Empty
To control custom models, use `+` to add a custom model, use `-` to hide a model, and use `name=displayName` to customize a model's display name, separated by comma.
Use `-all` to disable all default models, `+all` to enable all default models.
For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.

> Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list.
> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.

> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.
### `DEFAULT_MODEL` (optional)
Change the default model.
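For example (the model name here is purely illustrative):

```shell
DEFAULT_MODEL=gpt-4o-mini
```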
### `VISION_MODELS` (optional)
> Default: Empty
> Example: `gpt-4-vision,claude-3-opus,my-custom-model` means add vision capabilities to these models in addition to the default pattern matches (which detect models containing keywords like "vision", "claude-3", "gemini-1.5", etc).
Add additional models to have vision capabilities, beyond the default pattern matching. Multiple models should be separated by commas.
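Using the example from above verbatim:

```shell
VISION_MODELS=gpt-4-vision,claude-3-opus,my-custom-model
```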
### `WHITE_WEBDAV_ENDPOINTS` (optional)

You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format:
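A sketch of what such a value might look like, assuming each entry is a complete endpoint URL and entries are comma-separated (the hosts are placeholders):

```shell
WHITE_WEBDAV_ENDPOINTS=https://dav.example.com,https://backup-dav.example.org
```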
### `STABILITY_API_KEY` (optional)

Stability API key.

### `STABILITY_URL` (optional)

Customize Stability API url.
### `ENABLE_MCP` (optional)
Enable the MCP (Model Context Protocol) feature. A complete Docker run example is shown under [Deployment](#deployment) below.
### `SILICONFLOW_API_KEY` (optional)
SiliconFlow API Key.
### `SILICONFLOW_URL` (optional)
SiliconFlow API URL.
## Requirements

NodeJS >= 18, Docker >= 20
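You can quickly verify both requirements from a terminal:

```shell
node -v    # should print v18 or newer
docker -v  # should print version 20 or newer
```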
## Development
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
## Deployment
### Docker (Recommended)
If your proxy needs password, use:

```shell
   -e PROXY_URL="http://127.0.0.1:7890 user pass"
```
If you want to enable MCP, use:

```shell
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=your-password \
   -e ENABLE_MCP=true \
   yidadaa/chatgpt-next-web
```
### Shell

```shell
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/s
```
- [How to use Vercel (No English)](./docs/vercel-cn.md)
- [User Manual (Only Chinese, WIP)](./docs/user-manual-cn.md)
## Translation

If you want to add a new translation, read this [document](./docs/translation.md).
## Special Thanks
### Contributors
---

**README_CN.md**
<h1 align="center">NextChat</h1> <h1 align="center">NextChat</h1>
一键免费部署你的私人 ChatGPT 网页应用,支持 Claude, GPT4 & Gemini Pro 模型。
[NextChatAI](https://nextchat.club?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
[<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
企业版咨询: **business@nextchat.dev**
<img width="300" src="https://github.com/user-attachments/assets/3daeb7b6-ab63-4542-9141-2e4a12c80601"> <img width="300" src="https://github.com/user-attachments/assets/bb29a11d-ff75-48a8-b1f8-d2d7238cf987">
## 开始使用

### 打开自动更新
> 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)!
当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows,并启用 Upstream Sync Action,启用之后即可开启每小时定时自动更新:
### `OPENAI_API_KEY` (必填项)
OpenAI 密钥,你在 openai 账户页面申请的 api key,使用英文逗号隔开多个 key,这样可以随机轮询这些 key。
### `CODE` (可选)
讯飞星火 Api Secret.
### `CHATGLM_API_KEY` (可选)
ChatGLM Api Key.
### `CHATGLM_URL` (可选)
ChatGLM Api Url.
### `DEEPSEEK_API_KEY` (可选)
DeepSeek Api Key.
### `DEEPSEEK_URL` (可选)
DeepSeek Api Url.
### `HIDE_USER_API_KEY` (可选)
用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。
在Azure的模式下,支持使用`modelName@Azure=deploymentName`的方式配置模型名称和部署名称(deploy-name)

> 示例:`+gpt-3.5-turbo@Azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。
> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@Azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)`
在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name)

> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项
更改默认模型
### `VISION_MODELS` (可选)
> 默认值:空
> 示例:`gpt-4-vision,claude-3-opus,my-custom-model` 表示为这些模型添加视觉能力,作为对默认模式匹配的补充(默认会检测包含"vision"、"claude-3"、"gemini-1.5"等关键词的模型)。
在默认模式匹配之外,添加更多具有视觉能力的模型。多个模型用逗号分隔。
### `DEFAULT_INPUT_TEMPLATE` (可选)

自定义默认的 template,用于初始化『设置』中的『用户输入预处理』配置项
### `STABILITY_API_KEY` (可选)

Stability API 密钥

### `STABILITY_URL` (可选)

自定义的 Stability API 请求地址
### `ENABLE_MCP` (可选)

启用 MCP(Model Context Protocol)功能

### `SILICONFLOW_API_KEY` (可选)

SiliconFlow API Key.

### `SILICONFLOW_URL` (可选)

SiliconFlow API URL.
## 开发
## 部署
### 宝塔面板部署
> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
### 容器部署 (推荐)

> Docker 版本需要在 20 及其以上,否则会提示找不到镜像。
```shell
docker run -d -p 3000:3000 \
   -e OPENAI_API_KEY=sk-xxxx \
   -e CODE=页面访问密码 \
   yidadaa/chatgpt-next-web
```
如需启用 MCP 功能,可以使用:
```shell
docker run -d -p 3000:3000 \
-e OPENAI_API_KEY=sk-xxxx \
-e CODE=页面访问密码 \
-e ENABLE_MCP=true \
yidadaa/chatgpt-next-web
```
如果你的本地代理需要账号密码,可以使用:

```shell
---

**README_JA.md**
ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。
[NextChatAI](https://nextchat.club?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
[<img src="https://vercel.com/button" alt="Zeaburでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [<img src="https://vercel.com/button" alt="Zeaburでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
### 自動更新を開く
> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください!
プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります:
モデルリストを管理します。`+` でモデルを追加し、`-` でモデルを非表示にし、`モデル名=表示名` でモデルの表示名をカスタマイズし、カンマで区切ります。
Azure モードでは、`modelName@Azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。

> 例:`+gpt-3.5-turbo@Azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。
ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。

> 例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` この設定でモデルリストに `Doubao-lite-4k(ByteDance)` のオプションが表示されます。
デフォルトのモデルを変更します。
### `VISION_MODELS` (オプション)
> デフォルト:空
> 例:`gpt-4-vision,claude-3-opus,my-custom-model` は、これらのモデルにビジョン機能を追加します。これはデフォルトのパターンマッチング("vision"、"claude-3"、"gemini-1.5"などのキーワードを含むモデルを検出)に加えて適用されます。
デフォルトのパターンマッチングに加えて、追加のモデルにビジョン機能を付与します。複数のモデルはカンマで区切ります。
### `DEFAULT_INPUT_TEMPLATE` (オプション)

『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。
---

**app/api/[provider]/[...path]/route.ts**

import { handle as alibabaHandler } from "../../alibaba";
import { handle as moonshotHandler } from "../../moonshot";
import { handle as stabilityHandler } from "../../stability";
import { handle as iflytekHandler } from "../../iflytek";
import { handle as deepseekHandler } from "../../deepseek";
import { handle as siliconflowHandler } from "../../siliconflow";
import { handle as xaiHandler } from "../../xai";
import { handle as chatglmHandler } from "../../glm";
import { handle as proxyHandler } from "../../proxy";

async function handle(
return stabilityHandler(req, { params });
case ApiPath.Iflytek:
return iflytekHandler(req, { params });
case ApiPath.DeepSeek:
return deepseekHandler(req, { params });
case ApiPath.XAI:
return xaiHandler(req, { params });
case ApiPath.ChatGLM:
return chatglmHandler(req, { params });
case ApiPath.SiliconFlow:
return siliconflowHandler(req, { params });
case ApiPath.OpenAI:
return openaiHandler(req, { params });
default:
---

**app/api/alibaba.ts**
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Alibaba as string,
---

**app/api/anthropic.ts**
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";

const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Anthropic as string,
---

**app/api/auth.ts**

export function auth(req: NextRequest, modelProvider: ModelProvider) {
systemApiKey =
serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret;
break;
case ModelProvider.DeepSeek:
systemApiKey = serverConfig.deepseekApiKey;
break;
case ModelProvider.XAI:
systemApiKey = serverConfig.xaiApiKey;
break;
case ModelProvider.ChatGLM:
systemApiKey = serverConfig.chatglmApiKey;
break;
case ModelProvider.SiliconFlow:
systemApiKey = serverConfig.siliconFlowApiKey;
break;
case ModelProvider.GPT:
default:
if (req.nextUrl.pathname.includes("azure/deployments")) {
---

**app/api/baidu.ts**
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
import { getAccessToken } from "@/app/utils/baidu";

const serverConfig = getServerSideConfig();
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Baidu as string,
---

**app/api/bytedance.ts**
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

const serverConfig = getServerSideConfig();
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ByteDance as string,
---

**app/api/common.ts**
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server";
import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
import { getModelProvider, isModelNotavailableInServer } from "../utils/model";
const serverConfig = getServerSideConfig();
.filter((v) => !!v && !v.startsWith("-") && v.includes(modelName))
.forEach((m) => {
const [fullName, displayName] = m.split("=");
const [_, providerName] = getModelProvider(fullName);
if (providerName === "azure" && !displayName) {
const [_, deployId] = (serverConfig?.azureUrl ?? "").split(
"deployments/",
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
[
ServiceProvider.OpenAI,
ServiceProvider.Azure,
jsonBody?.model as string, // support provider-unspecified model
],
)
) {
return NextResponse.json(
---

**app/api/config/route.ts**

const DANGER_CONFIG = {
disableFastLink: serverConfig.disableFastLink,
customModels: serverConfig.customModels,
defaultModel: serverConfig.defaultModel,
visionModels: serverConfig.visionModels,
};

declare global {
---

**app/api/deepseek.ts** (new file)
import { getServerSideConfig } from "@/app/config/server";
import {
DEEPSEEK_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
console.log("[DeepSeek Route] params ", params);
if (req.method === "OPTIONS") {
return NextResponse.json({ body: "OK" }, { status: 200 });
}
const authResult = auth(req, ModelProvider.DeepSeek);
if (authResult.error) {
return NextResponse.json(authResult, {
status: 401,
});
}
try {
const response = await request(req);
return response;
} catch (e) {
console.error("[DeepSeek] ", e);
return NextResponse.json(prettyObject(e));
}
}
async function request(req: NextRequest) {
const controller = new AbortController();
// use the configured base url, or just strip the ApiPath.DeepSeek prefix from the path
let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.DeepSeek, "");
let baseUrl = serverConfig.deepseekUrl || DEEPSEEK_BASE_URL;
if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, -1);
}
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
const timeoutId = setTimeout(
() => {
controller.abort();
},
10 * 60 * 1000,
);
const fetchUrl = `${baseUrl}${path}`;
const fetchOptions: RequestInit = {
headers: {
"Content-Type": "application/json",
Authorization: req.headers.get("Authorization") ?? "",
},
method: req.method,
body: req.body,
redirect: "manual",
// @ts-ignore
duplex: "half",
signal: controller.signal,
};
// #1815 try to refuse some request to some models
if (serverConfig.customModels && req.body) {
try {
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
const jsonBody = JSON.parse(clonedBody) as { model?: string };
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.DeepSeek as string,
)
) {
return NextResponse.json(
{
error: true,
message: `you are not allowed to use ${jsonBody?.model} model`,
},
{
status: 403,
},
);
}
} catch (e) {
console.error(`[DeepSeek] filter`, e);
}
}
try {
const res = await fetch(fetchUrl, fetchOptions);
// to prevent browser prompt for credentials
const newHeaders = new Headers(res.headers);
newHeaders.delete("www-authenticate");
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");
return new Response(res.body, {
status: res.status,
statusText: res.statusText,
headers: newHeaders,
});
} finally {
clearTimeout(timeoutId);
}
}
---

**app/api/glm.ts** (new file)
import { getServerSideConfig } from "@/app/config/server";
import {
CHATGLM_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
console.log("[GLM Route] params ", params);
if (req.method === "OPTIONS") {
return NextResponse.json({ body: "OK" }, { status: 200 });
}
const authResult = auth(req, ModelProvider.ChatGLM);
if (authResult.error) {
return NextResponse.json(authResult, {
status: 401,
});
}
try {
const response = await request(req);
return response;
} catch (e) {
console.error("[GLM] ", e);
return NextResponse.json(prettyObject(e));
}
}
async function request(req: NextRequest) {
const controller = new AbortController();
// use the configured base url, or just strip the ApiPath.ChatGLM prefix from the path
let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, "");
let baseUrl = serverConfig.chatglmUrl || CHATGLM_BASE_URL;
if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, -1);
}
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
const timeoutId = setTimeout(
() => {
controller.abort();
},
10 * 60 * 1000,
);
const fetchUrl = `${baseUrl}${path}`;
console.log("[Fetch Url] ", fetchUrl);
const fetchOptions: RequestInit = {
headers: {
"Content-Type": "application/json",
Authorization: req.headers.get("Authorization") ?? "",
},
method: req.method,
body: req.body,
redirect: "manual",
// @ts-ignore
duplex: "half",
signal: controller.signal,
};
// #1815 try to refuse some request to some models
if (serverConfig.customModels && req.body) {
try {
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
const jsonBody = JSON.parse(clonedBody) as { model?: string };
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.ChatGLM as string,
)
) {
return NextResponse.json(
{
error: true,
message: `you are not allowed to use ${jsonBody?.model} model`,
},
{
status: 403,
},
);
}
} catch (e) {
console.error(`[GLM] filter`, e);
}
}
try {
const res = await fetch(fetchUrl, fetchOptions);
// to prevent browser prompt for credentials
const newHeaders = new Headers(res.headers);
newHeaders.delete("www-authenticate");
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");
return new Response(res.body, {
status: res.status,
statusText: res.statusText,
headers: newHeaders,
});
} finally {
clearTimeout(timeoutId);
}
}
---

**app/api/google.ts**
});
}
const bearToken =
req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || "";
const token = bearToken.trim().replaceAll("Bearer ", "").trim();
const apiKey = token ? token : serverConfig.googleApiKey;
},
10 * 60 * 1000,
);
const fetchUrl = `${baseUrl}${path}${
req?.nextUrl?.searchParams?.get("alt") === "sse" ? "?alt=sse" : ""
}`;
console.log("[Fetch Url] ", fetchUrl); console.log("[Fetch Url] ", fetchUrl);
@ -100,6 +101,9 @@ async function request(req: NextRequest, apiKey: string) {
headers: {
"Content-Type": "application/json",
"Cache-Control": "no-store",
"x-goog-api-key":
req.headers.get("x-goog-api-key") ||
(req.headers.get("Authorization") ?? "").replace("Bearer ", ""),
},
method: req.method,
body: req.body,
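With this change, a client calling the proxied Gemini route passes the key in the `x-goog-api-key` header instead of a `?key=` query parameter. A rough sketch of such a request (host, model path, and key are placeholders, not taken from this diff):

```shell
curl "https://your-nextchat-host/api/google/v1beta/models/gemini-pro:generateContent?alt=sse" \
  -H "Content-Type: application/json" \
  -H "x-goog-api-key: $GOOGLE_API_KEY" \
  -d '{"contents":[{"parts":[{"text":"Hello"}]}]}'
```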
---

**app/api/iflytek.ts**
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
// iflytek

const serverConfig = getServerSideConfig();
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Iflytek as string,
---

**app/api/moonshot.ts**
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";

const serverConfig = getServerSideConfig();
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.Moonshot as string,
---

**app/api/openai.ts**

import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth"; import { auth } from "./auth";
import { requestOpenai } from "./common"; import { requestOpenai } from "./common";
const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); const ALLOWED_PATH = new Set(Object.values(OpenaiPath));
function getModels(remoteModelRes: OpenAIListModelResponse) {
const config = getServerSideConfig();
if (config.disableGPT4) {
remoteModelRes.data = remoteModelRes.data.filter(
(m) =>
!(
m.id.startsWith("gpt-4") ||
m.id.startsWith("chatgpt-4o") ||
m.id.startsWith("o1") ||
m.id.startsWith("o3")
) || m.id.startsWith("gpt-4o-mini"),
);
}
const subpath = params.path.join("/");

if (!ALLOWED_PATH.has(subpath)) {
console.log("[OpenAI Route] forbidden path ", subpath); console.log("[OpenAI Route] forbidden path ", subpath);
return NextResponse.json( return NextResponse.json(
{ {
---

**app/api/proxy.ts**
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "@/app/config/server";
export async function handle(
req: NextRequest,
if (req.method === "OPTIONS") { if (req.method === "OPTIONS") {
return NextResponse.json({ body: "OK" }, { status: 200 }); return NextResponse.json({ body: "OK" }, { status: 200 });
} }
const serverConfig = getServerSideConfig();
// remove path params from searchParams
req.nextUrl.searchParams.delete("path");
return true;
}),
);
// if the request targets api.openai.com (e.g. DALL·E 3), fall back to the server's OpenAI API key
const baseUrl = req.headers.get("x-base-url");
if (baseUrl?.includes("api.openai.com")) {
if (!serverConfig.apiKey) {
return NextResponse.json(
{ error: "OpenAI API key not configured" },
{ status: 500 },
);
}
headers.set("Authorization", `Bearer ${serverConfig.apiKey}`);
}
const controller = new AbortController();
const fetchOptions: RequestInit = {
headers,
---

**app/api/siliconflow.ts** (new file)
import { getServerSideConfig } from "@/app/config/server";
import {
SILICONFLOW_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
console.log("[SiliconFlow Route] params ", params);
if (req.method === "OPTIONS") {
return NextResponse.json({ body: "OK" }, { status: 200 });
}
const authResult = auth(req, ModelProvider.SiliconFlow);
if (authResult.error) {
return NextResponse.json(authResult, {
status: 401,
});
}
try {
const response = await request(req);
return response;
} catch (e) {
console.error("[SiliconFlow] ", e);
return NextResponse.json(prettyObject(e));
}
}
async function request(req: NextRequest) {
const controller = new AbortController();
// use the configured base url, or just strip the ApiPath.SiliconFlow prefix from the path
let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.SiliconFlow, "");
let baseUrl = serverConfig.siliconFlowUrl || SILICONFLOW_BASE_URL;
if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, -1);
}
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
const timeoutId = setTimeout(
() => {
controller.abort();
},
10 * 60 * 1000,
);
const fetchUrl = `${baseUrl}${path}`;
const fetchOptions: RequestInit = {
headers: {
"Content-Type": "application/json",
Authorization: req.headers.get("Authorization") ?? "",
},
method: req.method,
body: req.body,
redirect: "manual",
// @ts-ignore
duplex: "half",
signal: controller.signal,
};
// #1815 try to refuse some request to some models
if (serverConfig.customModels && req.body) {
try {
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
const jsonBody = JSON.parse(clonedBody) as { model?: string };
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.SiliconFlow as string,
)
) {
return NextResponse.json(
{
error: true,
message: `you are not allowed to use ${jsonBody?.model} model`,
},
{
status: 403,
},
);
}
} catch (e) {
console.error(`[SiliconFlow] filter`, e);
}
}
try {
const res = await fetch(fetchUrl, fetchOptions);
// to prevent browser prompt for credentials
const newHeaders = new Headers(res.headers);
newHeaders.delete("www-authenticate");
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");
return new Response(res.body, {
status: res.status,
statusText: res.statusText,
headers: newHeaders,
});
} finally {
clearTimeout(timeoutId);
}
}
---

**app/api/xai.ts** (new file)
import { getServerSideConfig } from "@/app/config/server";
import {
XAI_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
console.log("[XAI Route] params ", params);
if (req.method === "OPTIONS") {
return NextResponse.json({ body: "OK" }, { status: 200 });
}
const authResult = auth(req, ModelProvider.XAI);
if (authResult.error) {
return NextResponse.json(authResult, {
status: 401,
});
}
try {
const response = await request(req);
return response;
} catch (e) {
console.error("[XAI] ", e);
return NextResponse.json(prettyObject(e));
}
}
async function request(req: NextRequest) {
const controller = new AbortController();
// use the configured base url, or just strip the ApiPath.XAI prefix from the path
let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, "");
let baseUrl = serverConfig.xaiUrl || XAI_BASE_URL;
if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, -1);
}
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
const timeoutId = setTimeout(
() => {
controller.abort();
},
10 * 60 * 1000,
);
const fetchUrl = `${baseUrl}${path}`;
const fetchOptions: RequestInit = {
headers: {
"Content-Type": "application/json",
Authorization: req.headers.get("Authorization") ?? "",
},
method: req.method,
body: req.body,
redirect: "manual",
// @ts-ignore
duplex: "half",
signal: controller.signal,
};
// #1815 try to refuse some request to some models
if (serverConfig.customModels && req.body) {
try {
const clonedBody = await req.text();
fetchOptions.body = clonedBody;
const jsonBody = JSON.parse(clonedBody) as { model?: string };
// not undefined and is false
if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
ServiceProvider.XAI as string,
)
) {
return NextResponse.json(
{
error: true,
message: `you are not allowed to use ${jsonBody?.model} model`,
},
{
status: 403,
},
);
}
} catch (e) {
console.error(`[XAI] filter`, e);
}
}
try {
const res = await fetch(fetchUrl, fetchOptions);
// to prevent browser prompt for credentials
const newHeaders = new Headers(res.headers);
newHeaders.delete("www-authenticate");
// to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no");
return new Response(res.body, {
status: res.status,
statusText: res.statusText,
headers: newHeaders,
});
} finally {
clearTimeout(timeoutId);
}
}
---

**app/client/api.ts**

import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { MoonshotApi } from "./platforms/moonshot";
import { SparkApi } from "./platforms/iflytek";
import { DeepSeekApi } from "./platforms/deepseek";
import { XAIApi } from "./platforms/xai";
import { ChatGLMApi } from "./platforms/glm";
import { SiliconflowApi } from "./platforms/siliconflow";
export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
};
}
} }
export interface MultimodalContentForAlibaba {
text?: string;
image?: string;
}
export interface RequestMessage {
role: MessageRole;
content: string | MultimodalContent[];
config: LLMConfig;
onUpdate?: (message: string, chunk: string) => void;
onFinish: (message: string, responseRes: Response) => void;
onError?: (err: Error) => void;
onController?: (controller: AbortController) => void;
onBeforeTool?: (tool: ChatMessageTool) => void;
case ModelProvider.Iflytek:
this.llm = new SparkApi();
break;
case ModelProvider.DeepSeek:
this.llm = new DeepSeekApi();
break;
case ModelProvider.XAI:
this.llm = new XAIApi();
break;
case ModelProvider.ChatGLM:
this.llm = new ChatGLMApi();
break;
case ModelProvider.SiliconFlow:
this.llm = new SiliconflowApi();
break;
default:
this.llm = new ChatGPTApi();
}
export function getHeaders(ignoreHeaders: boolean = false) {
function getConfig() {
const modelConfig = chatStore.currentSession().mask.modelConfig;
const isGoogle = modelConfig.providerName === ServiceProvider.Google;
const isAzure = modelConfig.providerName === ServiceProvider.Azure;
const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
const isBaidu = modelConfig.providerName == ServiceProvider.Baidu;
const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek;
const isXAI = modelConfig.providerName === ServiceProvider.XAI;
const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
const isSiliconFlow =
modelConfig.providerName === ServiceProvider.SiliconFlow;
const isEnabledAccessControl = accessStore.enabledAccessControl();
const apiKey = isGoogle
? accessStore.googleApiKey
? accessStore.alibabaApiKey
: isMoonshot
? accessStore.moonshotApiKey
: isXAI
? accessStore.xaiApiKey
: isDeepSeek
? accessStore.deepseekApiKey
: isChatGLM
? accessStore.chatglmApiKey
: isSiliconFlow
? accessStore.siliconflowApiKey
: isIflytek
? accessStore.iflytekApiKey && accessStore.iflytekApiSecret
? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret
isAlibaba,
isMoonshot,
isIflytek,
isDeepSeek,
isXAI,
isChatGLM,
isSiliconFlow,
apiKey,
isEnabledAccessControl,
};
}
function getAuthHeader(): string {
return isAzure
? "api-key"
: isAnthropic
? "x-api-key"
: isGoogle
? "x-goog-api-key"
: "Authorization";
}
const {
isAzure,
isAnthropic,
isBaidu,
isByteDance,
isAlibaba,
isMoonshot,
isIflytek,
isDeepSeek,
isXAI,
isChatGLM,
isSiliconFlow,
apiKey,
isEnabledAccessControl,
} = getConfig();
// when using google api in app, not set auth header
if (isGoogle && clientConfig?.isApp) return headers;
// when using baidu api in app, not set auth header
if (isBaidu && clientConfig?.isApp) return headers;

const authHeader = getAuthHeader();
const bearerToken = getBearerToken(
apiKey,
isAzure || isAnthropic || isGoogle,
);
if (bearerToken) {
headers[authHeader] = bearerToken;
return new ClientApi(ModelProvider.Moonshot);
case ServiceProvider.Iflytek:
return new ClientApi(ModelProvider.Iflytek);
case ServiceProvider.DeepSeek:
return new ClientApi(ModelProvider.DeepSeek);
case ServiceProvider.XAI:
return new ClientApi(ModelProvider.XAI);
case ServiceProvider.ChatGLM:
return new ClientApi(ModelProvider.ChatGLM);
case ServiceProvider.SiliconFlow:
return new ClientApi(ModelProvider.SiliconFlow);
default:
return new ClientApi(ModelProvider.GPT);
}
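With the dispatch above, every newly wired provider resolves to its own client class. A quick usage sketch of the mapping (illustrative only, not part of the change):

// Usage sketch for getClientApi; names are the ones visible in this diff.
const deepseek = getClientApi(ServiceProvider.DeepSeek); // ClientApi(ModelProvider.DeepSeek)
const glm = getClientApi(ServiceProvider.ChatGLM); // ClientApi(ModelProvider.ChatGLM)
const siliconflow = getClientApi(ServiceProvider.SiliconFlow); // ClientApi(ModelProvider.SiliconFlow)
const fallback = getClientApi("NotARealProvider" as ServiceProvider); // falls through to ModelProvider.GPT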


@@ -1,12 +1,16 @@
"use client"; "use client";
import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
import { import {
ApiPath, useAccessStore,
Alibaba, useAppConfig,
ALIBABA_BASE_URL, useChatStore,
REQUEST_TIMEOUT_MS, ChatMessageTool,
} from "@/app/constant"; usePluginStore,
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; } from "@/app/store";
import {
preProcessImageContentForAlibabaDashScope,
streamWithThink,
} from "@/app/utils/chat";
import { import {
ChatOptions,
getHeaders,
@@ -14,15 +18,16 @@ import {
LLMModel,
SpeechOptions,
MultimodalContent,
MultimodalContentForAlibaba,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
isVisionModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse {
object: string;
@@ -89,11 +94,6 @@ export class QwenApi implements LLMApi {
}
async chat(options: ChatOptions) {
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
@@ -102,6 +102,21 @@ export class QwenApi implements LLMApi {
},
};
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = (
visionModel
? await preProcessImageContentForAlibabaDashScope(v.content)
: v.role === "assistant"
? getMessageTextContentWithoutThinking(v)
: getMessageTextContent(v)
) as any;
messages.push({ role: v.role, content });
}
const shouldStream = !!options.config.stream;
const requestPayload: RequestPayload = {
model: modelConfig.model,
@@ -121,138 +136,127 @@ export class QwenApi implements LLMApi {
options.onController?.(controller);
try {
const headers = {
...getHeaders(),
"X-DashScope-SSE": shouldStream ? "enable" : "disable",
};
const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: headers,
};
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return streamWithThink(
chatPath,
requestPayload,
headers,
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.output.choices as Array<{
message: {
content: string | null | MultimodalContentForAlibaba[];
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
if (!choices?.length) return { isThinking: false, content: "" };
const tool_calls = choices[0]?.message?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
const reasoning = choices[0]?.message?.reasoning_content;
const content = choices[0]?.message?.content;
// Skip if both content and reasoning_content are empty or null
if (
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
return {
isThinking: false,
content: "",
};
}
if (reasoning && reasoning.length > 0) {
return {
isThinking: true,
content: reasoning,
};
} else if (content && content.length > 0) {
return {
isThinking: false,
content: Array.isArray(content)
? content.map((item) => item.text).join(",")
: content,
};
}
return {
isThinking: false,
content: "",
};
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
requestPayload?.input?.messages?.splice(
requestPayload?.input?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);


@@ -8,11 +8,12 @@ import {
ChatMessageTool,
} from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { ANTHROPIC_BASE_URL } from "@/app/constant";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { preProcessImageContent, stream } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export type MultiBlockContent = {
type: "image" | "text";
@@ -316,13 +317,14 @@ export class ClaudeApi implements LLMApi {
};
try {
controller.signal.onabort = () =>
options.onFinish("", new Response(null, { status: 400 }));
const res = await fetch(path, payload);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
} catch (e) {
console.error("failed to chat", e);
options.onError?.(e as Error);
@@ -388,9 +390,7 @@ export class ClaudeApi implements LLMApi {
if (baseUrl.trim().length === 0) {
const isApp = !!getClientConfig()?.isApp;
baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
}
if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {


@@ -1,10 +1,5 @@
"use client";
import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getAccessToken } from "@/app/utils/baidu";
@@ -23,7 +18,8 @@ import {
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse {
object: string;
@@ -154,13 +150,14 @@ export class ErnieApi implements LLMApi {
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
let responseText = "";
let remainText = "";
let finished = false;
let responseRes: Response;
// animate response to make it looks smooth
function animateResponseText() {
@@ -190,19 +187,20 @@ export class ErnieApi implements LLMApi {
const finish = () => {
if (!finished) {
finished = true;
options.onFinish(responseText + remainText, responseRes);
}
};
controller.signal.onabort = finish;
fetchEventSource(chatPath, {
fetch: fetch as any,
...chatPayload,
async onopen(res) {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log("[Baidu] request response content type: ", contentType);
responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
@@ -265,7 +263,7 @@ export class ErnieApi implements LLMApi {
const resJson = await res.json();
const message = resJson?.result;
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);


@@ -1,11 +1,12 @@
"use client"; "use client";
import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant";
import { import {
ApiPath, useAccessStore,
ByteDance, useAppConfig,
BYTEDANCE_BASE_URL, useChatStore,
REQUEST_TIMEOUT_MS, ChatMessageTool,
} from "@/app/constant"; usePluginStore,
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; } from "@/app/store";
import {
ChatOptions,
@@ -15,14 +16,15 @@ import {
MultimodalContent,
SpeechOptions,
} from "../api";
import { streamWithThink } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import { preProcessImageContent } from "@/app/utils/chat";
import {
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse {
object: string;
@@ -33,7 +35,7 @@ export interface OpenAIListModelResponse {
}>;
}
interface RequestPayloadForByteDance {
messages: {
role: "system" | "user" | "assistant";
content: string | MultimodalContent[];
@@ -83,10 +85,14 @@ export class DoubaoApi implements LLMApi {
}
async chat(options: ChatOptions) {
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content =
v.role === "assistant"
? getMessageTextContentWithoutThinking(v)
: await preProcessImageContent(v.content);
messages.push({ role: v.role, content });
}
const modelConfig = {
...useAppConfig.getState().modelConfig,
@@ -97,7 +103,7 @@ export class DoubaoApi implements LLMApi {
};
const shouldStream = !!options.config.stream;
const requestPayload: RequestPayloadForByteDance = {
messages,
stream: shouldStream,
model: modelConfig.model,
@@ -122,124 +128,108 @@ export class DoubaoApi implements LLMApi {
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return streamWithThink(
chatPath,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string | null;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
if (!choices?.length) return { isThinking: false, content: "" };
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
const reasoning = choices[0]?.delta?.reasoning_content;
const content = choices[0]?.delta?.content;
// Skip if both content and reasoning_content are empty or null
if (
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
return {
isThinking: false,
content: "",
};
}
if (reasoning && reasoning.length > 0) {
return {
isThinking: true,
content: reasoning,
};
} else if (content && content.length > 0) {
return {
isThinking: false,
content: content,
};
}
return {
isThinking: false,
content: "",
};
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayloadForByteDance,
toolCallMessage: any,
toolCallResult: any[],
) => {
requestPayload?.messages?.splice(
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);


@@ -0,0 +1,253 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export class DeepSeekApi implements LLMApi {
private disableListModels = true;
path(path: string): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
baseUrl = accessStore.deepseekUrl;
}
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.DeepSeek;
baseUrl = isApp ? DEEPSEEK_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
}
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) {
baseUrl = "https://" + baseUrl;
}
console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/");
}
extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? "";
}
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
async chat(options: ChatOptions) {
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
if (v.role === "assistant") {
const content = getMessageTextContentWithoutThinking(v);
messages.push({ role: v.role, content });
} else {
const content = getMessageTextContent(v);
messages.push({ role: v.role, content });
}
}
// Detect and fix the message order: ensure the first message other than system messages is a user message
const filteredMessages: ChatOptions["messages"] = [];
let hasFoundFirstUser = false;
for (const msg of messages) {
if (msg.role === "system") {
// Keep all system messages
filteredMessages.push(msg);
} else if (msg.role === "user") {
// User message directly added
filteredMessages.push(msg);
hasFoundFirstUser = true;
} else if (hasFoundFirstUser) {
// After finding the first user message, all subsequent non-system messages are retained.
filteredMessages.push(msg);
}
// If hasFoundFirstUser is false and it is not a system message, it will be skipped.
}
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
providerName: options.config.providerName,
},
};
const requestPayload: RequestPayload = {
messages: filteredMessages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};
console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
const chatPath = this.path(DeepSeek.ChatPath);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: getHeaders(),
};
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return streamWithThink(
chatPath,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string | null;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
const reasoning = choices[0]?.delta?.reasoning_content;
const content = choices[0]?.delta?.content;
// Skip if both content and reasoning_content are empty or null
if (
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
return {
isThinking: false,
content: "",
};
}
if (reasoning && reasoning.length > 0) {
return {
isThinking: true,
content: reasoning,
};
} else if (content && content.length > 0) {
return {
isThinking: false,
content: content,
};
}
return {
isThinking: false,
content: "",
};
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
// @ts-ignore
requestPayload?.messages?.splice(
// @ts-ignore
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
options.onError?.(e as Error);
}
}
async usage() {
return {
used: 0,
total: 0,
};
}
async models(): Promise<LLMModel[]> {
return [];
}
}
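For clarity, a hypothetical walk-through of the reordering filter in chat() above (the history is invented for illustration):

const history = [
  { role: "system", content: "You are helpful." },
  { role: "assistant", content: "Hi, how can I help?" }, // dropped: precedes any user turn
  { role: "user", content: "What is 2 + 2?" }, // kept: first user message
  { role: "assistant", content: "4" }, // kept: follows the first user message
];
// filteredMessages -> [system, user, assistant], satisfying DeepSeek's rule
// that the first non-system message must come from the user.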

app/client/platforms/glm.ts

@@ -0,0 +1,292 @@
"use client";
import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
import { preProcessImageContent } from "@/app/utils/chat";
interface BasePayload {
model: string;
}
interface ChatPayload extends BasePayload {
messages: ChatOptions["messages"];
stream?: boolean;
temperature?: number;
presence_penalty?: number;
frequency_penalty?: number;
top_p?: number;
}
interface ImageGenerationPayload extends BasePayload {
prompt: string;
size?: string;
user_id?: string;
}
interface VideoGenerationPayload extends BasePayload {
prompt: string;
duration?: number;
resolution?: string;
user_id?: string;
}
type ModelType = "chat" | "image" | "video";
export class ChatGLMApi implements LLMApi {
private disableListModels = true;
private getModelType(model: string): ModelType {
if (model.startsWith("cogview-")) return "image";
if (model.startsWith("cogvideo-")) return "video";
return "chat";
}
private getModelPath(type: ModelType): string {
switch (type) {
case "image":
return ChatGLM.ImagePath;
case "video":
return ChatGLM.VideoPath;
default:
return ChatGLM.ChatPath;
}
}
private createPayload(
messages: ChatOptions["messages"],
modelConfig: any,
options: ChatOptions,
): BasePayload {
const modelType = this.getModelType(modelConfig.model);
const lastMessage = messages[messages.length - 1];
const prompt =
typeof lastMessage.content === "string"
? lastMessage.content
: lastMessage.content.map((c) => c.text).join("\n");
switch (modelType) {
case "image":
return {
model: modelConfig.model,
prompt,
size: options.config.size,
} as ImageGenerationPayload;
default:
return {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
} as ChatPayload;
}
}
private parseResponse(modelType: ModelType, json: any): string {
switch (modelType) {
case "image": {
const imageUrl = json.data?.[0]?.url;
return imageUrl ? `![Generated Image](${imageUrl})` : "";
}
case "video": {
const videoUrl = json.data?.[0]?.url;
return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
}
default:
return this.extractMessage(json);
}
}
path(path: string): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
baseUrl = accessStore.chatglmUrl;
}
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.ChatGLM;
baseUrl = isApp ? CHATGLM_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
}
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) {
baseUrl = "https://" + baseUrl;
}
console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/");
}
extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? "";
}
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
async chat(options: ChatOptions) {
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
messages.push({ role: v.role, content });
}
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
providerName: options.config.providerName,
},
};
const modelType = this.getModelType(modelConfig.model);
const requestPayload = this.createPayload(messages, modelConfig, options);
const path = this.path(this.getModelPath(modelType));
console.log(`[Request] glm ${modelType} payload: `, requestPayload);
const controller = new AbortController();
options.onController?.(controller);
try {
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: getHeaders(),
};
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (modelType === "image" || modelType === "video") {
const res = await fetch(path, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
console.log(`[Response] glm ${modelType}:`, resJson);
const message = this.parseResponse(modelType, resJson);
options.onFinish(message, res);
return;
}
const shouldStream = !!options.config.stream;
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return stream(
path,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string;
tool_calls: ChatMessageTool[];
};
}>;
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
return choices[0]?.delta?.content;
},
// processToolMessage
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
// @ts-ignore
requestPayload?.messages?.splice(
// @ts-ignore
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const res = await fetch(path, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
options.onError?.(e as Error);
}
}
async usage() {
return {
used: 0,
total: 0,
};
}
async models(): Promise<LLMModel[]> {
return [];
}
}
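The model-type dispatch above is worth tracing once (constants are the ones from this file; model names are examples):

// getModelType("cogview-3")  -> "image" -> ChatGLM.ImagePath
// getModelType("cogvideo-x") -> "video" -> ChatGLM.VideoPath
// getModelType("glm-4-plus") -> "chat"  -> ChatGLM.ChatPath
// Image/video requests skip streaming entirely and render the result as
// markdown (an image link or a <video> tag) via parseResponse().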


@@ -1,4 +1,4 @@
import { ApiPath, Google } from "@/app/constant";
import {
ChatOptions,
getHeaders,
@@ -7,24 +7,30 @@ import {
LLMUsage,
SpeechOptions,
} from "../api";
import {
useAccessStore,
useAppConfig,
useChatStore,
usePluginStore,
ChatMessageTool,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import { GEMINI_BASE_URL } from "@/app/constant";
import {
getMessageTextContent,
getMessageImages,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
import { fetch } from "@/app/utils/stream";
export class GeminiProApi implements LLMApi {
path(path: string, shouldStream = false): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
@@ -34,7 +40,7 @@ export class GeminiProApi implements LLMApi {
const isApp = !!getClientConfig()?.isApp;
if (baseUrl.length === 0) {
baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
@@ -46,19 +52,34 @@ export class GeminiProApi implements LLMApi {
console.log("[Proxy Endpoint] ", baseUrl, path);
let chatPath = [baseUrl, path].join("/");
if (shouldStream) {
chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse";
// if chatPath.startsWith('http') then add key in query string
if (chatPath.startsWith("http") && accessStore.googleApiKey) {
chatPath += `&key=${accessStore.googleApiKey}`;
} }
return chatPath;
}
extractMessage(res: any) {
console.log("[Response] gemini-pro response: ", res);
const getTextFromParts = (parts: any[]) => {
if (!Array.isArray(parts)) return "";
return parts
.map((part) => part?.text || "")
.filter((text) => text.trim() !== "")
.join("\n\n");
};
let content = "";
if (Array.isArray(res)) {
res.map((item) => {
content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
});
}
return (
getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
res?.error?.message ||
""
);
@@ -165,7 +186,10 @@ export class GeminiProApi implements LLMApi {
options.onController?.(controller);
try {
// https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
const chatPath = this.path(
Google.ChatPath(modelConfig.model),
shouldStream,
);
const chatPayload = {
method: "POST",
@@ -174,121 +198,95 @@ export class GeminiProApi implements LLMApi {
headers: getHeaders(),
};
const isThinking = options.config.model.includes("-thinking");
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return stream(
chatPath,
requestPayload,
getHeaders(),
// @ts-ignore
tools.length > 0
? // @ts-ignore
[{ functionDeclarations: tools.map((tool) => tool.function) }]
: [],
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const chunkJson = JSON.parse(text);
const functionCall = chunkJson?.candidates
?.at(0)
?.content.parts.at(0)?.functionCall;
if (functionCall) {
const { name, args } = functionCall;
runTools.push({
id: nanoid(),
type: "function",
function: {
name,
arguments: JSON.stringify(args), // utils.chat call function, using JSON.parse
},
});
}
return chunkJson?.candidates
?.at(0)
?.content.parts?.map((part: { text: string }) => part.text)
.join("\n\n");
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
// @ts-ignore
requestPayload?.contents?.splice(
// @ts-ignore
requestPayload?.contents?.length,
0,
{
role: "model",
parts: toolCallMessage.tool_calls.map(
(tool: ChatMessageTool) => ({
functionCall: {
name: tool?.function?.name,
args: JSON.parse(tool?.function?.arguments as string),
},
}),
),
},
// @ts-ignore
...toolCallResult.map((result) => ({
role: "function",
parts: [
{
functionResponse: {
name: result.name,
response: {
name: result.name,
content: result.content, // TODO just text content...
},
},
},
],
})),
);
},
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
@@ -303,7 +301,7 @@ export class GeminiProApi implements LLMApi {
);
}
const message = apiClient.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);


@@ -1,7 +1,7 @@
"use client";
import {
ApiPath,
IFLYTEK_BASE_URL,
Iflytek,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
@@ -22,6 +22,7 @@ import {
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { fetch } from "@/app/utils/stream";
import { RequestPayload } from "./openai"; import { RequestPayload } from "./openai";
@@ -40,7 +41,7 @@ export class SparkApi implements LLMApi {
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.Iflytek;
baseUrl = isApp ? IFLYTEK_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
@@ -116,6 +117,7 @@ export class SparkApi implements LLMApi {
let responseText = "";
let remainText = "";
let finished = false;
let responseRes: Response;
// Animate response text to make it look smooth
function animateResponseText() {
@@ -142,19 +144,20 @@ export class SparkApi implements LLMApi {
const finish = () => {
if (!finished) {
finished = true;
options.onFinish(responseText + remainText, responseRes);
}
};
controller.signal.onabort = finish;
fetchEventSource(chatPath, {
fetch: fetch as any,
...chatPayload,
async onopen(res) {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log("[Spark] request response content type: ", contentType);
responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
@@ -229,7 +232,7 @@ export class SparkApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);


@@ -2,7 +2,7 @@
// azure and openai, using same models. so using same LLMApi.
import {
ApiPath,
MOONSHOT_BASE_URL,
Moonshot,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
@@ -24,6 +24,7 @@ import {
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export class MoonshotApi implements LLMApi {
private disableListModels = true;
@@ -40,7 +41,7 @@ export class MoonshotApi implements LLMApi {
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.Moonshot;
baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
@@ -179,7 +180,7 @@ export class MoonshotApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);


@@ -2,7 +2,7 @@
// azure and openai, using same models. so using same LLMApi.
import {
ApiPath,
OPENAI_BASE_URL,
DEFAULT_MODELS,
OpenaiPath,
Azure,
@@ -21,10 +21,10 @@ import {
preProcessImageContent,
uploadImage,
base64Image2Blob,
streamWithThink,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
import {
ChatOptions,
@@ -41,7 +41,9 @@ import {
getMessageTextContent,
isVisionModel,
isDalle3 as _isDalle3,
getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse {
object: string;
@@ -64,6 +66,7 @@ export interface RequestPayload {
frequency_penalty: number;
top_p: number;
max_tokens?: number;
max_completion_tokens?: number;
}
export interface DalleRequestPayload {
@@ -71,7 +74,7 @@ export interface DalleRequestPayload {
prompt: string;
response_format: "url" | "b64_json";
n: number;
size: ModelSize;
quality: DalleQuality;
style: DalleStyle;
}
@@ -98,7 +101,7 @@ export class ChatGPTApi implements LLMApi {
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
baseUrl = isApp ? OPENAI_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
@@ -193,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
let requestPayload: RequestPayload | DalleRequestPayload;
const isDalle3 = _isDalle3(options.config.model);
const isO1OrO3 =
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
@@ -215,23 +220,28 @@ export class ChatGPTApi implements LLMApi {
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
if (!(isO1OrO3 && v.role === "system"))
messages.push({ role: v.role, content });
}
// O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet.
requestPayload = {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: !isO1OrO3 ? modelConfig.temperature : 1,
presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
top_p: !isO1OrO3 ? modelConfig.top_p : 1,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};
// O1 uses max_completion_tokens to control the number of generated tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
if (isO1OrO3) {
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
}
// add max_tokens to vision model
if (visionModel) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
@@ -240,7 +250,7 @@ export class ChatGPTApi implements LLMApi {
console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !isDalle3 && !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
@@ -284,7 +294,7 @@ export class ChatGPTApi implements LLMApi {
useChatStore.getState().currentSession().mask?.plugin || [],
);
// console.log("getAsTools", tools, funcs);
streamWithThink(
chatPath,
requestPayload,
getHeaders(),
@@ -299,8 +309,12 @@ export class ChatGPTApi implements LLMApi {
delta: {
content: string;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
if (!choices?.length) return { isThinking: false, content: "" };
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const id = tool_calls[0]?.id;
@@ -320,7 +334,37 @@ export class ChatGPTApi implements LLMApi {
runTools[index]["function"]["arguments"] += args;
}
}
const reasoning = choices[0]?.delta?.reasoning_content;
const content = choices[0]?.delta?.content;
// Skip if both content and reasoning_content are empty or null
if (
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
return {
isThinking: false,
content: "",
};
}
if (reasoning && reasoning.length > 0) {
return {
isThinking: true,
content: reasoning,
};
} else if (content && content.length > 0) {
return {
isThinking: false,
content: content,
};
}
return {
isThinking: false,
content: "",
};
},
// processToolMessage, include tool_calls message and tool call results
(
@@ -352,7 +396,7 @@ export class ChatGPTApi implements LLMApi {
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
const res = await fetch(chatPath, chatPayload);
@@ -360,7 +404,7 @@ export class ChatGPTApi implements LLMApi {
const resJson = await res.json();
const message = await this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);


@@ -0,0 +1,287 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import {
ApiPath,
SILICONFLOW_BASE_URL,
SiliconFlow,
DEFAULT_MODELS,
} from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export interface SiliconFlowListModelResponse {
object: string;
data: Array<{
id: string;
object: string;
root: string;
}>;
}
export class SiliconflowApi implements LLMApi {
private disableListModels = false;
path(path: string): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
baseUrl = accessStore.siliconflowUrl;
}
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.SiliconFlow;
baseUrl = isApp ? SILICONFLOW_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
}
if (
!baseUrl.startsWith("http") &&
!baseUrl.startsWith(ApiPath.SiliconFlow)
) {
baseUrl = "https://" + baseUrl;
}
console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/");
}
extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? "";
}
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
async chat(options: ChatOptions) {
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
if (v.role === "assistant") {
const content = getMessageTextContentWithoutThinking(v);
messages.push({ role: v.role, content });
} else {
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
messages.push({ role: v.role, content });
}
}
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
providerName: options.config.providerName,
},
};
const requestPayload: RequestPayload = {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
    // max_tokens is intentionally not sent; the provider's default is used instead (see the commented line above).
};
console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
const chatPath = this.path(SiliconFlow.ChatPath);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: getHeaders(),
};
// console.log(chatPayload);
// Use extended timeout for thinking models as they typically require more processing time
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return streamWithThink(
chatPath,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string | null;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
const reasoning = choices[0]?.delta?.reasoning_content;
const content = choices[0]?.delta?.content;
// Skip if both content and reasoning_content are empty or null
if (
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
return {
isThinking: false,
content: "",
};
}
if (reasoning && reasoning.length > 0) {
return {
isThinking: true,
content: reasoning,
};
} else if (content && content.length > 0) {
return {
isThinking: false,
content: content,
};
}
return {
isThinking: false,
content: "",
};
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
// @ts-ignore
requestPayload?.messages?.splice(
// @ts-ignore
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
options.onError?.(e as Error);
}
}
async usage() {
return {
used: 0,
total: 0,
};
}
async models(): Promise<LLMModel[]> {
if (this.disableListModels) {
return DEFAULT_MODELS.slice();
}
const res = await fetch(this.path(SiliconFlow.ListModelPath), {
method: "GET",
headers: {
...getHeaders(),
},
});
const resJson = (await res.json()) as SiliconFlowListModelResponse;
const chatModels = resJson.data;
console.log("[Models]", chatModels);
if (!chatModels) {
return [];
}
    let seq = 1000; // keep the ordering consistent with Constant.ts
return chatModels.map((m) => ({
name: m.id,
available: true,
sorted: seq++,
provider: {
id: "siliconflow",
providerName: "SiliconFlow",
providerType: "siliconflow",
sorted: 14,
},
}));
}
}
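
Editor's note: the parseSSE callback passed to streamWithThink above returns a small { isThinking, content } payload for every chunk. A sketch of how a consumer might route the two channels follows; the handler names are invented for illustration and are not repository APIs.

// Sketch of the { isThinking, content } contract used by streamWithThink.
type ThinkDelta = { isThinking: boolean; content: string };

function routeDelta(
  delta: ThinkDelta,
  onReasoning: (chunk: string) => void, // e.g. rendered inside a <think> block
  onAnswer: (chunk: string) => void, // the visible assistant reply
) {
  if (!delta.content) return; // empty chunks carry no text to render
  if (delta.isThinking) {
    onReasoning(delta.content); // reasoning_content stream
  } else {
    onAnswer(delta.content); // normal content stream
  }
}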

app/client/platforms/tencent.ts

@@ -1,5 +1,5 @@
"use client"; "use client";
import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant"; import { ApiPath, TENCENT_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { import {
@ -17,11 +17,16 @@ import {
} from "@fortaine/fetch-event-source"; } from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format"; import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client"; import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, isVisionModel } from "@/app/utils"; import {
getMessageTextContent,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import mapKeys from "lodash-es/mapKeys"; import mapKeys from "lodash-es/mapKeys";
import mapValues from "lodash-es/mapValues"; import mapValues from "lodash-es/mapValues";
import isArray from "lodash-es/isArray"; import isArray from "lodash-es/isArray";
import isObject from "lodash-es/isObject"; import isObject from "lodash-es/isObject";
import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse { export interface OpenAIListModelResponse {
object: string; object: string;
@ -70,9 +75,7 @@ export class HunyuanApi implements LLMApi {
if (baseUrl.length === 0) { if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp; const isApp = !!getClientConfig()?.isApp;
baseUrl = isApp baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
? DEFAULT_API_HOST + "/api/proxy/tencent"
: ApiPath.Tencent;
} }
if (baseUrl.endsWith("/")) { if (baseUrl.endsWith("/")) {
@ -136,13 +139,14 @@ export class HunyuanApi implements LLMApi {
// make a fetch request // make a fetch request
const requestTimeoutId = setTimeout( const requestTimeoutId = setTimeout(
() => controller.abort(), () => controller.abort(),
REQUEST_TIMEOUT_MS, getTimeoutMSByModel(options.config.model),
); );
if (shouldStream) { if (shouldStream) {
let responseText = ""; let responseText = "";
let remainText = ""; let remainText = "";
let finished = false; let finished = false;
let responseRes: Response;
// animate response to make it looks smooth // animate response to make it looks smooth
function animateResponseText() { function animateResponseText() {
@ -172,13 +176,14 @@ export class HunyuanApi implements LLMApi {
const finish = () => { const finish = () => {
if (!finished) { if (!finished) {
finished = true; finished = true;
options.onFinish(responseText + remainText); options.onFinish(responseText + remainText, responseRes);
} }
}; };
controller.signal.onabort = finish; controller.signal.onabort = finish;
fetchEventSource(chatPath, { fetchEventSource(chatPath, {
fetch: fetch as any,
...chatPayload, ...chatPayload,
async onopen(res) { async onopen(res) {
clearTimeout(requestTimeoutId); clearTimeout(requestTimeoutId);
@ -187,7 +192,7 @@ export class HunyuanApi implements LLMApi {
"[Tencent] request response content type: ", "[Tencent] request response content type: ",
contentType, contentType,
); );
responseRes = res;
if (contentType?.startsWith("text/plain")) { if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text(); responseText = await res.clone().text();
return finish(); return finish();
@ -253,7 +258,7 @@ export class HunyuanApi implements LLMApi {
const resJson = await res.json(); const resJson = await res.json();
const message = this.extractMessage(resJson); const message = this.extractMessage(resJson);
options.onFinish(message); options.onFinish(message, res);
} }
} catch (e) { } catch (e) {
console.log("[Request] failed to make a chat request", e); console.log("[Request] failed to make a chat request", e);

app/client/platforms/xai.ts Normal file

@@ -0,0 +1,194 @@
"use client";
// xAI exposes an OpenAI-compatible API, so this client mirrors the OpenAI LLMApi implementation.
import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getTimeoutMSByModel } from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export class XAIApi implements LLMApi {
private disableListModels = true;
path(path: string): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
baseUrl = accessStore.xaiUrl;
}
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.XAI;
baseUrl = isApp ? XAI_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
}
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) {
baseUrl = "https://" + baseUrl;
}
console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/");
}
extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? "";
}
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
async chat(options: ChatOptions) {
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = await preProcessImageContent(v.content);
messages.push({ role: v.role, content });
}
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
providerName: options.config.providerName,
},
};
const requestPayload: RequestPayload = {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
};
console.log("[Request] xai payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
const chatPath = this.path(XAI.ChatPath);
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: getHeaders(),
};
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
if (shouldStream) {
const [tools, funcs] = usePluginStore
.getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return stream(
chatPath,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string;
tool_calls: ChatMessageTool[];
};
}>;
const tool_calls = choices[0]?.delta?.tool_calls;
if (tool_calls?.length > 0) {
const index = tool_calls[0]?.index;
const id = tool_calls[0]?.id;
const args = tool_calls[0]?.function?.arguments;
if (id) {
runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
return choices[0]?.delta?.content;
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
// @ts-ignore
requestPayload?.messages?.splice(
// @ts-ignore
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
options.onError?.(e as Error);
}
}
async usage() {
return {
used: 0,
total: 0,
};
}
async models(): Promise<LLMModel[]> {
return [];
}
}
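
Editor's note: both new clients share the parseSSE tool-call pattern seen above: the first delta for a call carries id/type/name, and later deltas carry argument fragments that are appended by index. A compact sketch of that accumulation logic; the types below are simplified stand-ins, not the repository's ChatMessageTool.

// Sketch of streamed tool-call assembly from OpenAI-style delta chunks.
interface ToolCallDelta {
  index: number;
  id?: string;
  type?: string;
  function?: { name?: string; arguments?: string };
}

interface AssembledToolCall {
  id: string;
  type?: string;
  function: { name?: string; arguments: string };
}

function accumulateToolCall(calls: AssembledToolCall[], delta: ToolCallDelta) {
  if (delta.id) {
    // first chunk for this call: record id, type, and function name
    calls.push({
      id: delta.id,
      type: delta.type,
      function: {
        name: delta.function?.name,
        arguments: delta.function?.arguments ?? "",
      },
    });
  } else {
    // continuation chunk: append the next slice of the JSON arguments
    calls[delta.index].function.arguments += delta.function?.arguments ?? "";
  }
}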

app/components/auth.tsx

@@ -11,12 +11,15 @@ import Logo from "../icons/logo.svg";
 import { useMobileScreen } from "@/app/utils";
 import BotIcon from "../icons/bot.svg";
 import { getClientConfig } from "../config/client";
+import { PasswordInput } from "./ui-lib";
 import LeftIcon from "@/app/icons/left.svg";
 import { safeLocalStorage } from "@/app/utils";
 import {
   trackSettingsPageGuideToCPaymentClick,
   trackAuthorizationPageButtonToCPaymentClick,
 } from "../utils/auth-settings-events";
+import clsx from "clsx";

 const storage = safeLocalStorage();

 export function AuthPage() {
@@ -53,43 +56,50 @@ export function AuthPage() {
           onClick={() => navigate(Path.Home)}
         ></IconButton>
       </div>
-      <div className={`no-dark ${styles["auth-logo"]}`}>
+      <div className={clsx("no-dark", styles["auth-logo"])}>
         <BotIcon />
       </div>

       <div className={styles["auth-title"]}>{Locale.Auth.Title}</div>
       <div className={styles["auth-tips"]}>{Locale.Auth.Tips}</div>

-      <input
-        className={styles["auth-input"]}
-        type="password"
-        placeholder={Locale.Auth.Input}
+      <PasswordInput
+        style={{ marginTop: "3vh", marginBottom: "3vh" }}
+        aria={Locale.Settings.ShowPassword}
+        aria-label={Locale.Auth.Input}
         value={accessStore.accessCode}
+        type="text"
+        placeholder={Locale.Auth.Input}
         onChange={(e) => {
           accessStore.update(
             (access) => (access.accessCode = e.currentTarget.value),
           );
         }}
       />

       {!accessStore.hideUserApiKey ? (
         <>
           <div className={styles["auth-tips"]}>{Locale.Auth.SubTips}</div>
-          <input
-            className={styles["auth-input"]}
-            type="password"
-            placeholder={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
+          <PasswordInput
+            style={{ marginTop: "3vh", marginBottom: "3vh" }}
+            aria={Locale.Settings.ShowPassword}
+            aria-label={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
             value={accessStore.openaiApiKey}
+            type="text"
+            placeholder={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
             onChange={(e) => {
               accessStore.update(
                 (access) => (access.openaiApiKey = e.currentTarget.value),
               );
             }}
           />
-          <input
-            className={styles["auth-input-second"]}
-            type="password"
-            placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
+          <PasswordInput
+            style={{ marginTop: "3vh", marginBottom: "3vh" }}
+            aria={Locale.Settings.ShowPassword}
+            aria-label={Locale.Settings.Access.Google.ApiKey.Placeholder}
             value={accessStore.googleApiKey}
+            type="text"
+            placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
             onChange={(e) => {
               accessStore.update(
                 (access) => (access.googleApiKey = e.currentTarget.value),
@@ -155,7 +165,7 @@ function TopBanner() {
       onMouseEnter={handleMouseEnter}
       onMouseLeave={handleMouseLeave}
     >
-      <div className={`${styles["top-banner-inner"]} no-dark`}>
+      <div className={clsx(styles["top-banner-inner"], "no-dark")}>
         <Logo className={styles["top-banner-logo"]}></Logo>
         <span>
           {Locale.Auth.TopTips}

app/components/button.tsx

@@ -2,6 +2,7 @@ import * as React from "react";

 import styles from "./button.module.scss";

 import { CSSProperties } from "react";
+import clsx from "clsx";

 export type ButtonType = "primary" | "danger" | null;

@@ -22,12 +23,16 @@ export function IconButton(props: {
 }) {
   return (
     <button
-      className={
-        styles["icon-button"] +
-        ` ${props.bordered && styles.border} ${props.shadow && styles.shadow} ${
-          props.className ?? ""
-        } clickable ${styles[props.type ?? ""]}`
-      }
+      className={clsx(
+        "clickable",
+        styles["icon-button"],
+        {
+          [styles.border]: props.bordered,
+          [styles.shadow]: props.shadow,
+        },
+        styles[props.type ?? ""],
+        props.className,
+      )}
       onClick={props.onClick}
       title={props.title}
       disabled={props.disabled}
@@ -40,10 +45,9 @@ export function IconButton(props: {
       {props.icon && (
         <div
           aria-label={props.text || props.title}
-          className={
-            styles["icon-button-icon"] +
-            ` ${props.type === "primary" && "no-dark"}`
-          }
+          className={clsx(styles["icon-button-icon"], {
+            "no-dark": props.type === "primary",
+          })}
         >
           {props.icon}
         </div>

app/components/chat-list.tsx

@@ -18,6 +18,7 @@ import { Mask } from "../store/mask";
 import { useRef, useEffect } from "react";
 import { showConfirm } from "./ui-lib";
 import { useMobileScreen } from "../utils";
+import clsx from "clsx";

 export function ChatItem(props: {
   onClick?: () => void;
@@ -45,11 +46,11 @@ export function ChatItem(props: {
     <Draggable draggableId={`${props.id}`} index={props.index}>
       {(provided) => (
         <div
-          className={`${styles["chat-item"]} ${
-            props.selected &&
-            (currentPath === Path.Chat || currentPath === Path.Home) &&
-            styles["chat-item-selected"]
-          }`}
+          className={clsx(styles["chat-item"], {
+            [styles["chat-item-selected"]]:
+              props.selected &&
+              (currentPath === Path.Chat || currentPath === Path.Home),
+          })}
           onClick={props.onClick}
           ref={(ele) => {
             draggableRef.current = ele;
@@ -63,7 +64,7 @@ export function ChatItem(props: {
         >
           {props.narrow ? (
             <div className={styles["chat-item-narrow"]}>
-              <div className={styles["chat-item-avatar"] + " no-dark"}>
+              <div className={clsx(styles["chat-item-avatar"], "no-dark")}>
                 <MaskAvatar
                   avatar={props.mask.avatar}
                   model={props.mask.modelConfig.model}

app/components/chat.module.scss

@@ -45,6 +45,14 @@
 .chat-input-actions {
   display: flex;
   flex-wrap: wrap;
+  justify-content: space-between;
+  gap: 5px;
+
+  &-end {
+    display: flex;
+    margin-left: auto;
+    gap: 5px;
+  }

   .chat-input-action {
     display: inline-flex;
@@ -62,10 +70,6 @@
     width: var(--icon-width);
     overflow: hidden;

-    &:not(:last-child) {
-      margin-right: 5px;
-    }
-
     .text {
       white-space: nowrap;
       padding-left: 5px;
@@ -231,10 +235,12 @@
   animation: slide-in ease 0.3s;

-  $linear: linear-gradient(to right,
-      rgba(0, 0, 0, 0),
-      rgba(0, 0, 0, 1),
-      rgba(0, 0, 0, 0));
+  $linear: linear-gradient(
+    to right,
+    rgba(0, 0, 0, 0),
+    rgba(0, 0, 0, 1),
+    rgba(0, 0, 0, 0)
+  );
   mask-image: $linear;

   @mixin show {
@@ -443,6 +449,25 @@
     transition: all ease 0.3s;
   }

+  .chat-message-audio {
+    display: flex;
+    align-items: center;
+    justify-content: space-between;
+    border-radius: 10px;
+    background-color: rgba(0, 0, 0, 0.05);
+    border: var(--border-in-light);
+    position: relative;
+    transition: all ease 0.3s;
+    margin-top: 10px;
+    font-size: 14px;
+    user-select: text;
+    word-break: break-word;
+    box-sizing: border-box;
+
+    audio {
+      height: 30px; /* adjust the player height */
+    }
+  }
+
   .chat-message-item-image {
     width: 100%;
     margin-top: 10px;
@@ -471,7 +496,6 @@
     border: rgba($color: #888, $alpha: 0.2) 1px solid;
   }

-
   @media only screen and (max-width: 600px) {
     $calc-image-width: calc(100vw / 3 * 2 / var(--image-count));

@@ -486,8 +510,13 @@
   }

   @media screen and (min-width: 600px) {
-    $max-image-width: calc(calc(1200px - var(--sidebar-width))/3*2/var(--image-count));
-    $image-width: calc(calc(var(--window-width) - var(--sidebar-width))/3*2/var(--image-count));
+    $max-image-width: calc(
+      calc(1200px - var(--sidebar-width)) / 3 * 2 / var(--image-count)
+    );
+    $image-width: calc(
+      calc(var(--window-width) - var(--sidebar-width)) / 3 * 2 /
+        var(--image-count)
+    );

     .chat-message-item-image-multi {
       width: $image-width;
@@ -626,7 +655,8 @@
   min-height: 68px;
 }

-.chat-input:focus {}
+.chat-input:focus {
+}

 .chat-input-send {
   background-color: var(--primary);
@@ -694,3 +724,30 @@
   font-size: 12px;
   color: var(--black);
 }
+
+.chat-main {
+  display: flex;
+  height: 100%;
+  width: 100%;
+  position: relative;
+  overflow: hidden;
+
+  .chat-body-container {
+    height: 100%;
+    display: flex;
+    flex-direction: column;
+    flex: 1;
+    width: 100%;
+  }
+
+  .chat-side-panel {
+    position: absolute;
+    inset: 0;
+    background: var(--white);
+    overflow: hidden;
+    z-index: 10;
+    transform: translateX(100%);
+    transition: all ease 0.3s;
+
+    &-show {
+      transform: translateX(0);
+    }
+  }
+}

app/components/chat.tsx

@ -1,17 +1,18 @@
import { useDebouncedCallback } from "use-debounce"; import { useDebouncedCallback } from "use-debounce";
import React, { import React, {
useState,
useRef,
useEffect,
useMemo,
useCallback,
Fragment, Fragment,
RefObject, RefObject,
useCallback,
useEffect,
useMemo,
useRef,
useState,
} from "react"; } from "react";
import SendWhiteIcon from "../icons/send-white.svg"; import SendWhiteIcon from "../icons/send-white.svg";
import BrainIcon from "../icons/brain.svg"; import BrainIcon from "../icons/brain.svg";
import RenameIcon from "../icons/rename.svg"; import RenameIcon from "../icons/rename.svg";
import EditIcon from "../icons/rename.svg";
import ExportIcon from "../icons/share.svg"; import ExportIcon from "../icons/share.svg";
import ReturnIcon from "../icons/return.svg"; import ReturnIcon from "../icons/return.svg";
import CopyIcon from "../icons/copy.svg"; import CopyIcon from "../icons/copy.svg";
@ -24,11 +25,11 @@ import MaskIcon from "../icons/mask.svg";
import MaxIcon from "../icons/max.svg"; import MaxIcon from "../icons/max.svg";
import MinIcon from "../icons/min.svg"; import MinIcon from "../icons/min.svg";
import ResetIcon from "../icons/reload.svg"; import ResetIcon from "../icons/reload.svg";
import ReloadIcon from "../icons/reload.svg";
import BreakIcon from "../icons/break.svg"; import BreakIcon from "../icons/break.svg";
import SettingsIcon from "../icons/chat-settings.svg"; import SettingsIcon from "../icons/chat-settings.svg";
import DeleteIcon from "../icons/clear.svg"; import DeleteIcon from "../icons/clear.svg";
import PinIcon from "../icons/pin.svg"; import PinIcon from "../icons/pin.svg";
import EditIcon from "../icons/rename.svg";
import ConfirmIcon from "../icons/confirm.svg"; import ConfirmIcon from "../icons/confirm.svg";
import CloseIcon from "../icons/close.svg"; import CloseIcon from "../icons/close.svg";
import CancelIcon from "../icons/cancel.svg"; import CancelIcon from "../icons/cancel.svg";
@ -45,33 +46,35 @@ import QualityIcon from "../icons/hd.svg";
import StyleIcon from "../icons/palette.svg"; import StyleIcon from "../icons/palette.svg";
import PluginIcon from "../icons/plugin.svg"; import PluginIcon from "../icons/plugin.svg";
import ShortcutkeyIcon from "../icons/shortcutkey.svg"; import ShortcutkeyIcon from "../icons/shortcutkey.svg";
import ReloadIcon from "../icons/reload.svg"; import McpToolIcon from "../icons/tool.svg";
import HeadphoneIcon from "../icons/headphone.svg";
import { import {
ChatMessage,
SubmitKey,
useChatStore,
BOT_HELLO, BOT_HELLO,
ChatMessage,
createMessage, createMessage,
useAccessStore,
Theme,
useAppConfig,
DEFAULT_TOPIC, DEFAULT_TOPIC,
ModelType, ModelType,
SubmitKey,
Theme,
useAccessStore,
useAppConfig,
useChatStore,
usePluginStore, usePluginStore,
} from "../store"; } from "../store";
import { import {
copyToClipboard,
selectOrCopy,
autoGrowTextArea, autoGrowTextArea,
useMobileScreen, copyToClipboard,
getMessageTextContent,
getMessageImages, getMessageImages,
isVisionModel, getMessageTextContent,
isDalle3, isDalle3,
showPlugins, isVisionModel,
safeLocalStorage, safeLocalStorage,
getModelSizes,
supportsCustomSize,
useMobileScreen,
selectOrCopy,
showPlugins,
} from "../utils"; } from "../utils";
import { uploadImage as uploadImageRemote } from "@/app/utils/chat"; import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
@ -79,7 +82,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic"; import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller"; import { ChatControllerPool } from "../client/controller";
import { DalleSize, DalleQuality, DalleStyle } from "../typing"; import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { Prompt, usePromptStore } from "../store/prompt"; import { Prompt, usePromptStore } from "../store/prompt";
import Locale from "../locales"; import Locale from "../locales";
@ -102,8 +105,8 @@ import {
ModelProvider, ModelProvider,
Path, Path,
REQUEST_TIMEOUT_MS, REQUEST_TIMEOUT_MS,
UNFINISHED_INPUT,
ServiceProvider, ServiceProvider,
UNFINISHED_INPUT,
} from "../constant"; } from "../constant";
import { Avatar } from "./emoji"; import { Avatar } from "./emoji";
import { ContextPrompts, MaskAvatar, MaskConfig } from "./mask"; import { ContextPrompts, MaskAvatar, MaskConfig } from "./mask";
@ -113,19 +116,52 @@ import { prettyObject } from "../utils/format";
import { ExportMessageModal } from "./exporter"; import { ExportMessageModal } from "./exporter";
import { getClientConfig } from "../config/client"; import { getClientConfig } from "../config/client";
import { useAllModels } from "../utils/hooks"; import { useAllModels } from "../utils/hooks";
import { MultimodalContent } from "../client/api"; import { ClientApi, MultimodalContent } from "../client/api";
const localStorage = safeLocalStorage();
import { ClientApi } from "../client/api";
import { createTTSPlayer } from "../utils/audio"; import { createTTSPlayer } from "../utils/audio";
import { MsEdgeTTS, OUTPUT_FORMAT } from "../utils/ms_edge_tts"; import { MsEdgeTTS, OUTPUT_FORMAT } from "../utils/ms_edge_tts";
import { isEmpty } from "lodash-es";
import { getModelProvider } from "../utils/model";
import { RealtimeChat } from "@/app/components/realtime-chat";
import clsx from "clsx";
import { getAvailableClientsCount, isMcpEnabled } from "../mcp/actions";
const localStorage = safeLocalStorage();
const ttsPlayer = createTTSPlayer(); const ttsPlayer = createTTSPlayer();
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => <LoadingIcon />, loading: () => <LoadingIcon />,
}); });
const MCPAction = () => {
const navigate = useNavigate();
const [count, setCount] = useState<number>(0);
const [mcpEnabled, setMcpEnabled] = useState(false);
useEffect(() => {
const checkMcpStatus = async () => {
const enabled = await isMcpEnabled();
setMcpEnabled(enabled);
if (enabled) {
const count = await getAvailableClientsCount();
setCount(count);
}
};
checkMcpStatus();
}, []);
if (!mcpEnabled) return null;
return (
<ChatAction
onClick={() => navigate(Path.McpMarket)}
text={`MCP${count ? ` (${count})` : ""}`}
icon={<McpToolIcon />}
/>
);
};
export function SessionConfigModel(props: { onClose: () => void }) { export function SessionConfigModel(props: { onClose: () => void }) {
const chatStore = useChatStore(); const chatStore = useChatStore();
const session = chatStore.currentSession(); const session = chatStore.currentSession();
@ -145,7 +181,8 @@ export function SessionConfigModel(props: { onClose: () => void }) {
text={Locale.Chat.Config.Reset} text={Locale.Chat.Config.Reset}
onClick={async () => { onClick={async () => {
if (await showConfirm(Locale.Memory.ResetConfirm)) { if (await showConfirm(Locale.Memory.ResetConfirm)) {
chatStore.updateCurrentSession( chatStore.updateTargetSession(
session,
(session) => (session.memoryPrompt = ""), (session) => (session.memoryPrompt = ""),
); );
} }
@ -170,7 +207,10 @@ export function SessionConfigModel(props: { onClose: () => void }) {
updateMask={(updater) => { updateMask={(updater) => {
const mask = { ...session.mask }; const mask = { ...session.mask };
updater(mask); updater(mask);
chatStore.updateCurrentSession((session) => (session.mask = mask)); chatStore.updateTargetSession(
session,
(session) => (session.mask = mask),
);
}} }}
shouldSyncFromGlobal shouldSyncFromGlobal
extraListItems={ extraListItems={
@ -203,7 +243,7 @@ function PromptToast(props: {
<div className={styles["prompt-toast"]} key="prompt-toast"> <div className={styles["prompt-toast"]} key="prompt-toast">
{props.showToast && context.length > 0 && ( {props.showToast && context.length > 0 && (
<div <div
className={styles["prompt-toast-inner"] + " clickable"} className={clsx(styles["prompt-toast-inner"], "clickable")}
role="button" role="button"
onClick={() => props.setShowModal(true)} onClick={() => props.setShowModal(true)}
> >
@ -324,10 +364,9 @@ export function PromptHints(props: {
{props.prompts.map((prompt, i) => ( {props.prompts.map((prompt, i) => (
<div <div
ref={i === selectIndex ? selectedRef : null} ref={i === selectIndex ? selectedRef : null}
className={ className={clsx(styles["prompt-hint"], {
styles["prompt-hint"] + [styles["prompt-hint-selected"]]: i === selectIndex,
` ${i === selectIndex ? styles["prompt-hint-selected"] : ""}` })}
}
key={prompt.title + i.toString()} key={prompt.title + i.toString()}
onClick={() => props.onPromptSelect(prompt)} onClick={() => props.onPromptSelect(prompt)}
onMouseEnter={() => setSelectIndex(i)} onMouseEnter={() => setSelectIndex(i)}
@ -342,12 +381,14 @@ export function PromptHints(props: {
function ClearContextDivider() { function ClearContextDivider() {
const chatStore = useChatStore(); const chatStore = useChatStore();
const session = chatStore.currentSession();
return ( return (
<div <div
className={styles["clear-context"]} className={styles["clear-context"]}
onClick={() => onClick={() =>
chatStore.updateCurrentSession( chatStore.updateTargetSession(
session,
(session) => (session.clearContextIndex = undefined), (session) => (session.clearContextIndex = undefined),
) )
} }
@ -385,7 +426,7 @@ export function ChatAction(props: {
return ( return (
<div <div
className={`${styles["chat-input-action"]} clickable`} className={clsx(styles["chat-input-action"], "clickable")}
onClick={() => { onClick={() => {
props.onClick(); props.onClick();
setTimeout(updateWidth, 1); setTimeout(updateWidth, 1);
@ -412,11 +453,11 @@ export function ChatAction(props: {
function useScrollToBottom( function useScrollToBottom(
scrollRef: RefObject<HTMLDivElement>, scrollRef: RefObject<HTMLDivElement>,
detach: boolean = false, detach: boolean = false,
messages: ChatMessage[],
) { ) {
// for auto-scroll // for auto-scroll
const [autoScroll, setAutoScroll] = useState(true); const [autoScroll, setAutoScroll] = useState(true);
function scrollDomToBottom() { const scrollDomToBottom = useCallback(() => {
const dom = scrollRef.current; const dom = scrollRef.current;
if (dom) { if (dom) {
requestAnimationFrame(() => { requestAnimationFrame(() => {
@ -424,7 +465,7 @@ function useScrollToBottom(
dom.scrollTo(0, dom.scrollHeight); dom.scrollTo(0, dom.scrollHeight);
}); });
} }
} }, [scrollRef]);
// auto scroll // auto scroll
useEffect(() => { useEffect(() => {
@ -433,6 +474,15 @@ function useScrollToBottom(
} }
}); });
// auto scroll when messages length changes
const lastMessagesLength = useRef(messages.length);
useEffect(() => {
if (messages.length > lastMessagesLength.current && !detach) {
scrollDomToBottom();
}
lastMessagesLength.current = messages.length;
}, [messages.length, detach, scrollDomToBottom]);
return { return {
scrollRef, scrollRef,
autoScroll, autoScroll,
@ -452,14 +502,17 @@ export function ChatActions(props: {
uploading: boolean; uploading: boolean;
setShowShortcutKeyModal: React.Dispatch<React.SetStateAction<boolean>>; setShowShortcutKeyModal: React.Dispatch<React.SetStateAction<boolean>>;
setUserInput: (input: string) => void; setUserInput: (input: string) => void;
setShowChatSidePanel: React.Dispatch<React.SetStateAction<boolean>>;
}) { }) {
const config = useAppConfig(); const config = useAppConfig();
const navigate = useNavigate(); const navigate = useNavigate();
const chatStore = useChatStore(); const chatStore = useChatStore();
const pluginStore = usePluginStore(); const pluginStore = usePluginStore();
const session = chatStore.currentSession();
// switch themes // switch themes
const theme = config.theme; const theme = config.theme;
function nextTheme() { function nextTheme() {
const themes = [Theme.Auto, Theme.Light, Theme.Dark]; const themes = [Theme.Auto, Theme.Light, Theme.Dark];
const themeIndex = themes.indexOf(theme); const themeIndex = themes.indexOf(theme);
@ -473,10 +526,9 @@ export function ChatActions(props: {
const stopAll = () => ChatControllerPool.stopAll(); const stopAll = () => ChatControllerPool.stopAll();
// switch model // switch model
const currentModel = chatStore.currentSession().mask.modelConfig.model; const currentModel = session.mask.modelConfig.model;
const currentProviderName = const currentProviderName =
chatStore.currentSession().mask.modelConfig?.providerName || session.mask.modelConfig?.providerName || ServiceProvider.OpenAI;
ServiceProvider.OpenAI;
const allModels = useAllModels(); const allModels = useAllModels();
const models = useMemo(() => { const models = useMemo(() => {
const filteredModels = allModels.filter((m) => m.available); const filteredModels = allModels.filter((m) => m.available);
@ -507,15 +559,13 @@ export function ChatActions(props: {
const [showSizeSelector, setShowSizeSelector] = useState(false); const [showSizeSelector, setShowSizeSelector] = useState(false);
const [showQualitySelector, setShowQualitySelector] = useState(false); const [showQualitySelector, setShowQualitySelector] = useState(false);
const [showStyleSelector, setShowStyleSelector] = useState(false); const [showStyleSelector, setShowStyleSelector] = useState(false);
const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"]; const modelSizes = getModelSizes(currentModel);
const dalle3Qualitys: DalleQuality[] = ["standard", "hd"]; const dalle3Qualitys: DalleQuality[] = ["standard", "hd"];
const dalle3Styles: DalleStyle[] = ["vivid", "natural"]; const dalle3Styles: DalleStyle[] = ["vivid", "natural"];
const currentSize = const currentSize =
chatStore.currentSession().mask.modelConfig?.size ?? "1024x1024"; session.mask.modelConfig?.size ?? ("1024x1024" as ModelSize);
const currentQuality = const currentQuality = session.mask.modelConfig?.quality ?? "standard";
chatStore.currentSession().mask.modelConfig?.quality ?? "standard"; const currentStyle = session.mask.modelConfig?.style ?? "vivid";
const currentStyle =
chatStore.currentSession().mask.modelConfig?.style ?? "vivid";
const isMobileScreen = useMobileScreen(); const isMobileScreen = useMobileScreen();
@ -533,7 +583,7 @@ export function ChatActions(props: {
if (isUnavailableModel && models.length > 0) { if (isUnavailableModel && models.length > 0) {
// show next model to default model if exist // show next model to default model if exist
let nextModel = models.find((model) => model.isDefault) || models[0]; let nextModel = models.find((model) => model.isDefault) || models[0];
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
session.mask.modelConfig.model = nextModel.name; session.mask.modelConfig.model = nextModel.name;
session.mask.modelConfig.providerName = nextModel?.provider session.mask.modelConfig.providerName = nextModel?.provider
?.providerName as ServiceProvider; ?.providerName as ServiceProvider;
@ -544,10 +594,11 @@ export function ChatActions(props: {
: nextModel.name, : nextModel.name,
); );
} }
}, [chatStore, currentModel, models]); }, [chatStore, currentModel, models, session]);
return ( return (
<div className={styles["chat-input-actions"]}> <div className={styles["chat-input-actions"]}>
<>
{couldStop && ( {couldStop && (
<ChatAction <ChatAction
onClick={stopAll} onClick={stopAll}
@ -611,7 +662,7 @@ export function ChatActions(props: {
text={Locale.Chat.InputActions.Clear} text={Locale.Chat.InputActions.Clear}
icon={<BreakIcon />} icon={<BreakIcon />}
onClick={() => { onClick={() => {
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
if (session.clearContextIndex === session.messages.length) { if (session.clearContextIndex === session.messages.length) {
session.clearContextIndex = undefined; session.clearContextIndex = undefined;
} else { } else {
@ -642,8 +693,8 @@ export function ChatActions(props: {
onClose={() => setShowModelSelector(false)} onClose={() => setShowModelSelector(false)}
onSelection={(s) => { onSelection={(s) => {
if (s.length === 0) return; if (s.length === 0) return;
const [model, providerName] = s[0].split("@"); const [model, providerName] = getModelProvider(s[0]);
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
session.mask.modelConfig.model = model as ModelType; session.mask.modelConfig.model = model as ModelType;
session.mask.modelConfig.providerName = session.mask.modelConfig.providerName =
providerName as ServiceProvider; providerName as ServiceProvider;
@ -652,7 +703,8 @@ export function ChatActions(props: {
if (providerName == "ByteDance") { if (providerName == "ByteDance") {
const selectedModel = models.find( const selectedModel = models.find(
(m) => (m) =>
m.name == model && m?.provider?.providerName == providerName, m.name == model &&
m?.provider?.providerName == providerName,
); );
showToast(selectedModel?.displayName ?? ""); showToast(selectedModel?.displayName ?? "");
} else { } else {
@ -662,7 +714,7 @@ export function ChatActions(props: {
/> />
)} )}
{isDalle3(currentModel) && ( {supportsCustomSize(currentModel) && (
<ChatAction <ChatAction
onClick={() => setShowSizeSelector(true)} onClick={() => setShowSizeSelector(true)}
text={currentSize} text={currentSize}
@ -673,7 +725,7 @@ export function ChatActions(props: {
{showSizeSelector && ( {showSizeSelector && (
<Selector <Selector
defaultSelectedValue={currentSize} defaultSelectedValue={currentSize}
items={dalle3Sizes.map((m) => ({ items={modelSizes.map((m) => ({
title: m, title: m,
value: m, value: m,
}))} }))}
@ -681,7 +733,7 @@ export function ChatActions(props: {
onSelection={(s) => { onSelection={(s) => {
if (s.length === 0) return; if (s.length === 0) return;
const size = s[0]; const size = s[0];
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
session.mask.modelConfig.size = size; session.mask.modelConfig.size = size;
}); });
showToast(size); showToast(size);
@ -708,7 +760,7 @@ export function ChatActions(props: {
onSelection={(q) => { onSelection={(q) => {
if (q.length === 0) return; if (q.length === 0) return;
const quality = q[0]; const quality = q[0];
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
session.mask.modelConfig.quality = quality; session.mask.modelConfig.quality = quality;
}); });
showToast(quality); showToast(quality);
@ -735,7 +787,7 @@ export function ChatActions(props: {
onSelection={(s) => { onSelection={(s) => {
if (s.length === 0) return; if (s.length === 0) return;
const style = s[0]; const style = s[0];
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
session.mask.modelConfig.style = style; session.mask.modelConfig.style = style;
}); });
showToast(style); showToast(style);
@ -766,7 +818,7 @@ export function ChatActions(props: {
}))} }))}
onClose={() => setShowPluginSelector(false)} onClose={() => setShowPluginSelector(false)}
onSelection={(s) => { onSelection={(s) => {
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
session.mask.plugin = s as string[]; session.mask.plugin = s as string[];
}); });
}} }}
@ -780,6 +832,17 @@ export function ChatActions(props: {
icon={<ShortcutkeyIcon />} icon={<ShortcutkeyIcon />}
/> />
)} )}
{!isMobileScreen && <MCPAction />}
</>
<div className={styles["chat-input-actions-end"]}>
{config.realtimeConfig.enable && (
<ChatAction
onClick={() => props.setShowChatSidePanel(true)}
text={"Realtime Chat"}
icon={<HeadphoneIcon />}
/>
)}
</div>
</div> </div>
); );
} }
@ -809,7 +872,8 @@ export function EditMessageModal(props: { onClose: () => void }) {
icon={<ConfirmIcon />} icon={<ConfirmIcon />}
key="ok" key="ok"
onClick={() => { onClick={() => {
chatStore.updateCurrentSession( chatStore.updateTargetSession(
session,
(session) => (session.messages = messages), (session) => (session.messages = messages),
); );
props.onClose(); props.onClose();
@ -826,7 +890,8 @@ export function EditMessageModal(props: { onClose: () => void }) {
type="text" type="text"
value={session.topic} value={session.topic}
onInput={(e) => onInput={(e) =>
chatStore.updateCurrentSession( chatStore.updateTargetSession(
session,
(session) => (session.topic = e.currentTarget.value), (session) => (session.topic = e.currentTarget.value),
) )
} }
@ -874,6 +939,12 @@ export function ShortcutKeyModal(props: { onClose: () => void }) {
title: Locale.Chat.ShortcutKey.showShortcutKey, title: Locale.Chat.ShortcutKey.showShortcutKey,
keys: isMac ? ["⌘", "/"] : ["Ctrl", "/"], keys: isMac ? ["⌘", "/"] : ["Ctrl", "/"],
}, },
{
title: Locale.Chat.ShortcutKey.clearContext,
keys: isMac
? ["⌘", "Shift", "backspace"]
: ["Ctrl", "Shift", "backspace"],
},
]; ];
return ( return (
<div className="modal-mask"> <div className="modal-mask">
@ -937,9 +1008,25 @@ function _Chat() {
(scrollRef.current.scrollTop + scrollRef.current.clientHeight), (scrollRef.current.scrollTop + scrollRef.current.clientHeight),
) <= 1 ) <= 1
: false; : false;
const isAttachWithTop = useMemo(() => {
const lastMessage = scrollRef.current?.lastElementChild as HTMLElement;
// if scrolllRef is not ready or no message, return false
if (!scrollRef?.current || !lastMessage) return false;
const topDistance =
lastMessage!.getBoundingClientRect().top -
scrollRef.current.getBoundingClientRect().top;
// leave some space for user question
return topDistance < 100;
}, [scrollRef?.current?.scrollHeight]);
const isTyping = userInput !== "";
// if user is typing, should auto scroll to bottom
// if user is not typing, should auto scroll to bottom only if already at bottom
const { setAutoScroll, scrollDomToBottom } = useScrollToBottom( const { setAutoScroll, scrollDomToBottom } = useScrollToBottom(
scrollRef, scrollRef,
isScrolledToBottom, (isScrolledToBottom || isAttachWithTop) && !isTyping,
session.messages,
); );
const [hitBottom, setHitBottom] = useState(true); const [hitBottom, setHitBottom] = useState(true);
const isMobileScreen = useMobileScreen(); const isMobileScreen = useMobileScreen();
@ -987,7 +1074,8 @@ function _Chat() {
prev: () => chatStore.nextSession(-1), prev: () => chatStore.nextSession(-1),
next: () => chatStore.nextSession(1), next: () => chatStore.nextSession(1),
clear: () => clear: () =>
chatStore.updateCurrentSession( chatStore.updateTargetSession(
session,
(session) => (session.clearContextIndex = session.messages.length), (session) => (session.clearContextIndex = session.messages.length),
), ),
fork: () => chatStore.forkSession(), fork: () => chatStore.forkSession(),
@ -1015,7 +1103,7 @@ function _Chat() {
}; };
const doSubmit = (userInput: string) => { const doSubmit = (userInput: string) => {
if (userInput.trim() === "") return; if (userInput.trim() === "" && isEmpty(attachImages)) return;
const matchCommand = chatCommands.match(userInput); const matchCommand = chatCommands.match(userInput);
if (matchCommand.matched) { if (matchCommand.matched) {
setUserInput(""); setUserInput("");
@ -1058,7 +1146,7 @@ function _Chat() {
}; };
useEffect(() => { useEffect(() => {
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(session, (session) => {
const stopTiming = Date.now() - REQUEST_TIMEOUT_MS; const stopTiming = Date.now() - REQUEST_TIMEOUT_MS;
session.messages.forEach((m) => { session.messages.forEach((m) => {
// check if should stop all stale messages // check if should stop all stale messages
@ -1084,7 +1172,7 @@ function _Chat() {
} }
}); });
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, []); }, [session]);
// check if should send message // check if should send message
const onInputKeyDown = (e: React.KeyboardEvent<HTMLTextAreaElement>) => { const onInputKeyDown = (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
@ -1115,7 +1203,8 @@ function _Chat() {
}; };
const deleteMessage = (msgId?: string) => { const deleteMessage = (msgId?: string) => {
chatStore.updateCurrentSession( chatStore.updateTargetSession(
session,
(session) => (session) =>
(session.messages = session.messages.filter((m) => m.id !== msgId)), (session.messages = session.messages.filter((m) => m.id !== msgId)),
); );
@ -1182,7 +1271,7 @@ function _Chat() {
}; };
const onPinMessage = (message: ChatMessage) => { const onPinMessage = (message: ChatMessage) => {
chatStore.updateCurrentSession((session) => chatStore.updateTargetSession(session, (session) =>
session.mask.context.push(message), session.mask.context.push(message),
); );
@ -1197,6 +1286,7 @@ function _Chat() {
const accessStore = useAccessStore(); const accessStore = useAccessStore();
const [speechStatus, setSpeechStatus] = useState(false); const [speechStatus, setSpeechStatus] = useState(false);
const [speechLoading, setSpeechLoading] = useState(false); const [speechLoading, setSpeechLoading] = useState(false);
async function openaiSpeech(text: string) { async function openaiSpeech(text: string) {
if (speechStatus) { if (speechStatus) {
ttsPlayer.stop(); ttsPlayer.stop();
@ -1296,6 +1386,7 @@ function _Chat() {
const [msgRenderIndex, _setMsgRenderIndex] = useState( const [msgRenderIndex, _setMsgRenderIndex] = useState(
Math.max(0, renderMessages.length - CHAT_PAGE_SIZE), Math.max(0, renderMessages.length - CHAT_PAGE_SIZE),
); );
function setMsgRenderIndex(newIndex: number) { function setMsgRenderIndex(newIndex: number) {
newIndex = Math.min(renderMessages.length - CHAT_PAGE_SIZE, newIndex); newIndex = Math.min(renderMessages.length - CHAT_PAGE_SIZE, newIndex);
newIndex = Math.max(0, newIndex); newIndex = Math.max(0, newIndex);
@ -1331,6 +1422,7 @@ function _Chat() {
setHitBottom(isHitBottom); setHitBottom(isHitBottom);
setAutoScroll(isHitBottom); setAutoScroll(isHitBottom);
}; };
function scrollToBottom() { function scrollToBottom() {
setMsgRenderIndex(renderMessages.length - CHAT_PAGE_SIZE); setMsgRenderIndex(renderMessages.length - CHAT_PAGE_SIZE);
scrollDomToBottom(); scrollDomToBottom();
@ -1509,7 +1601,7 @@ function _Chat() {
const [showShortcutKeyModal, setShowShortcutKeyModal] = useState(false); const [showShortcutKeyModal, setShowShortcutKeyModal] = useState(false);
useEffect(() => { useEffect(() => {
const handleKeyDown = (event: any) => { const handleKeyDown = (event: KeyboardEvent) => {
// 打开新聊天 command + shift + o // 打开新聊天 command + shift + o
if ( if (
(event.metaKey || event.ctrlKey) && (event.metaKey || event.ctrlKey) &&
@ -1560,16 +1652,35 @@ function _Chat() {
event.preventDefault(); event.preventDefault();
setShowShortcutKeyModal(true); setShowShortcutKeyModal(true);
} }
// 清除上下文 command + shift + backspace
else if (
(event.metaKey || event.ctrlKey) &&
event.shiftKey &&
event.key.toLowerCase() === "backspace"
) {
event.preventDefault();
chatStore.updateTargetSession(session, (session) => {
if (session.clearContextIndex === session.messages.length) {
session.clearContextIndex = undefined;
} else {
session.clearContextIndex = session.messages.length;
session.memoryPrompt = ""; // will clear memory
}
});
}
}; };
window.addEventListener("keydown", handleKeyDown); document.addEventListener("keydown", handleKeyDown);
return () => { return () => {
window.removeEventListener("keydown", handleKeyDown); document.removeEventListener("keydown", handleKeyDown);
}; };
}, [messages, chatStore, navigate]); }, [messages, chatStore, navigate, session]);
const [showChatSidePanel, setShowChatSidePanel] = useState(false);
return ( return (
<>
<div className={styles.chat} key={session.id}> <div className={styles.chat} key={session.id}>
<div className="window-header" data-tauri-drag-region> <div className="window-header" data-tauri-drag-region>
{isMobileScreen && ( {isMobileScreen && (
@ -1585,9 +1696,14 @@ function _Chat() {
</div> </div>
)} )}
<div className={`window-header-title ${styles["chat-body-title"]}`}>
<div <div
className={`window-header-main-title ${styles["chat-body-main-title"]}`} className={clsx("window-header-title", styles["chat-body-title"])}
>
<div
className={clsx(
"window-header-main-title",
styles["chat-body-main-title"],
)}
onClickCapture={() => setIsEditingMessage(true)} onClickCapture={() => setIsEditingMessage(true)}
> >
{!session.topic ? DEFAULT_TOPIC : session.topic} {!session.topic ? DEFAULT_TOPIC : session.topic}
@ -1604,7 +1720,7 @@ function _Chat() {
title={Locale.Chat.Actions.RefreshTitle} title={Locale.Chat.Actions.RefreshTitle}
onClick={() => { onClick={() => {
showToast(Locale.Chat.Actions.RefreshToast); showToast(Locale.Chat.Actions.RefreshToast);
chatStore.summarizeSession(true); chatStore.summarizeSession(true, session);
}} }}
/> />
</div> </div>
@ -1652,7 +1768,8 @@ function _Chat() {
setShowModal={setShowPromptModal} setShowModal={setShowPromptModal}
/> />
</div> </div>
<div className={styles["chat-main"]}>
<div className={styles["chat-body-container"]}>
<div <div
className={styles["chat-body"]} className={styles["chat-body"]}
ref={scrollRef} ref={scrollRef}
@ -1663,7 +1780,10 @@ function _Chat() {
setAutoScroll(false); setAutoScroll(false);
}} }}
> >
{messages.map((message, i) => { {messages
// TODO
// .filter((m) => !m.isMcpResponse)
.map((message, i) => {
const isUser = message.role === "user"; const isUser = message.role === "user";
const isContext = i < context.length; const isContext = i < context.length;
const showActions = const showActions =
@ -1672,13 +1792,16 @@ function _Chat() {
!isContext; !isContext;
const showTyping = message.preview || message.streaming; const showTyping = message.preview || message.streaming;
const shouldShowClearContextDivider = i === clearContextIndex - 1; const shouldShowClearContextDivider =
i === clearContextIndex - 1;
return ( return (
<Fragment key={message.id}> <Fragment key={message.id}>
<div <div
className={ className={
isUser ? styles["chat-message-user"] : styles["chat-message"] isUser
? styles["chat-message-user"]
: styles["chat-message"]
} }
> >
<div className={styles["chat-message-container"]}> <div className={styles["chat-message-container"]}>
@ -1694,11 +1817,14 @@ function _Chat() {
getMessageTextContent(message), getMessageTextContent(message),
10, 10,
); );
let newContent: string | MultimodalContent[] = let newContent:
newMessage; | string
| MultimodalContent[] = newMessage;
const images = getMessageImages(message); const images = getMessageImages(message);
if (images.length > 0) { if (images.length > 0) {
newContent = [{ type: "text", text: newMessage }]; newContent = [
{ type: "text", text: newMessage },
];
for (let i = 0; i < images.length; i++) { for (let i = 0; i < images.length; i++) {
newContent.push({ newContent.push({
type: "image_url", type: "image_url",
@ -1708,14 +1834,17 @@ function _Chat() {
}); });
} }
} }
chatStore.updateCurrentSession((session) => { chatStore.updateTargetSession(
session,
(session) => {
const m = session.mask.context const m = session.mask.context
.concat(session.messages)
.find((m) => m.id === message.id);
if (m) {
m.content = newContent;
}
- });
+ },
+ );
}}
></IconButton>
</div>
@@ -1729,7 +1858,8 @@ function _Chat() {
<MaskAvatar
avatar={session.mask.avatar}
model={
- message.model || session.mask.modelConfig.model
+ message.model ||
+ session.mask.modelConfig.model
}
/>
)}
@@ -1749,7 +1879,9 @@
<ChatAction
text={Locale.Chat.Actions.Stop}
icon={<StopIcon />}
- onClick={() => onUserStop(message.id ?? i)}
+ onClick={() =>
+ onUserStop(message.id ?? i)
+ }
/>
) : (
<>
@@ -1762,7 +1894,9 @@
<ChatAction
text={Locale.Chat.Actions.Delete}
icon={<DeleteIcon />}
- onClick={() => onDelete(message.id ?? i)}
+ onClick={() =>
+ onDelete(message.id ?? i)
+ }
/>
<ChatAction
@@ -1794,7 +1928,9 @@
)
}
onClick={() =>
- openaiSpeech(getMessageTextContent(message))
+ openaiSpeech(
+ getMessageTextContent(message),
+ )
}
/>
)}
@@ -1815,6 +1951,7 @@
{message?.tools?.map((tool) => (
<div
key={tool.id}
+ title={tool?.errorMsg}
className={styles["chat-message-tool"]}
>
{tool.isError === false ? (
@@ -1860,25 +1997,35 @@
className={styles["chat-message-item-images"]}
style={
{
- "--image-count": getMessageImages(message).length,
+ "--image-count":
+ getMessageImages(message).length,
} as React.CSSProperties
}
>
- {getMessageImages(message).map((image, index) => {
+ {getMessageImages(message).map(
+ (image, index) => {
return (
<img
className={
- styles["chat-message-item-image-multi"]
+ styles[
+ "chat-message-item-image-multi"
+ ]
}
key={index}
src={image}
alt=""
/>
);
- })}
+ },
+ )}
</div>
)}
</div>
+ {message?.audio_url && (
+ <div className={styles["chat-message-audio"]}>
+ <audio src={message.audio_url} controls />
+ </div>
+ )}
<div className={styles["chat-message-action-date"]}>
{isContext
@@ -1892,9 +2039,11 @@
);
})}
</div>
<div className={styles["chat-input-panel"]}>
- <PromptHints prompts={promptHints} onPromptSelect={onPromptSelect} />
+ <PromptHints
+ prompts={promptHints}
+ onPromptSelect={onPromptSelect}
+ />
<ChatActions
uploadImage={uploadImage}
@@ -1917,13 +2066,13 @@
}}
setShowShortcutKeyModal={setShowShortcutKeyModal}
setUserInput={setUserInput}
+ setShowChatSidePanel={setShowChatSidePanel}
/>
<label
- className={`${styles["chat-input-panel-inner"]} ${
- attachImages.length != 0
- ? styles["chat-input-panel-inner-attach"]
- : ""
- }`}
+ className={clsx(styles["chat-input-panel-inner"], {
+ [styles["chat-input-panel-inner-attach"]]:
+ attachImages.length !== 0,
+ })}
htmlFor="chat-input"
>
<textarea
@@ -1976,7 +2125,26 @@
/>
</label>
</div>
+ </div>
+ <div
+ className={clsx(styles["chat-side-panel"], {
+ [styles["mobile"]]: isMobileScreen,
+ [styles["chat-side-panel-show"]]: showChatSidePanel,
+ })}
+ >
+ {showChatSidePanel && (
+ <RealtimeChat
+ onClose={() => {
+ setShowChatSidePanel(false);
+ }}
+ onStartVoice={async () => {
+ console.log("start voice");
+ }}
+ />
+ )}
+ </div>
</div>
</div>
{showExport && (
<ExportMessageModal onClose={() => setShowExport(false)} />
)}
@@ -1992,12 +2160,12 @@
{showShortcutKeyModal && (
<ShortcutKeyModal onClose={() => setShowShortcutKeyModal(false)} />
)}
- </div>
+ </>
);
}
export function Chat() {
const chatStore = useChatStore();
- const sessionIndex = chatStore.currentSessionIndex;
- return <_Chat key={sessionIndex}></_Chat>;
+ const session = chatStore.currentSession();
+ return <_Chat key={session.id}></_Chat>;
}
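In the final hunk, `_Chat` is keyed by `session.id` instead of the session's list index. Keying by a stable id makes React remount the chat view only when the selected session actually changes; with an index key, deleting or reordering sessions can hand one session's transient UI state to another. A minimal sketch of the difference (illustrative only, not project code):

```tsx
import React, { useState } from "react";

// A pane with transient UI state (a draft input), like _Chat's input/scroll state.
function Pane({ title }: { title: string }) {
  const [draft, setDraft] = useState("");
  return (
    <label>
      {title}
      <input value={draft} onChange={(e) => setDraft(e.target.value)} />
    </label>
  );
}

export function Host(props: {
  items: { id: string; title: string }[];
  current: number;
}) {
  const item = props.items[props.current];
  // key={item.id}: Pane remounts (draft resets) only when the selected item
  // really changes. key={props.current} would keep the old draft alive when
  // items are deleted or reordered, showing one item's draft under another.
  return <Pane key={item.id} title={item.title} />;
}
```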

View File

@@ -6,8 +6,21 @@ import EmojiPicker, {
import { ModelType } from "../store";
- import BotIcon from "../icons/bot.svg";
- import BlackBotIcon from "../icons/black-bot.svg";
+ import BotIconDefault from "../icons/llm-icons/default.svg";
+ import BotIconOpenAI from "../icons/llm-icons/openai.svg";
+ import BotIconGemini from "../icons/llm-icons/gemini.svg";
+ import BotIconGemma from "../icons/llm-icons/gemma.svg";
+ import BotIconClaude from "../icons/llm-icons/claude.svg";
+ import BotIconMeta from "../icons/llm-icons/meta.svg";
+ import BotIconMistral from "../icons/llm-icons/mistral.svg";
+ import BotIconDeepseek from "../icons/llm-icons/deepseek.svg";
+ import BotIconMoonshot from "../icons/llm-icons/moonshot.svg";
+ import BotIconQwen from "../icons/llm-icons/qwen.svg";
+ import BotIconWenxin from "../icons/llm-icons/wenxin.svg";
+ import BotIconGrok from "../icons/llm-icons/grok.svg";
+ import BotIconHunyuan from "../icons/llm-icons/hunyuan.svg";
+ import BotIconDoubao from "../icons/llm-icons/doubao.svg";
+ import BotIconChatglm from "../icons/llm-icons/chatglm.svg";
export function getEmojiUrl(unified: string, style: EmojiStyle) {
// Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis
@@ -33,15 +46,55 @@ export function AvatarPicker(props: {
}
export function Avatar(props: { model?: ModelType; avatar?: string }) {
+ let LlmIcon = BotIconDefault;
if (props.model) {
+ const modelName = props.model.toLowerCase();
+ if (
+ modelName.startsWith("gpt") ||
+ modelName.startsWith("chatgpt") ||
+ modelName.startsWith("dall-e") ||
+ modelName.startsWith("dalle") ||
+ modelName.startsWith("o1") ||
+ modelName.startsWith("o3")
+ ) {
+ LlmIcon = BotIconOpenAI;
+ } else if (modelName.startsWith("gemini")) {
+ LlmIcon = BotIconGemini;
+ } else if (modelName.startsWith("gemma")) {
+ LlmIcon = BotIconGemma;
+ } else if (modelName.startsWith("claude")) {
+ LlmIcon = BotIconClaude;
+ } else if (modelName.includes("llama")) {
+ LlmIcon = BotIconMeta;
+ } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
+ LlmIcon = BotIconMistral;
+ } else if (modelName.includes("deepseek")) {
+ LlmIcon = BotIconDeepseek;
+ } else if (modelName.startsWith("moonshot")) {
+ LlmIcon = BotIconMoonshot;
+ } else if (modelName.startsWith("qwen")) {
+ LlmIcon = BotIconQwen;
+ } else if (modelName.startsWith("ernie")) {
+ LlmIcon = BotIconWenxin;
+ } else if (modelName.startsWith("grok")) {
+ LlmIcon = BotIconGrok;
+ } else if (modelName.startsWith("hunyuan")) {
+ LlmIcon = BotIconHunyuan;
+ } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
+ LlmIcon = BotIconDoubao;
+ } else if (
+ modelName.includes("glm") ||
+ modelName.startsWith("cogview-") ||
+ modelName.startsWith("cogvideox-")
+ ) {
+ LlmIcon = BotIconChatglm;
+ }
return (
<div className="no-dark">
- {props.model?.startsWith("gpt-4") ||
- props.model?.startsWith("chatgpt-4o") ? (
- <BlackBotIcon className="user-avatar" />
- ) : (
- <BotIcon className="user-avatar" />
- )}
+ <LlmIcon className="user-avatar" width={30} height={30} />
</div>
);
}
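The new `Avatar` resolves the icon from the model name with a prefix/substring chain over fourteen providers. The same dispatch can be read as a first-match lookup table; a hedged sketch with string placeholders standing in for the imported SVG components:

```ts
// Sketch only: table-driven equivalent of the if/else chain above. The
// matcher strings mirror the hunk; the icon values are placeholders for
// the imported SVG components.
type Matcher = { test: (m: string) => boolean; icon: string };

const MATCHERS: Matcher[] = [
  {
    test: (m) =>
      ["gpt", "chatgpt", "dall-e", "dalle", "o1", "o3"].some((p) =>
        m.startsWith(p),
      ),
    icon: "openai",
  },
  { test: (m) => m.startsWith("gemini"), icon: "gemini" },
  { test: (m) => m.includes("llama"), icon: "meta" },
  { test: (m) => m.startsWith("mixtral") || m.startsWith("codestral"), icon: "mistral" },
  { test: (m) => m.includes("deepseek"), icon: "deepseek" },
];

export function iconFor(model: string): string {
  const name = model.toLowerCase();
  // First match wins, exactly like the if/else chain; fall back to default.
  return MATCHERS.find((x) => x.test(name))?.icon ?? "default";
}
```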

View File

@@ -23,7 +23,6 @@ import CopyIcon from "../icons/copy.svg";
import LoadingIcon from "../icons/three-dots.svg";
import ChatGptIcon from "../icons/chatgpt.png";
import ShareIcon from "../icons/share.svg";
- import BotIcon from "../icons/bot.png";
import DownloadIcon from "../icons/download.svg";
import { useEffect, useMemo, useRef, useState } from "react";
@@ -33,13 +32,14 @@ import dynamic from "next/dynamic";
import NextImage from "next/image";
import { toBlob, toPng } from "html-to-image";
- import { DEFAULT_MASK_AVATAR } from "../store/mask";
import { prettyObject } from "../utils/format";
import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
import { getClientConfig } from "../config/client";
import { type ClientApi, getClientApi } from "../client/api";
import { getMessageTextContent } from "../utils";
+ import { MaskAvatar } from "./mask";
+ import clsx from "clsx";
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => <LoadingIcon />,
@@ -118,9 +118,10 @@ function Steps<
return (
<div
key={i}
- className={`${styles["step"]} ${
- styles[i <= props.index ? "step-finished" : ""]
- } ${i === props.index && styles["step-current"]} clickable`}
+ className={clsx("clickable", styles["step"], {
+ [styles["step-finished"]]: i <= props.index,
+ [styles["step-current"]]: i === props.index,
+ })}
onClick={() => {
props.onStepChange?.(i);
}}
@@ -405,22 +406,6 @@ export function PreviewActions(props: {
);
}
- function ExportAvatar(props: { avatar: string }) {
- if (props.avatar === DEFAULT_MASK_AVATAR) {
- return (
- <img
- src={BotIcon.src}
- width={30}
- height={30}
- alt="bot"
- className="user-avatar"
- />
- );
- }
- return <Avatar avatar={props.avatar} />;
- }
export function ImagePreviewer(props: {
messages: ChatMessage[];
topic: string;
@@ -525,11 +510,11 @@ export function ImagePreviewer(props: {
messages={props.messages}
/>
<div
- className={`${styles["preview-body"]} ${styles["default-theme"]}`}
+ className={clsx(styles["preview-body"], styles["default-theme"])}
ref={previewRef}
>
<div className={styles["chat-info"]}>
- <div className={styles["logo"] + " no-dark"}>
+ <div className={clsx(styles["logo"], "no-dark")}>
<NextImage
src={ChatGptIcon.src}
alt="logo"
@@ -544,9 +529,12 @@ export function ImagePreviewer(props: {
github.com/ChatGPTNextWeb/ChatGPT-Next-Web
</div>
<div className={styles["icons"]}>
- <ExportAvatar avatar={config.avatar} />
+ <MaskAvatar avatar={config.avatar} />
<span className={styles["icon-space"]}>&</span>
- <ExportAvatar avatar={mask.avatar} />
+ <MaskAvatar
+ avatar={mask.avatar}
+ model={session.mask.modelConfig.model}
+ />
</div>
</div>
<div>
@@ -570,13 +558,18 @@ export function ImagePreviewer(props: {
{props.messages.map((m, i) => {
return (
<div
- className={styles["message"] + " " + styles["message-" + m.role]}
+ className={clsx(styles["message"], styles["message-" + m.role])}
key={i}
>
<div className={styles["avatar"]}>
- <ExportAvatar
- avatar={m.role === "user" ? config.avatar : mask.avatar}
- />
+ {m.role === "user" ? (
+ <Avatar avatar={config.avatar}></Avatar>
+ ) : (
+ <MaskAvatar
+ avatar={session.mask.avatar}
+ model={m.model || session.mask.modelConfig.model}
+ />
+ )}
</div>
<div className={styles["body"]}>

View File

@@ -140,6 +140,9 @@
display: flex;
justify-content: space-between;
align-items: center;
+ &-narrow {
+ justify-content: center;
+ }
}
.sidebar-logo {

View File

@@ -2,8 +2,7 @@
require("../polyfill");
- import { useState, useEffect } from "react";
+ import { useEffect, useState } from "react";
import styles from "./home.module.scss";
import BotIcon from "../icons/bot.svg";
@@ -19,8 +18,8 @@ import { getISOLang, getLang } from "../locales";
import {
HashRouter as Router,
- Routes,
Route,
+ Routes,
useLocation,
} from "react-router-dom";
import { SideBar } from "./sidebar";
@@ -29,10 +28,12 @@ import { AuthPage } from "./auth";
import { getClientConfig } from "../config/client";
import { type ClientApi, getClientApi } from "../client/api";
import { useAccessStore } from "../store";
+ import clsx from "clsx";
+ import { initializeMcpSystem, isMcpEnabled } from "../mcp/actions";
export function Loading(props: { noLogo?: boolean }) {
return (
- <div className={styles["loading-content"] + " no-dark"}>
+ <div className={clsx("no-dark", styles["loading-content"])}>
{!props.noLogo && <BotIcon />}
<LoadingIcon />
</div>
@@ -74,6 +75,13 @@ const Sd = dynamic(async () => (await import("./sd")).Sd, {
loading: () => <Loading noLogo />,
});
+ const McpMarketPage = dynamic(
+ async () => (await import("./mcp-market")).McpMarketPage,
+ {
+ loading: () => <Loading noLogo />,
+ },
+ );
export function useSwitchTheme() {
const config = useAppConfig();
@@ -179,7 +187,11 @@ function Screen() {
if (isSdNew) return <Sd />;
return (
<>
- <SideBar className={isHome ? styles["sidebar-show"] : ""} />
+ <SideBar
+ className={clsx({
+ [styles["sidebar-show"]]: isHome,
+ })}
+ />
<WindowContent>
<Routes>
<Route path={Path.Home} element={<Chat />} />
@@ -189,6 +201,7 @@ function Screen() {
<Route path={Path.SearchChat} element={<SearchChat />} />
<Route path={Path.Chat} element={<Chat />} />
<Route path={Path.Settings} element={<Settings />} />
+ <Route path={Path.McpMarket} element={<McpMarketPage />} />
</Routes>
</WindowContent>
</>
@@ -197,9 +210,10 @@ function Screen() {
return (
<div
- className={`${styles.container} ${
- shouldTightBorder ? styles["tight-container"] : styles.container
- } ${getLang() === "ar" ? styles["rtl-screen"] : ""}`}
+ className={clsx(styles.container, {
+ [styles["tight-container"]]: shouldTightBorder,
+ [styles["rtl-screen"]]: getLang() === "ar",
+ })}
>
{renderContent()}
</div>
@@ -228,6 +242,20 @@ export function Home() {
useEffect(() => {
console.log("[Config] got config from build time", getClientConfig());
useAccessStore.getState().fetch();
+ const initMcp = async () => {
+ try {
+ const enabled = await isMcpEnabled();
+ if (enabled) {
+ console.log("[MCP] initializing...");
+ await initializeMcpSystem();
+ console.log("[MCP] initialized");
+ }
+ } catch (err) {
+ console.error("[MCP] failed to initialize:", err);
+ }
+ };
+ initMcp();
}, []);
if (!useHasHydrated()) {
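The `Home` hook above defines `initMcp` inside `useEffect` rather than making the effect callback itself async, since React treats a returned promise as a (broken) cleanup value. The same guard-then-init shape, reduced to a sketch with hypothetical `isEnabled`/`initialize` stand-ins:

```ts
import { useEffect } from "react";

// Hypothetical stand-ins for isMcpEnabled / initializeMcpSystem.
declare function isEnabled(): Promise<boolean>;
declare function initialize(): Promise<void>;

export function useGuardedInit() {
  useEffect(() => {
    // An inner async helper keeps the effect callback synchronous; returning
    // a Promise from useEffect would be misread as a cleanup function.
    const init = async () => {
      try {
        if (await isEnabled()) {
          await initialize();
        }
      } catch (err) {
        console.error("[init] failed:", err);
      }
    };
    init();
  }, []);
}
```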

View File

@@ -1,5 +1,6 @@
import * as React from "react";
import styles from "./input-range.module.scss";
+ import clsx from "clsx";
interface InputRangeProps {
onChange: React.ChangeEventHandler<HTMLInputElement>;
@@ -23,7 +24,7 @@ export function InputRange({
aria,
}: InputRangeProps) {
return (
- <div className={styles["input-range"] + ` ${className ?? ""}`}>
+ <div className={clsx(styles["input-range"], className)}>
{title || value}
<input
aria-label={aria}
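This one-line change is representative of the `clsx` migration running through most of these hunks: `clsx` drops falsy inputs, so the manual `?? ""` fallback and template-string plumbing become unnecessary. A few examples of its semantics:

```ts
import clsx from "clsx";

// clsx keeps truthy class names and skips falsy ones (undefined, null, false).
const a = clsx("input-range", undefined);   // "input-range"
const b = clsx("a", { b: true, c: false }); // "a b"
const c = clsx("base", ["x", { y: true }]); // "base x y"
console.log(a, b, c);
```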

View File

@@ -23,6 +23,7 @@ import { useChatStore } from "../store";
import { IconButton } from "./button";
import { useAppConfig } from "../store/config";
+ import clsx from "clsx";
export function Mermaid(props: { code: string }) {
const ref = useRef<HTMLDivElement>(null);
@@ -57,7 +58,7 @@ export function Mermaid(props: { code: string }) {
return (
<div
- className="no-dark mermaid"
+ className={clsx("no-dark", "mermaid")}
style={{
cursor: "pointer",
overflow: "auto",
@@ -89,7 +90,11 @@ export function PreCode(props: { children: any }) {
const refText = ref.current.querySelector("code")?.innerText;
if (htmlDom) {
setHtmlCode((htmlDom as HTMLElement).innerText);
- } else if (refText?.startsWith("<!DOCTYPE")) {
+ } else if (
+ refText?.startsWith("<!DOCTYPE") ||
+ refText?.startsWith("<svg") ||
+ refText?.startsWith("<?xml")
+ ) {
setHtmlCode(refText);
}
}, 600);
@@ -169,6 +174,12 @@ export function PreCode(props: { children: any }) {
}
function CustomCode(props: { children: any; className?: string }) {
+ const chatStore = useChatStore();
+ const session = chatStore.currentSession();
+ const config = useAppConfig();
+ const enableCodeFold =
+ session.mask?.enableCodeFold !== false && config.enableCodeFold;
const ref = useRef<HTMLPreElement>(null);
const [collapsed, setCollapsed] = useState(true);
const [showToggle, setShowToggle] = useState(false);
@@ -184,46 +195,39 @@ function CustomCode(props: { children: any; className?: string }) {
const toggleCollapsed = () => {
setCollapsed((collapsed) => !collapsed);
};
+ const renderShowMoreButton = () => {
+ if (showToggle && enableCodeFold && collapsed) {
+ return (
+ <div
+ className={clsx("show-hide-button", {
+ collapsed,
+ expanded: !collapsed,
+ })}
+ >
+ <button onClick={toggleCollapsed}>{Locale.NewChat.More}</button>
+ </div>
+ );
+ }
+ return null;
+ };
return (
<>
<code
- className={props?.className}
+ className={clsx(props?.className)}
ref={ref}
style={{
- maxHeight: collapsed ? "400px" : "none",
+ maxHeight: enableCodeFold && collapsed ? "400px" : "none",
overflowY: "hidden",
}}
>
{props.children}
</code>
- {showToggle && collapsed && (
- <div
- className={`show-hide-button ${collapsed ? "collapsed" : "expanded"}`}
- >
- <button onClick={toggleCollapsed}>{Locale.NewChat.More}</button>
- </div>
- )}
+ {renderShowMoreButton()}
</>
);
}
- function escapeDollarNumber(text: string) {
- let escapedText = "";
- for (let i = 0; i < text.length; i += 1) {
- let char = text[i];
- const nextChar = text[i + 1] || " ";
- if (char === "$" && nextChar >= "0" && nextChar <= "9") {
- char = "\\$";
- }
- escapedText += char;
- }
- return escapedText;
- }
function escapeBrackets(text: string) {
const pattern =
/(```[\s\S]*?```|`.*?`)|\\\[([\s\S]*?[^\\])\\\]|\\\((.*?)\\\)/g;
@@ -244,6 +248,10 @@ function escapeBrackets(text: string) {
function tryWrapHtmlCode(text: string) {
// try add wrap html code (fixed: html codeblock include 2 newline)
+ // ignore embed codeblock
+ if (text.includes("```")) {
+ return text;
+ }
return text
.replace(
/([`]*?)(\w*?)([\n\r]*?)(<!DOCTYPE html>)/g,
@@ -252,7 +260,7 @@ function tryWrapHtmlCode(text: string) {
},
)
.replace(
- /(<\/body>)([\r\n\s]*?)(<\/html>)([\n\r]*?)([`]*?)([\n\r]*?)/g,
+ /(<\/body>)([\r\n\s]*?)(<\/html>)([\n\r]*)([`]*)([\n\r]*?)/g,
(match, bodyEnd, space, htmlEnd, newLine, quoteEnd) => {
return !quoteEnd ? bodyEnd + space + htmlEnd + "\n```\n" : match;
},
@@ -261,7 +269,7 @@ function _MarkDownContent(props: { content: string }) {
function _MarkDownContent(props: { content: string }) {
const escapedContent = useMemo(() => {
- return tryWrapHtmlCode(escapeBrackets(escapeDollarNumber(props.content)));
+ return tryWrapHtmlCode(escapeBrackets(props.content));
}, [props.content]);
return (
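The added guard makes `tryWrapHtmlCode` a no-op whenever the text already contains a code fence, so HTML inside an existing fenced block is no longer re-wrapped. A simplified sketch of that behavior (the real function uses the two regexes shown above):

```ts
// Sketch of the new guard only; the fence string is built indirectly to
// avoid nesting literal fences in this document.
const FENCE = "`".repeat(3);

function wrapHtmlSketch(text: string): string {
  if (text.includes(FENCE)) return text; // already fenced: leave untouched
  return text.replace(
    /(<!DOCTYPE html>[\s\S]*?<\/html>)/g,
    (m) => FENCE + "html\n" + m + "\n" + FENCE,
  );
}

wrapHtmlSketch("<!DOCTYPE html><html></html>"); // gets wrapped in a fence
wrapHtmlSketch(FENCE + "html\n<!DOCTYPE html><html></html>\n" + FENCE); // unchanged
```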

View File

@@ -55,6 +55,7 @@ import {
OnDragEndResponder,
} from "@hello-pangea/dnd";
import { getMessageTextContent } from "../utils";
+ import clsx from "clsx";
// drag and drop helper function
function reorder<T>(list: T[], startIndex: number, endIndex: number): T[] {
@@ -183,6 +184,23 @@ export function MaskConfig(props: {
></input>
</ListItem>
)}
+ {globalConfig.enableCodeFold && (
+ <ListItem
+ title={Locale.Mask.Config.CodeFold.Title}
+ subTitle={Locale.Mask.Config.CodeFold.SubTitle}
+ >
+ <input
+ aria-label={Locale.Mask.Config.CodeFold.Title}
+ type="checkbox"
+ checked={props.mask.enableCodeFold !== false}
+ onChange={(e) => {
+ props.updateMask((mask) => {
+ mask.enableCodeFold = e.currentTarget.checked;
+ });
+ }}
+ ></input>
+ </ListItem>
+ )}
{!props.shouldSyncFromGlobal ? (
<ListItem
@@ -571,7 +589,7 @@ export function MaskPage() {
</div>
<div className={styles["mask-title"]}>
<div className={styles["mask-name"]}>{m.name}</div>
- <div className={styles["mask-info"] + " one-line"}>
+ <div className={clsx(styles["mask-info"], "one-line")}>
{`${Locale.Mask.Item.Info(m.context.length)} / ${
ALL_LANG_OPTIONS[m.lang]
} / ${m.modelConfig.model}`}
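Together with the `CustomCode` hunk earlier, the flags compose as `session.mask?.enableCodeFold !== false && config.enableCodeFold`: the per-mask switch defaults to on and can only narrow the global setting, never widen it. A compact sketch of the resulting truth table:

```ts
// Effective fold flag, as combined in the CustomCode hunk above.
// The mask value may be true, false, or undefined (undefined counts as "on").
const effectiveCodeFold = (mask: boolean | undefined, global: boolean) =>
  mask !== false && global;

effectiveCodeFold(undefined, true); // true  (mask unset, global on)
effectiveCodeFold(true, true);      // true
effectiveCodeFold(false, true);     // false (mask opts out)
effectiveCodeFold(true, false);     // false (global off always wins)
```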

View File

@@ -0,0 +1,657 @@
@import "../styles/animation.scss";
.mcp-market-page {
height: 100%;
display: flex;
flex-direction: column;
.loading-indicator {
font-size: 12px;
color: var(--primary);
margin-left: 8px;
font-weight: normal;
opacity: 0.8;
}
.mcp-market-page-body {
padding: 20px;
overflow-y: auto;
.loading-container,
.empty-container {
display: flex;
justify-content: center;
align-items: center;
min-height: 200px;
width: 100%;
background-color: var(--white);
border: var(--border-in-light);
border-radius: 10px;
animation: slide-in ease 0.3s;
}
.loading-text,
.empty-text {
font-size: 14px;
color: var(--black);
opacity: 0.5;
text-align: center;
}
.mcp-market-filter {
width: 100%;
max-width: 100%;
margin-bottom: 20px;
animation: slide-in ease 0.3s;
height: 40px;
display: flex;
.search-bar {
flex-grow: 1;
max-width: 100%;
min-width: 0;
}
}
.server-list {
display: flex;
flex-direction: column;
gap: 1px;
}
.mcp-market-item {
padding: 20px;
border: var(--border-in-light);
animation: slide-in ease 0.3s;
background-color: var(--white);
transition: all 0.3s ease;
&.disabled {
opacity: 0.7;
pointer-events: none;
}
&:not(:last-child) {
border-bottom: 0;
}
&:first-child {
border-top-left-radius: 10px;
border-top-right-radius: 10px;
}
&:last-child {
border-bottom-left-radius: 10px;
border-bottom-right-radius: 10px;
}
&.loading {
position: relative;
&::after {
content: "";
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: linear-gradient(
90deg,
transparent,
rgba(255, 255, 255, 0.2),
transparent
);
background-size: 200% 100%;
animation: loading-pulse 1.5s infinite;
}
}
.operation-status {
display: inline-flex;
align-items: center;
margin-left: 10px;
padding: 2px 8px;
border-radius: 4px;
font-size: 12px;
background-color: #16a34a;
color: #fff;
animation: pulse 1.5s infinite;
&[data-status="stopping"] {
background-color: #9ca3af;
}
&[data-status="starting"] {
background-color: #4ade80;
}
&[data-status="error"] {
background-color: #f87171;
}
}
.mcp-market-header {
display: flex;
justify-content: space-between;
align-items: flex-start;
width: 100%;
.mcp-market-title {
flex-grow: 1;
margin-right: 20px;
max-width: calc(100% - 300px);
}
.mcp-market-name {
font-size: 14px;
font-weight: bold;
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 8px;
.server-status {
display: inline-flex;
align-items: center;
margin-left: 10px;
padding: 2px 8px;
border-radius: 4px;
font-size: 12px;
background-color: #22c55e;
color: #fff;
&.error {
background-color: #ef4444;
}
&.stopped {
background-color: #6b7280;
}
&.initializing {
background-color: #f59e0b;
animation: pulse 1.5s infinite;
}
.error-message {
margin-left: 4px;
font-size: 12px;
}
}
}
.repo-link {
color: var(--primary);
font-size: 12px;
display: inline-flex;
align-items: center;
gap: 4px;
text-decoration: none;
opacity: 0.8;
transition: opacity 0.2s;
&:hover {
opacity: 1;
}
svg {
width: 14px;
height: 14px;
}
}
.tags-container {
display: flex;
gap: 4px;
flex-wrap: wrap;
margin-bottom: 8px;
}
.tag {
background: var(--gray);
color: var(--black);
padding: 2px 6px;
border-radius: 4px;
font-size: 10px;
opacity: 0.8;
}
.mcp-market-info {
color: var(--black);
font-size: 12px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.mcp-market-actions {
display: flex;
gap: 12px;
align-items: flex-start;
flex-shrink: 0;
min-width: 180px;
justify-content: flex-end;
}
}
}
}
.array-input {
display: flex;
flex-direction: column;
gap: 12px;
width: 100%;
padding: 16px;
border: 1px solid var(--gray-200);
border-radius: 10px;
background-color: var(--white);
.array-input-item {
display: flex;
gap: 8px;
align-items: center;
width: 100%;
padding: 0;
input {
width: 100%;
padding: 8px 12px;
background-color: var(--gray-50);
border-radius: 6px;
transition: all 0.3s ease;
font-size: 13px;
border: 1px solid var(--gray-200);
&:hover {
background-color: var(--gray-100);
border-color: var(--gray-300);
}
&:focus {
background-color: var(--white);
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
&::placeholder {
color: var(--gray-300);
}
}
}
:global(.icon-button.add-path-button) {
width: 100%;
background-color: var(--primary);
color: white;
padding: 8px 12px;
border-radius: 6px;
transition: all 0.3s ease;
margin-top: 8px;
display: flex;
align-items: center;
justify-content: center;
border: none;
height: 36px;
&:hover {
background-color: var(--primary-dark);
}
svg {
width: 16px;
height: 16px;
margin-right: 4px;
filter: brightness(2);
}
}
}
.path-list {
width: 100%;
display: flex;
flex-direction: column;
gap: 10px;
.path-item {
display: flex;
gap: 10px;
width: 100%;
input {
flex: 1;
width: 100%;
max-width: 100%;
padding: 10px;
border: var(--border-in-light);
border-radius: 10px;
box-sizing: border-box;
font-size: 14px;
background-color: var(--white);
color: var(--black);
&:hover {
border-color: var(--gray-300);
}
&:focus {
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
}
.browse-button {
padding: 8px;
border: var(--border-in-light);
border-radius: 10px;
background-color: transparent;
color: var(--black-50);
&:hover {
border-color: var(--primary);
color: var(--primary);
background-color: transparent;
}
svg {
width: 16px;
height: 16px;
}
}
.delete-button {
padding: 8px;
border: var(--border-in-light);
border-radius: 10px;
background-color: transparent;
color: var(--black-50);
&:hover {
border-color: var(--danger);
color: var(--danger);
background-color: transparent;
}
svg {
width: 16px;
height: 16px;
}
}
.file-input {
display: none;
}
}
.add-button {
align-self: flex-start;
display: flex;
align-items: center;
gap: 5px;
padding: 8px 12px;
background-color: transparent;
border: var(--border-in-light);
border-radius: 10px;
color: var(--black);
font-size: 12px;
margin-top: 5px;
&:hover {
border-color: var(--primary);
color: var(--primary);
background-color: transparent;
}
svg {
width: 16px;
height: 16px;
}
}
}
.config-section {
width: 100%;
.config-header {
margin-bottom: 12px;
.config-title {
font-size: 14px;
font-weight: 600;
color: var(--black);
text-transform: capitalize;
}
.config-description {
font-size: 12px;
color: var(--gray-500);
margin-top: 4px;
}
}
.array-input {
display: flex;
flex-direction: column;
gap: 12px;
width: 100%;
padding: 16px;
border: 1px solid var(--gray-200);
border-radius: 10px;
background-color: var(--white);
.array-input-item {
display: flex;
gap: 8px;
align-items: center;
width: 100%;
padding: 0;
input {
width: 100%;
padding: 8px 12px;
background-color: var(--gray-50);
border-radius: 6px;
transition: all 0.3s ease;
font-size: 13px;
border: 1px solid var(--gray-200);
&:hover {
background-color: var(--gray-100);
border-color: var(--gray-300);
}
&:focus {
background-color: var(--white);
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
&::placeholder {
color: var(--gray-300);
}
}
:global(.icon-button) {
width: 32px;
height: 32px;
padding: 0;
border-radius: 6px;
background-color: transparent;
border: 1px solid var(--gray-200);
flex-shrink: 0;
display: flex;
align-items: center;
justify-content: center;
&:hover {
background-color: var(--gray-100);
border-color: var(--gray-300);
}
svg {
width: 16px;
height: 16px;
opacity: 0.7;
}
}
}
:global(.icon-button.add-path-button) {
width: 100%;
background-color: var(--primary);
color: white;
padding: 8px 12px;
border-radius: 6px;
transition: all 0.3s ease;
margin-top: 8px;
display: flex;
align-items: center;
justify-content: center;
border: none;
height: 36px;
&:hover {
background-color: var(--primary-dark);
}
svg {
width: 16px;
height: 16px;
margin-right: 4px;
filter: brightness(2);
}
}
}
}
.input-item {
width: 100%;
input {
width: 100%;
padding: 10px;
border: var(--border-in-light);
border-radius: 10px;
box-sizing: border-box;
font-size: 14px;
background-color: var(--white);
color: var(--black);
&:hover {
border-color: var(--gray-300);
}
&:focus {
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
&::placeholder {
color: var(--gray-300) !important;
opacity: 1;
}
}
}
.tools-list {
display: flex;
flex-direction: column;
gap: 16px;
width: 100%;
padding: 20px;
max-width: 100%;
overflow-x: hidden;
word-break: break-word;
box-sizing: border-box;
.tool-item {
width: 100%;
box-sizing: border-box;
.tool-name {
font-size: 14px;
font-weight: 600;
color: var(--black);
margin-bottom: 8px;
padding-left: 12px;
border-left: 3px solid var(--primary);
box-sizing: border-box;
width: 100%;
}
.tool-description {
font-size: 13px;
color: var(--gray-500);
line-height: 1.6;
padding-left: 15px;
box-sizing: border-box;
width: 100%;
}
}
}
:global {
.modal-content {
margin-top: 20px;
max-width: 100%;
overflow-x: hidden;
}
.list {
padding: 10px;
margin-bottom: 10px;
background-color: var(--white);
}
.list-item {
border: none;
background-color: transparent;
border-radius: 10px;
padding: 10px;
margin-bottom: 10px;
display: flex;
flex-direction: column;
gap: 10px;
.list-header {
margin-bottom: 0;
.list-title {
font-size: 14px;
font-weight: bold;
text-transform: capitalize;
color: var(--black);
}
.list-sub-title {
font-size: 12px;
color: var(--gray-500);
margin-top: 4px;
}
}
}
}
}
@keyframes loading-pulse {
0% {
background-position: 200% 0;
}
100% {
background-position: -200% 0;
}
}
@keyframes pulse {
0% {
opacity: 0.6;
}
50% {
opacity: 1;
}
100% {
opacity: 0.6;
}
}

View File

@@ -0,0 +1,755 @@
import { IconButton } from "./button";
import { ErrorBoundary } from "./error";
import styles from "./mcp-market.module.scss";
import EditIcon from "../icons/edit.svg";
import AddIcon from "../icons/add.svg";
import CloseIcon from "../icons/close.svg";
import DeleteIcon from "../icons/delete.svg";
import RestartIcon from "../icons/reload.svg";
import EyeIcon from "../icons/eye.svg";
import GithubIcon from "../icons/github.svg";
import { List, ListItem, Modal, showToast } from "./ui-lib";
import { useNavigate } from "react-router-dom";
import { useEffect, useState } from "react";
import {
addMcpServer,
getClientsStatus,
getClientTools,
getMcpConfigFromFile,
isMcpEnabled,
pauseMcpServer,
restartAllClients,
resumeMcpServer,
} from "../mcp/actions";
import {
ListToolsResponse,
McpConfigData,
PresetServer,
ServerConfig,
ServerStatusResponse,
} from "../mcp/types";
import clsx from "clsx";
import PlayIcon from "../icons/play.svg";
import StopIcon from "../icons/pause.svg";
import { Path } from "../constant";
interface ConfigProperty {
type: string;
description?: string;
required?: boolean;
minItems?: number;
}
export function McpMarketPage() {
const navigate = useNavigate();
const [mcpEnabled, setMcpEnabled] = useState(false);
const [searchText, setSearchText] = useState("");
const [userConfig, setUserConfig] = useState<Record<string, any>>({});
const [editingServerId, setEditingServerId] = useState<string | undefined>();
const [tools, setTools] = useState<ListToolsResponse["tools"] | null>(null);
const [viewingServerId, setViewingServerId] = useState<string | undefined>();
const [isLoading, setIsLoading] = useState(false);
const [config, setConfig] = useState<McpConfigData>();
const [clientStatuses, setClientStatuses] = useState<
Record<string, ServerStatusResponse>
>({});
const [loadingPresets, setLoadingPresets] = useState(true);
const [presetServers, setPresetServers] = useState<PresetServer[]>([]);
const [loadingStates, setLoadingStates] = useState<Record<string, string>>(
{},
);
// Check whether MCP is enabled
useEffect(() => {
const checkMcpStatus = async () => {
const enabled = await isMcpEnabled();
setMcpEnabled(enabled);
if (!enabled) {
navigate(Path.Home);
}
};
checkMcpStatus();
}, [navigate]);
// Poll client status
useEffect(() => {
if (!mcpEnabled || !config) return;
const updateStatuses = async () => {
const statuses = await getClientsStatus();
setClientStatuses(statuses);
};
// Run once immediately
updateStatuses();
// Then poll every 1000ms
const timer = setInterval(updateStatuses, 1000);
return () => clearInterval(timer);
}, [mcpEnabled, config]);
// Load preset servers
useEffect(() => {
const loadPresetServers = async () => {
if (!mcpEnabled) return;
try {
setLoadingPresets(true);
const response = await fetch("https://nextchat.club/mcp/list");
if (!response.ok) {
throw new Error("Failed to load preset servers");
}
const data = await response.json();
setPresetServers(data?.data ?? []);
} catch (error) {
console.error("Failed to load preset servers:", error);
showToast("Failed to load preset servers");
} finally {
setLoadingPresets(false);
}
};
loadPresetServers();
}, [mcpEnabled]);
// Load initial state
useEffect(() => {
const loadInitialState = async () => {
if (!mcpEnabled) return;
try {
setIsLoading(true);
const config = await getMcpConfigFromFile();
setConfig(config);
// Fetch the status of every client
const statuses = await getClientsStatus();
setClientStatuses(statuses);
} catch (error) {
console.error("Failed to load initial state:", error);
showToast("Failed to load initial state");
} finally {
setIsLoading(false);
}
};
loadInitialState();
}, [mcpEnabled]);
// Load the configuration of the server being edited
useEffect(() => {
if (!editingServerId || !config) return;
const currentConfig = config.mcpServers[editingServerId];
if (currentConfig) {
// Extract the user config from the current config
const preset = presetServers.find((s) => s.id === editingServerId);
if (preset?.configSchema) {
const userConfig: Record<string, any> = {};
Object.entries(preset.argsMapping || {}).forEach(([key, mapping]) => {
if (mapping.type === "spread") {
// For spread types, extract the array from args.
const startPos = mapping.position ?? 0;
userConfig[key] = currentConfig.args.slice(startPos);
} else if (mapping.type === "single") {
// For single types, get a single value
userConfig[key] = currentConfig.args[mapping.position ?? 0];
} else if (
mapping.type === "env" &&
mapping.key &&
currentConfig.env
) {
// For env types, get values from environment variables
userConfig[key] = currentConfig.env[mapping.key];
}
});
setUserConfig(userConfig);
}
} else {
setUserConfig({});
}
}, [editingServerId, config, presetServers]);
if (!mcpEnabled) {
return null;
}
// Check whether a server has already been added
const isServerAdded = (id: string) => {
return id in (config?.mcpServers ?? {});
};
// Save the server configuration
const saveServerConfig = async () => {
const preset = presetServers.find((s) => s.id === editingServerId);
if (!preset || !preset.configSchema || !editingServerId) return;
const savingServerId = editingServerId;
setEditingServerId(undefined);
try {
updateLoadingState(savingServerId, "Updating configuration...");
// Build the server config
const args = [...preset.baseArgs];
const env: Record<string, string> = {};
Object.entries(preset.argsMapping || {}).forEach(([key, mapping]) => {
const value = userConfig[key];
if (mapping.type === "spread" && Array.isArray(value)) {
const pos = mapping.position ?? 0;
args.splice(pos, 0, ...value);
} else if (
mapping.type === "single" &&
mapping.position !== undefined
) {
args[mapping.position] = value;
} else if (
mapping.type === "env" &&
mapping.key &&
typeof value === "string"
) {
env[mapping.key] = value;
}
});
const serverConfig: ServerConfig = {
command: preset.command,
args,
...(Object.keys(env).length > 0 ? { env } : {}),
};
const newConfig = await addMcpServer(savingServerId, serverConfig);
setConfig(newConfig);
showToast("Server configuration updated successfully");
} catch (error) {
showToast(
error instanceof Error ? error.message : "Failed to save configuration",
);
} finally {
updateLoadingState(savingServerId, null);
}
};
// Fetch the tools a server supports
const loadTools = async (id: string) => {
try {
const result = await getClientTools(id);
if (result) {
setTools(result);
} else {
throw new Error("Failed to load tools");
}
} catch (error) {
showToast("Failed to load tools");
console.error(error);
setTools(null);
}
};
// Helper for updating per-server loading state
const updateLoadingState = (id: string, message: string | null) => {
setLoadingStates((prev) => {
if (message === null) {
const { [id]: _, ...rest } = prev;
return rest;
}
return { ...prev, [id]: message };
});
};
// Add a server
const addServer = async (preset: PresetServer) => {
if (!preset.configurable) {
try {
const serverId = preset.id;
updateLoadingState(serverId, "Creating MCP client...");
const serverConfig: ServerConfig = {
command: preset.command,
args: [...preset.baseArgs],
};
const newConfig = await addMcpServer(preset.id, serverConfig);
setConfig(newConfig);
// Refresh statuses
const statuses = await getClientsStatus();
setClientStatuses(statuses);
} finally {
updateLoadingState(preset.id, null);
}
} else {
// If configuration is required, open the config dialog
setEditingServerId(preset.id);
setUserConfig({});
}
};
// Pause a server
const pauseServer = async (id: string) => {
try {
updateLoadingState(id, "Stopping server...");
const newConfig = await pauseMcpServer(id);
setConfig(newConfig);
showToast("Server stopped successfully");
} catch (error) {
showToast("Failed to stop server");
console.error(error);
} finally {
updateLoadingState(id, null);
}
};
// Restart server
const restartServer = async (id: string) => {
try {
updateLoadingState(id, "Starting server...");
await resumeMcpServer(id);
} catch (error) {
showToast(
error instanceof Error
? error.message
: "Failed to start server, please check logs",
);
console.error(error);
} finally {
updateLoadingState(id, null);
}
};
// Restart all clients
const handleRestartAll = async () => {
try {
updateLoadingState("all", "Restarting all servers...");
const newConfig = await restartAllClients();
setConfig(newConfig);
showToast("Restarting all clients");
} catch (error) {
showToast("Failed to restart clients");
console.error(error);
} finally {
updateLoadingState("all", null);
}
};
// Render configuration form
const renderConfigForm = () => {
const preset = presetServers.find((s) => s.id === editingServerId);
if (!preset?.configSchema) return null;
return Object.entries(preset.configSchema.properties).map(
([key, prop]: [string, ConfigProperty]) => {
if (prop.type === "array") {
const currentValue = userConfig[key as keyof typeof userConfig] || [];
const itemLabel = (prop as any).itemLabel || key;
const addButtonText =
(prop as any).addButtonText || `Add ${itemLabel}`;
return (
<ListItem
key={key}
title={key}
subTitle={prop.description}
vertical
>
<div className={styles["path-list"]}>
{(currentValue as string[]).map(
(value: string, index: number) => (
<div key={index} className={styles["path-item"]}>
<input
type="text"
value={value}
placeholder={`${itemLabel} ${index + 1}`}
onChange={(e) => {
const newValue = [...currentValue] as string[];
newValue[index] = e.target.value;
setUserConfig({ ...userConfig, [key]: newValue });
}}
/>
<IconButton
icon={<DeleteIcon />}
className={styles["delete-button"]}
onClick={() => {
const newValue = [...currentValue] as string[];
newValue.splice(index, 1);
setUserConfig({ ...userConfig, [key]: newValue });
}}
/>
</div>
),
)}
<IconButton
icon={<AddIcon />}
text={addButtonText}
className={styles["add-button"]}
bordered
onClick={() => {
const newValue = [...currentValue, ""] as string[];
setUserConfig({ ...userConfig, [key]: newValue });
}}
/>
</div>
</ListItem>
);
} else if (prop.type === "string") {
const currentValue = userConfig[key as keyof typeof userConfig] || "";
return (
<ListItem key={key} title={key} subTitle={prop.description}>
<input
aria-label={key}
type="text"
value={currentValue}
placeholder={`Enter ${key}`}
onChange={(e) => {
setUserConfig({ ...userConfig, [key]: e.target.value });
}}
/>
</ListItem>
);
}
return null;
},
);
};
const checkServerStatus = (clientId: string) => {
return clientStatuses[clientId] || { status: "undefined", errorMsg: null };
};
const getServerStatusDisplay = (clientId: string) => {
const status = checkServerStatus(clientId);
const statusMap = {
undefined: null, // not configured / not found: show nothing
// initializing status
initializing: (
<span className={clsx(styles["server-status"], styles["initializing"])}>
Initializing
</span>
),
paused: (
<span className={clsx(styles["server-status"], styles["stopped"])}>
Stopped
</span>
),
active: <span className={styles["server-status"]}>Running</span>,
error: (
<span className={clsx(styles["server-status"], styles["error"])}>
Error
<span className={styles["error-message"]}>: {status.errorMsg}</span>
</span>
),
};
return statusMap[status.status];
};
// Get the type of operation status
const getOperationStatusType = (message: string) => {
if (message.toLowerCase().includes("stopping")) return "stopping";
if (message.toLowerCase().includes("starting")) return "starting";
if (message.toLowerCase().includes("error")) return "error";
return "default";
};
// Render the server list
const renderServerList = () => {
if (loadingPresets) {
return (
<div className={styles["loading-container"]}>
<div className={styles["loading-text"]}>
Loading preset server list...
</div>
</div>
);
}
if (!Array.isArray(presetServers) || presetServers.length === 0) {
return (
<div className={styles["empty-container"]}>
<div className={styles["empty-text"]}>No servers available</div>
</div>
);
}
return presetServers
.filter((server) => {
if (searchText.length === 0) return true;
const searchLower = searchText.toLowerCase();
return (
server.name.toLowerCase().includes(searchLower) ||
server.description.toLowerCase().includes(searchLower) ||
server.tags.some((tag) => tag.toLowerCase().includes(searchLower))
);
})
.sort((a, b) => {
const aStatus = checkServerStatus(a.id).status;
const bStatus = checkServerStatus(b.id).status;
const aLoading = loadingStates[a.id];
const bLoading = loadingStates[b.id];
// Status priority order
const statusPriority: Record<string, number> = {
error: 0, // Highest priority for error status
active: 1, // Second for active
initializing: 2, // Initializing
starting: 3, // Starting
stopping: 4, // Stopping
paused: 5, // Paused
undefined: 6, // Lowest priority for undefined
};
// Get actual status (including loading status)
const getEffectiveStatus = (status: string, loading?: string) => {
if (loading) {
const operationType = getOperationStatusType(loading);
return operationType === "default" ? status : operationType;
}
if (status === "initializing" && !loading) {
return "active";
}
return status;
};
const aEffectiveStatus = getEffectiveStatus(aStatus, aLoading);
const bEffectiveStatus = getEffectiveStatus(bStatus, bLoading);
// Sort by status first
if (aEffectiveStatus !== bEffectiveStatus) {
return (
(statusPriority[aEffectiveStatus] ?? 6) -
(statusPriority[bEffectiveStatus] ?? 6)
);
}
// Sort by name when statuses are the same
return a.name.localeCompare(b.name);
})
.map((server) => (
<div
className={clsx(styles["mcp-market-item"], {
[styles["loading"]]: loadingStates[server.id],
})}
key={server.id}
>
<div className={styles["mcp-market-header"]}>
<div className={styles["mcp-market-title"]}>
<div className={styles["mcp-market-name"]}>
{server.name}
{loadingStates[server.id] && (
<span
className={styles["operation-status"]}
data-status={getOperationStatusType(
loadingStates[server.id],
)}
>
{loadingStates[server.id]}
</span>
)}
{!loadingStates[server.id] && getServerStatusDisplay(server.id)}
{server.repo && (
<a
href={server.repo}
target="_blank"
rel="noopener noreferrer"
className={styles["repo-link"]}
title="Open repository"
>
<GithubIcon />
</a>
)}
</div>
<div className={styles["tags-container"]}>
{server.tags.map((tag, index) => (
<span key={index} className={styles["tag"]}>
{tag}
</span>
))}
</div>
<div
className={clsx(styles["mcp-market-info"], "one-line")}
title={server.description}
>
{server.description}
</div>
</div>
<div className={styles["mcp-market-actions"]}>
{isServerAdded(server.id) ? (
<>
{server.configurable && (
<IconButton
icon={<EditIcon />}
text="Configure"
onClick={() => setEditingServerId(server.id)}
disabled={isLoading}
/>
)}
{checkServerStatus(server.id).status === "paused" ? (
<>
<IconButton
icon={<PlayIcon />}
text="Start"
onClick={() => restartServer(server.id)}
disabled={isLoading}
/>
{/* <IconButton
icon={<DeleteIcon />}
text="Remove"
onClick={() => removeServer(server.id)}
disabled={isLoading}
/> */}
</>
) : (
<>
<IconButton
icon={<EyeIcon />}
text="Tools"
onClick={async () => {
setViewingServerId(server.id);
await loadTools(server.id);
}}
disabled={
isLoading ||
checkServerStatus(server.id).status === "error"
}
/>
<IconButton
icon={<StopIcon />}
text="Stop"
onClick={() => pauseServer(server.id)}
disabled={isLoading}
/>
</>
)}
</>
) : (
<IconButton
icon={<AddIcon />}
text="Add"
onClick={() => addServer(server)}
disabled={isLoading}
/>
)}
</div>
</div>
</div>
));
};
return (
<ErrorBoundary>
<div className={styles["mcp-market-page"]}>
<div className="window-header">
<div className="window-header-title">
<div className="window-header-main-title">
MCP Market
{loadingStates["all"] && (
<span className={styles["loading-indicator"]}>
{loadingStates["all"]}
</span>
)}
</div>
<div className="window-header-sub-title">
{Object.keys(config?.mcpServers ?? {}).length} servers configured
</div>
</div>
<div className="window-actions">
<div className="window-action-button">
<IconButton
icon={<RestartIcon />}
bordered
onClick={handleRestartAll}
text="Restart All"
disabled={isLoading}
/>
</div>
<div className="window-action-button">
<IconButton
icon={<CloseIcon />}
bordered
onClick={() => navigate(-1)}
disabled={isLoading}
/>
</div>
</div>
</div>
<div className={styles["mcp-market-page-body"]}>
<div className={styles["mcp-market-filter"]}>
<input
type="text"
className={styles["search-bar"]}
placeholder={"Search MCP Server"}
autoFocus
onInput={(e) => setSearchText(e.currentTarget.value)}
/>
</div>
<div className={styles["server-list"]}>{renderServerList()}</div>
</div>
{/* Edit server configuration */}
{editingServerId && (
<div className="modal-mask">
<Modal
title={`Configure Server - ${editingServerId}`}
onClose={() => !isLoading && setEditingServerId(undefined)}
actions={[
<IconButton
key="cancel"
text="Cancel"
onClick={() => setEditingServerId(undefined)}
bordered
disabled={isLoading}
/>,
<IconButton
key="confirm"
text="Save"
type="primary"
onClick={saveServerConfig}
bordered
disabled={isLoading}
/>,
]}
>
<List>{renderConfigForm()}</List>
</Modal>
</div>
)}
{viewingServerId && (
<div className="modal-mask">
<Modal
title={`Server Details - ${viewingServerId}`}
onClose={() => setViewingServerId(undefined)}
actions={[
<IconButton
key="close"
text="Close"
onClick={() => setViewingServerId(undefined)}
bordered
/>,
]}
>
<div className={styles["tools-list"]}>
{isLoading ? (
<div>Loading...</div>
) : tools?.tools ? (
tools.tools.map(
(tool: ListToolsResponse["tools"], index: number) => (
<div key={index} className={styles["tool-item"]}>
<div className={styles["tool-name"]}>{tool.name}</div>
<div className={styles["tool-description"]}>
{tool.description}
</div>
</div>
),
)
) : (
<div>No tools available</div>
)}
</div>
</Modal>
</div>
)}
</div>
</ErrorBoundary>
);
}
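The interplay of `baseArgs`, `argsMapping`, and `userConfig` in `saveServerConfig` is easiest to see with concrete data. A worked sketch with a hypothetical preset (`@example/mcp-fs` and `EXAMPLE_API_KEY` are made-up names):

```ts
type Mapping =
  | { type: "spread"; position?: number }
  | { type: "single"; position: number }
  | { type: "env"; key: string };

const preset = {
  command: "npx",
  baseArgs: ["-y", "@example/mcp-fs"],
  argsMapping: {
    paths: { type: "spread", position: 2 },
    apiKey: { type: "env", key: "EXAMPLE_API_KEY" },
  } as Record<string, Mapping>,
};

const userConfig: Record<string, unknown> = {
  paths: ["/data", "/tmp"],
  apiKey: "sk-example",
};

// Assembled the same way saveServerConfig does it:
const args = [...preset.baseArgs];
const env: Record<string, string> = {};
for (const [key, m] of Object.entries(preset.argsMapping)) {
  const value = userConfig[key];
  if (m.type === "spread" && Array.isArray(value)) {
    args.splice(m.position ?? 0, 0, ...value); // splice the array into args
  } else if (m.type === "single") {
    args[m.position] = String(value); // write one positional argument
  } else if (m.type === "env" && typeof value === "string") {
    env[m.key] = value; // export as an environment variable
  }
}
// args -> ["-y", "@example/mcp-fs", "/data", "/tmp"]
// env  -> { EXAMPLE_API_KEY: "sk-example" }
```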

View File

@@ -8,6 +8,7 @@ import Locale from "../locales";
import styles from "./message-selector.module.scss";
import { getMessageTextContent } from "../utils";
+ import clsx from "clsx";
function useShiftRange() {
const [startIndex, setStartIndex] = useState<number>();
@@ -71,6 +72,7 @@ export function MessageSelector(props: {
defaultSelectAll?: boolean;
onSelected?: (messages: ChatMessage[]) => void;
}) {
+ const LATEST_COUNT = 4;
const chatStore = useChatStore();
const session = chatStore.currentSession();
const isValid = (m: ChatMessage) => m.content && !m.isError && !m.streaming;
@@ -141,15 +143,13 @@ export function MessageSelector(props: {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [startIndex, endIndex]);
- const LATEST_COUNT = 4;
return (
<div className={styles["message-selector"]}>
<div className={styles["message-filter"]}>
<input
type="text"
placeholder={Locale.Select.Search}
- className={styles["filter-item"] + " " + styles["search-bar"]}
+ className={clsx(styles["filter-item"], styles["search-bar"])}
value={searchInput}
onInput={(e) => {
setSearchInput(e.currentTarget.value);
@@ -196,9 +196,9 @@ export function MessageSelector(props: {
return (
<div
- className={`${styles["message"]} ${
- props.selection.has(m.id!) && styles["message-selected"]
- }`}
+ className={clsx(styles["message"], {
+ [styles["message-selected"]]: props.selection.has(m.id!),
+ })}
key={i}
onClick={() => {
props.updateSelection((selection) => {
@@ -221,7 +221,7 @@ export function MessageSelector(props: {
<div className={styles["date"]}>
{new Date(m.date).toLocaleString()}
</div>
- <div className={`${styles["content"]} one-line`}>
+ <div className={clsx(styles["content"], "one-line")}>
{getMessageTextContent(m)}
</div>
</div>

View File

@@ -7,6 +7,7 @@ import { ListItem, Select } from "./ui-lib";
import { useAllModels } from "../utils/hooks";
import { groupBy } from "lodash-es";
import styles from "./model-config.module.scss";
+ import { getModelProvider } from "../utils/model";
export function ModelConfigList(props: {
modelConfig: ModelConfig;
@@ -28,7 +29,9 @@ export function ModelConfigList(props: {
value={value}
align="left"
onChange={(e) => {
- const [model, providerName] = e.currentTarget.value.split("@");
+ const [model, providerName] = getModelProvider(
+ e.currentTarget.value,
+ );
props.updateConfig((config) => {
config.model = ModalConfigValidator.model(model);
config.providerName = providerName as ServiceProvider;
@@ -247,7 +250,9 @@ export function ModelConfigList(props: {
aria-label={Locale.Settings.CompressModel.Title}
value={compressModelValue}
onChange={(e) => {
- const [model, providerName] = e.currentTarget.value.split("@");
+ const [model, providerName] = getModelProvider(
+ e.currentTarget.value,
+ );
props.updateConfig((config) => {
config.compressModel = ModalConfigValidator.model(model);
config.compressProviderName = providerName as ServiceProvider;
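Both dropdowns previously split the `model@Provider` option value with `split("@")`, which breaks model ids that themselves contain an `@`. The diff swaps in `getModelProvider` from `app/utils/model`; its body is not shown here, but a plausible sketch splits on the last `@` only:

```ts
// Hedged sketch of what such a helper has to do; the real implementation
// lives in app/utils/model and may differ in details.
function getModelProvider(value: string): [string, string | undefined] {
  const at = value.lastIndexOf("@");
  if (at === -1) return [value, undefined];
  return [value.slice(0, at), value.slice(at + 1)];
}

getModelProvider("gpt-4o@OpenAI");          // ["gpt-4o", "OpenAI"]
getModelProvider("user/model@v1@Provider"); // ["user/model@v1", "Provider"]
// A naive value.split("@") would return ["user/model", "v1", "Provider"].
```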

View File

@@ -16,6 +16,7 @@ import { MaskAvatar } from "./mask";
import { useCommand } from "../command";
import { showConfirm } from "./ui-lib";
import { BUILTIN_MASK_STORE } from "../masks";
+ import clsx from "clsx";
function MaskItem(props: { mask: Mask; onClick?: () => void }) {
return (
@@ -24,7 +25,9 @@ function MaskItem(props: { mask: Mask; onClick?: () => void }) {
avatar={props.mask.avatar}
model={props.mask.modelConfig.model}
/>
- <div className={styles["mask-name"] + " one-line"}>{props.mask.name}</div>
+ <div className={clsx(styles["mask-name"], "one-line")}>
+ {props.mask.name}
+ </div>
</div>
);
}

View File

@@ -28,6 +28,7 @@ import {
import Locale from "../locales";
import { useNavigate } from "react-router-dom";
import { useState } from "react";
+ import clsx from "clsx";
export function PluginPage() {
const navigate = useNavigate();
@@ -199,7 +200,7 @@ export function PluginPage() {
<div className={styles["mask-name"]}>
{m.title}@<small>{m.version}</small>
</div>
- <div className={styles["mask-info"] + " one-line"}>
+ <div className={clsx(styles["mask-info"], "one-line")}>
{Locale.Plugin.Item.Info(
FunctionToolService.add(m).length,
)}
@@ -335,7 +336,10 @@ export function PluginPage() {
<ListItem
subTitle={
<div
- className={`markdown-body ${pluginStyles["plugin-content"]}`}
+ className={clsx(
+ "markdown-body",
+ pluginStyles["plugin-content"],
+ )}
dir="auto"
>
<pre>

View File

@@ -0,0 +1 @@
export * from "./realtime-chat";

View File

@@ -0,0 +1,74 @@
.realtime-chat {
width: 100%;
justify-content: center;
align-items: center;
position: relative;
display: flex;
flex-direction: column;
height: 100%;
padding: 20px;
box-sizing: border-box;
.circle-mic {
width: 150px;
height: 150px;
border-radius: 50%;
background: linear-gradient(to bottom right, #a0d8ef, #f0f8ff);
display: flex;
justify-content: center;
align-items: center;
}
.icon-center {
font-size: 24px;
}
.bottom-icons {
display: flex;
justify-content: space-between;
align-items: center;
width: 100%;
position: absolute;
bottom: 20px;
box-sizing: border-box;
padding: 0 20px;
}
.icon-left,
.icon-right {
width: 46px;
height: 46px;
font-size: 36px;
background: var(--second);
border-radius: 50%;
padding: 2px;
display: flex;
justify-content: center;
align-items: center;
cursor: pointer;
&:hover {
opacity: 0.8;
}
}
&.mobile {
display: none;
}
}
.pulse {
animation: pulse 1.5s infinite;
}
@keyframes pulse {
0% {
transform: scale(1);
opacity: 0.7;
}
50% {
transform: scale(1.1);
opacity: 1;
}
100% {
transform: scale(1);
opacity: 0.7;
}
}

View File

@@ -0,0 +1,359 @@
import VoiceIcon from "@/app/icons/voice.svg";
import VoiceOffIcon from "@/app/icons/voice-off.svg";
import PowerIcon from "@/app/icons/power.svg";
import styles from "./realtime-chat.module.scss";
import clsx from "clsx";
import { useState, useRef, useEffect } from "react";
import { useChatStore, createMessage, useAppConfig } from "@/app/store";
import { IconButton } from "@/app/components/button";
import {
Modality,
RTClient,
RTInputAudioItem,
RTResponse,
TurnDetection,
} from "rt-client";
import { AudioHandler } from "@/app/lib/audio";
import { uploadImage } from "@/app/utils/chat";
import { VoicePrint } from "@/app/components/voice-print";
interface RealtimeChatProps {
onClose?: () => void;
onStartVoice?: () => void;
onPausedVoice?: () => void;
}
export function RealtimeChat({
onClose,
onStartVoice,
onPausedVoice,
}: RealtimeChatProps) {
const chatStore = useChatStore();
const session = chatStore.currentSession();
const config = useAppConfig();
const [status, setStatus] = useState("");
const [isRecording, setIsRecording] = useState(false);
const [isConnected, setIsConnected] = useState(false);
const [isConnecting, setIsConnecting] = useState(false);
const [modality, setModality] = useState("audio");
const [useVAD, setUseVAD] = useState(true);
const [frequencies, setFrequencies] = useState<Uint8Array | undefined>();
const clientRef = useRef<RTClient | null>(null);
const audioHandlerRef = useRef<AudioHandler | null>(null);
const initRef = useRef(false);
const temperature = config.realtimeConfig.temperature;
const apiKey = config.realtimeConfig.apiKey;
const model = config.realtimeConfig.model;
const azure = config.realtimeConfig.provider === "Azure";
const azureEndpoint = config.realtimeConfig.azure.endpoint;
const azureDeployment = config.realtimeConfig.azure.deployment;
const voice = config.realtimeConfig.voice;
const handleConnect = async () => {
if (isConnecting) return;
if (!isConnected) {
try {
setIsConnecting(true);
clientRef.current = azure
? new RTClient(
new URL(azureEndpoint),
{ key: apiKey },
{ deployment: azureDeployment },
)
: new RTClient({ key: apiKey }, { model });
const modalities: Modality[] =
modality === "audio" ? ["text", "audio"] : ["text"];
const turnDetection: TurnDetection = useVAD
? { type: "server_vad" }
: null;
await clientRef.current.configure({
instructions: "",
voice,
input_audio_transcription: { model: "whisper-1" },
turn_detection: turnDetection,
tools: [],
temperature,
modalities,
});
startResponseListener();
setIsConnected(true);
// TODO
// try {
// const recentMessages = chatStore.getMessagesWithMemory();
// for (const message of recentMessages) {
// const { role, content } = message;
// if (typeof content === "string") {
// await clientRef.current.sendItem({
// type: "message",
// role: role as any,
// content: [
// {
// type: (role === "assistant" ? "text" : "input_text") as any,
// text: content as string,
// },
// ],
// });
// }
// }
// // await clientRef.current.generateResponse();
// } catch (error) {
// console.error("Set message failed:", error);
// }
} catch (error) {
console.error("Connection failed:", error);
setStatus("Connection failed");
} finally {
setIsConnecting(false);
}
} else {
await disconnect();
}
};
const disconnect = async () => {
if (clientRef.current) {
try {
await clientRef.current.close();
clientRef.current = null;
setIsConnected(false);
} catch (error) {
console.error("Disconnect failed:", error);
}
}
};
const startResponseListener = async () => {
if (!clientRef.current) return;
try {
for await (const serverEvent of clientRef.current.events()) {
if (serverEvent.type === "response") {
await handleResponse(serverEvent);
} else if (serverEvent.type === "input_audio") {
await handleInputAudio(serverEvent);
}
}
} catch (error) {
if (clientRef.current) {
console.error("Response iteration error:", error);
}
}
};
const handleResponse = async (response: RTResponse) => {
for await (const item of response) {
if (item.type === "message" && item.role === "assistant") {
const botMessage = createMessage({
role: item.role,
content: "",
});
// add bot message first
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat([botMessage]);
});
let hasAudio = false;
for await (const content of item) {
if (content.type === "text") {
for await (const text of content.textChunks()) {
botMessage.content += text;
}
} else if (content.type === "audio") {
const textTask = async () => {
for await (const text of content.transcriptChunks()) {
botMessage.content += text;
}
};
const audioTask = async () => {
audioHandlerRef.current?.startStreamingPlayback();
for await (const audio of content.audioChunks()) {
hasAudio = true;
audioHandlerRef.current?.playChunk(audio);
}
};
await Promise.all([textTask(), audioTask()]);
}
// update message.content
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
}
if (hasAudio) {
// upload audio get audio_url
const blob = audioHandlerRef.current?.savePlayFile();
uploadImage(blob!).then((audio_url) => {
botMessage.audio_url = audio_url;
// update text and audio_url
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
});
}
}
}
};
const handleInputAudio = async (item: RTInputAudioItem) => {
await item.waitForCompletion();
if (item.transcription) {
const userMessage = createMessage({
role: "user",
content: item.transcription,
});
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat([userMessage]);
});
// save input audio_url, and update session
const { audioStartMillis, audioEndMillis } = item;
// upload audio get audio_url
const blob = audioHandlerRef.current?.saveRecordFile(
audioStartMillis,
audioEndMillis,
);
uploadImage(blob!).then((audio_url) => {
userMessage.audio_url = audio_url;
chatStore.updateTargetSession(session, (session) => {
session.messages = session.messages.concat();
});
});
}
// stop streaming play after get input audio.
audioHandlerRef.current?.stopStreamingPlayback();
};
const toggleRecording = async () => {
if (!isRecording && clientRef.current) {
try {
if (!audioHandlerRef.current) {
audioHandlerRef.current = new AudioHandler();
await audioHandlerRef.current.initialize();
}
await audioHandlerRef.current.startRecording(async (chunk) => {
await clientRef.current?.sendAudio(chunk);
});
setIsRecording(true);
} catch (error) {
console.error("Failed to start recording:", error);
}
} else if (audioHandlerRef.current) {
try {
audioHandlerRef.current.stopRecording();
if (!useVAD) {
const inputAudio = await clientRef.current?.commitAudio();
await handleInputAudio(inputAudio!);
await clientRef.current?.generateResponse();
}
setIsRecording(false);
} catch (error) {
console.error("Failed to stop recording:", error);
}
}
};
useEffect(() => {
// Prevent double initialization
if (initRef.current) return;
initRef.current = true;
const initAudioHandler = async () => {
const handler = new AudioHandler();
await handler.initialize();
audioHandlerRef.current = handler;
await handleConnect();
await toggleRecording();
};
initAudioHandler().catch((error) => {
setStatus(error);
console.error(error);
});
return () => {
if (isRecording) {
toggleRecording();
}
audioHandlerRef.current?.close().catch(console.error);
disconnect();
};
}, []);
useEffect(() => {
let animationFrameId: number;
if (isConnected && isRecording) {
const animationFrame = () => {
if (audioHandlerRef.current) {
const freqData = audioHandlerRef.current.getByteFrequencyData();
setFrequencies(freqData);
}
animationFrameId = requestAnimationFrame(animationFrame);
};
animationFrameId = requestAnimationFrame(animationFrame);
} else {
setFrequencies(undefined);
}
return () => {
if (animationFrameId) {
cancelAnimationFrame(animationFrameId);
}
};
}, [isConnected, isRecording]);
// update session params
useEffect(() => {
clientRef.current?.configure({ voice });
}, [voice]);
useEffect(() => {
clientRef.current?.configure({ temperature });
}, [temperature]);
const handleClose = async () => {
onClose?.();
if (isRecording) {
await toggleRecording();
}
disconnect().catch(console.error);
};
return (
<div className={styles["realtime-chat"]}>
<div
className={clsx(styles["circle-mic"], {
[styles["pulse"]]: isRecording,
})}
>
<VoicePrint frequencies={frequencies} isActive={isRecording} />
</div>
<div className={styles["bottom-icons"]}>
<div>
<IconButton
icon={isRecording ? <VoiceIcon /> : <VoiceOffIcon />}
onClick={toggleRecording}
disabled={!isConnected}
shadow
bordered
/>
</div>
<div className={styles["icon-center"]}>{status}</div>
<div>
<IconButton
icon={<PowerIcon />}
onClick={handleClose}
shadow
bordered
/>
</div>
</div>
</div>
);
}
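
For orientation, a minimal sketch of how a host view might mount this component. The `VoicePanel` wrapper, its open/close state, and the import path are assumptions for illustration; only the `onClose`, `onStartVoice`, and `onPausedVoice` callbacks come from the `RealtimeChatProps` contract above.

```tsx
// Hypothetical host component; the import path and wrapper are assumed,
// only the three callback props are part of the real contract.
import { useState } from "react";
import { RealtimeChat } from "@/app/components/realtime-chat/realtime-chat";

export function VoicePanel() {
  const [open, setOpen] = useState(true);
  return open ? (
    <RealtimeChat
      onClose={() => setOpen(false)}
      onStartVoice={() => console.log("[Voice] started")}
      onPausedVoice={() => console.log("[Voice] paused")}
    />
  ) : null;
}
```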

View File

@@ -0,0 +1,173 @@
import { RealtimeConfig } from "@/app/store";
import Locale from "@/app/locales";
import { ListItem, Select, PasswordInput } from "@/app/components/ui-lib";
import { InputRange } from "@/app/components/input-range";
import { Voice } from "rt-client";
import { ServiceProvider } from "@/app/constant";
const providers = [ServiceProvider.OpenAI, ServiceProvider.Azure];
const models = ["gpt-4o-realtime-preview-2024-10-01"];
const voice = ["alloy", "shimmer", "echo"];
export function RealtimeConfigList(props: {
realtimeConfig: RealtimeConfig;
updateConfig: (updater: (config: RealtimeConfig) => void) => void;
}) {
const azureConfigComponent = props.realtimeConfig.provider ===
ServiceProvider.Azure && (
<>
<ListItem
title={Locale.Settings.Realtime.Azure.Endpoint.Title}
subTitle={Locale.Settings.Realtime.Azure.Endpoint.SubTitle}
>
<input
value={props.realtimeConfig?.azure?.endpoint}
type="text"
placeholder={Locale.Settings.Realtime.Azure.Endpoint.Title}
onChange={(e) => {
props.updateConfig(
(config) => (config.azure.endpoint = e.currentTarget.value),
);
}}
/>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.Azure.Deployment.Title}
subTitle={Locale.Settings.Realtime.Azure.Deployment.SubTitle}
>
<input
value={props.realtimeConfig?.azure?.deployment}
type="text"
placeholder={Locale.Settings.Realtime.Azure.Deployment.Title}
onChange={(e) => {
props.updateConfig(
(config) => (config.azure.deployment = e.currentTarget.value),
);
}}
/>
</ListItem>
</>
);
return (
<>
<ListItem
title={Locale.Settings.Realtime.Enable.Title}
subTitle={Locale.Settings.Realtime.Enable.SubTitle}
>
<input
type="checkbox"
checked={props.realtimeConfig.enable}
onChange={(e) =>
props.updateConfig(
(config) => (config.enable = e.currentTarget.checked),
)
}
></input>
</ListItem>
{props.realtimeConfig.enable && (
<>
<ListItem
title={Locale.Settings.Realtime.Provider.Title}
subTitle={Locale.Settings.Realtime.Provider.SubTitle}
>
<Select
aria-label={Locale.Settings.Realtime.Provider.Title}
value={props.realtimeConfig.provider}
onChange={(e) => {
props.updateConfig(
(config) =>
(config.provider = e.target.value as ServiceProvider),
);
}}
>
{providers.map((v, i) => (
<option value={v} key={i}>
{v}
</option>
))}
</Select>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.Model.Title}
subTitle={Locale.Settings.Realtime.Model.SubTitle}
>
<Select
aria-label={Locale.Settings.Realtime.Model.Title}
value={props.realtimeConfig.model}
onChange={(e) => {
props.updateConfig((config) => (config.model = e.target.value));
}}
>
{models.map((v, i) => (
<option value={v} key={i}>
{v}
</option>
))}
</Select>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.ApiKey.Title}
subTitle={Locale.Settings.Realtime.ApiKey.SubTitle}
>
<PasswordInput
aria={Locale.Settings.ShowPassword}
aria-label={Locale.Settings.Realtime.ApiKey.Title}
value={props.realtimeConfig.apiKey}
type="text"
placeholder={Locale.Settings.Realtime.ApiKey.Placeholder}
onChange={(e) => {
props.updateConfig(
(config) => (config.apiKey = e.currentTarget.value),
);
}}
/>
</ListItem>
{azureConfigComponent}
<ListItem
title={Locale.Settings.TTS.Voice.Title}
subTitle={Locale.Settings.TTS.Voice.SubTitle}
>
<Select
value={props.realtimeConfig.voice}
onChange={(e) => {
props.updateConfig(
(config) => (config.voice = e.currentTarget.value as Voice),
);
}}
>
{voice.map((v, i) => (
<option value={v} key={i}>
{v}
</option>
))}
</Select>
</ListItem>
<ListItem
title={Locale.Settings.Realtime.Temperature.Title}
subTitle={Locale.Settings.Realtime.Temperature.SubTitle}
>
<InputRange
aria={Locale.Settings.Temperature.Title}
value={props.realtimeConfig?.temperature?.toFixed(1)}
min="0.6"
max="1"
step="0.1"
onChange={(e) => {
props.updateConfig(
(config) =>
(config.temperature = e.currentTarget.valueAsNumber),
);
}}
></InputRange>
</ListItem>
</>
)}
</>
);
}
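
`updateConfig` follows a copy-mutate-commit pattern: the parent clones the current `RealtimeConfig`, hands the clone to the updater, then persists the result. A sketch of a compatible parent, assuming a hypothetical `saveConfig` callback (the actual Settings page commits to the app config store instead, as shown further below):

```tsx
// Sketch of wiring RealtimeConfigList to a store; `saveConfig` is hypothetical.
import { RealtimeConfig } from "@/app/store";
import { RealtimeConfigList } from "@/app/components/realtime-chat/realtime-config";

function RealtimeSettings(props: {
  config: RealtimeConfig;
  saveConfig: (c: RealtimeConfig) => void;
}) {
  return (
    <RealtimeConfigList
      realtimeConfig={props.config}
      updateConfig={(updater) => {
        const next = { ...props.config }; // shallow copy of the current config
        updater(next); // let the child mutate the copy
        props.saveConfig(next); // commit the mutated copy
      }}
    />
  );
}
```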

View File

@@ -4,6 +4,7 @@ import { Select } from "@/app/components/ui-lib";
import { IconButton } from "@/app/components/button";
import Locale from "@/app/locales";
import { useSdStore } from "@/app/store/sd";
import clsx from "clsx";

export const params = [
{
@@ -136,7 +137,7 @@ export function ControlParamItem(props: {
className?: string;
}) {
return (
-<div className={styles["ctrl-param-item"] + ` ${props.className || ""}`}>
<div className={clsx(styles["ctrl-param-item"], props.className)}>
<div className={styles["ctrl-param-item-header"]}>
<div className={styles["ctrl-param-item-title"]}>
<div>

View File

@@ -36,6 +36,7 @@ import { removeImage } from "@/app/utils/chat";
import { SideBar } from "./sd-sidebar";
import { WindowContent } from "@/app/components/home";
import { params } from "./sd-panel";
import clsx from "clsx";

function getSdTaskStatus(item: any) {
let s: string;
@@ -104,7 +105,7 @@ export function Sd() {
return (
<>
-<SideBar className={isSd ? homeStyles["sidebar-show"] : ""} />
<SideBar className={clsx({ [homeStyles["sidebar-show"]]: isSd })} />
<WindowContent>
<div className={chatStyles.chat} key={"1"}>
<div className="window-header" data-tauri-drag-region>
@@ -121,7 +122,10 @@ export function Sd() {
</div>
)}
<div
-className={`window-header-title ${chatStyles["chat-body-title"]}`}
className={clsx(
"window-header-title",
chatStyles["chat-body-title"],
)}
>
<div className={`window-header-main-title`}>Stability AI</div>
<div className="window-header-sub-title">

View File

@@ -49,7 +49,7 @@ import Locale, {
changeLang,
getLang,
} from "../locales";
-import { copyToClipboard } from "../utils";
import { copyToClipboard, clientUpdate, semverCompare } from "../utils";
import Link from "next/link";
import {
Anthropic,
@@ -59,6 +59,7 @@ import {
ByteDance,
Alibaba,
Moonshot,
XAI,
Google,
GoogleSafetySettingsThreshold,
OPENAI_BASE_URL,
@@ -71,6 +72,9 @@ import {
Stability,
Iflytek,
SAAS_CHAT_URL,
ChatGLM,
DeepSeek,
SiliconFlow,
} from "../constant";
import { Prompt, SearchService, usePromptStore } from "../store/prompt";
import { ErrorBoundary } from "./error";
@@ -83,6 +87,7 @@ import { nanoid } from "nanoid";
import { useMaskStore } from "../store/mask";
import { ProviderType } from "../utils/cloud";
import { TTSConfigList } from "./tts-config";
import { RealtimeConfigList } from "./realtime-chat/realtime-config";

function EditPromptModal(props: { id: string; onClose: () => void }) {
const promptStore = usePromptStore();
@@ -585,7 +590,7 @@ export function Settings() {
const [checkingUpdate, setCheckingUpdate] = useState(false);
const currentVersion = updateStore.formatVersion(updateStore.version);
const remoteId = updateStore.formatVersion(updateStore.remoteVersion);
-const hasNewVersion = currentVersion !== remoteId;
const hasNewVersion = semverCompare(currentVersion, remoteId) === -1;
const updateUrl = getClientConfig()?.isApp ? RELEASE_URL : UPDATE_URL;

function checkUpdate(force = false) {
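
`semverCompare` replaces the old `currentVersion !== remoteId` test, which flagged any difference (including a downgrade) as a new version and could not order versions numerically. A sketch of the comparator contract that `semverCompare(current, remote) === -1` relies on; the real helper lives in `app/utils.ts`, so this reimplementation is purely illustrative:

```ts
// Illustrative x.y.z comparator: -1 if a < b, 0 if equal, 1 if a > b.
// Assumes plain numeric tags without pre-release suffixes.
function semverCompare(a: string, b: string): number {
  const pa = a.split(".").map(Number);
  const pb = b.split(".").map(Number);
  for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
    const x = pa[i] ?? 0;
    const y = pb[i] ?? 0;
    if (x !== y) return x < y ? -1 : 1;
  }
  return 0;
}

semverCompare("2.9.9", "2.10.0"); // -1, so hasNewVersion becomes true
```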
@@ -1194,6 +1199,167 @@ export function Settings() {
</>
);
const deepseekConfigComponent = accessStore.provider ===
ServiceProvider.DeepSeek && (
<>
<ListItem
title={Locale.Settings.Access.DeepSeek.Endpoint.Title}
subTitle={
Locale.Settings.Access.DeepSeek.Endpoint.SubTitle +
DeepSeek.ExampleEndpoint
}
>
<input
aria-label={Locale.Settings.Access.DeepSeek.Endpoint.Title}
type="text"
value={accessStore.deepseekUrl}
placeholder={DeepSeek.ExampleEndpoint}
onChange={(e) =>
accessStore.update(
(access) => (access.deepseekUrl = e.currentTarget.value),
)
}
></input>
</ListItem>
<ListItem
title={Locale.Settings.Access.DeepSeek.ApiKey.Title}
subTitle={Locale.Settings.Access.DeepSeek.ApiKey.SubTitle}
>
<PasswordInput
aria-label={Locale.Settings.Access.DeepSeek.ApiKey.Title}
value={accessStore.deepseekApiKey}
type="text"
placeholder={Locale.Settings.Access.DeepSeek.ApiKey.Placeholder}
onChange={(e) => {
accessStore.update(
(access) => (access.deepseekApiKey = e.currentTarget.value),
);
}}
/>
</ListItem>
</>
);
const XAIConfigComponent = accessStore.provider === ServiceProvider.XAI && (
<>
<ListItem
title={Locale.Settings.Access.XAI.Endpoint.Title}
subTitle={
Locale.Settings.Access.XAI.Endpoint.SubTitle + XAI.ExampleEndpoint
}
>
<input
aria-label={Locale.Settings.Access.XAI.Endpoint.Title}
type="text"
value={accessStore.xaiUrl}
placeholder={XAI.ExampleEndpoint}
onChange={(e) =>
accessStore.update(
(access) => (access.xaiUrl = e.currentTarget.value),
)
}
></input>
</ListItem>
<ListItem
title={Locale.Settings.Access.XAI.ApiKey.Title}
subTitle={Locale.Settings.Access.XAI.ApiKey.SubTitle}
>
<PasswordInput
aria-label={Locale.Settings.Access.XAI.ApiKey.Title}
value={accessStore.xaiApiKey}
type="text"
placeholder={Locale.Settings.Access.XAI.ApiKey.Placeholder}
onChange={(e) => {
accessStore.update(
(access) => (access.xaiApiKey = e.currentTarget.value),
);
}}
/>
</ListItem>
</>
);
const chatglmConfigComponent = accessStore.provider ===
ServiceProvider.ChatGLM && (
<>
<ListItem
title={Locale.Settings.Access.ChatGLM.Endpoint.Title}
subTitle={
Locale.Settings.Access.ChatGLM.Endpoint.SubTitle +
ChatGLM.ExampleEndpoint
}
>
<input
aria-label={Locale.Settings.Access.ChatGLM.Endpoint.Title}
type="text"
value={accessStore.chatglmUrl}
placeholder={ChatGLM.ExampleEndpoint}
onChange={(e) =>
accessStore.update(
(access) => (access.chatglmUrl = e.currentTarget.value),
)
}
></input>
</ListItem>
<ListItem
title={Locale.Settings.Access.ChatGLM.ApiKey.Title}
subTitle={Locale.Settings.Access.ChatGLM.ApiKey.SubTitle}
>
<PasswordInput
aria-label={Locale.Settings.Access.ChatGLM.ApiKey.Title}
value={accessStore.chatglmApiKey}
type="text"
placeholder={Locale.Settings.Access.ChatGLM.ApiKey.Placeholder}
onChange={(e) => {
accessStore.update(
(access) => (access.chatglmApiKey = e.currentTarget.value),
);
}}
/>
</ListItem>
</>
);
const siliconflowConfigComponent = accessStore.provider ===
ServiceProvider.SiliconFlow && (
<>
<ListItem
title={Locale.Settings.Access.SiliconFlow.Endpoint.Title}
subTitle={
Locale.Settings.Access.SiliconFlow.Endpoint.SubTitle +
SiliconFlow.ExampleEndpoint
}
>
<input
aria-label={Locale.Settings.Access.SiliconFlow.Endpoint.Title}
type="text"
value={accessStore.siliconflowUrl}
placeholder={SiliconFlow.ExampleEndpoint}
onChange={(e) =>
accessStore.update(
(access) => (access.siliconflowUrl = e.currentTarget.value),
)
}
></input>
</ListItem>
<ListItem
title={Locale.Settings.Access.SiliconFlow.ApiKey.Title}
subTitle={Locale.Settings.Access.SiliconFlow.ApiKey.SubTitle}
>
<PasswordInput
aria-label={Locale.Settings.Access.SiliconFlow.ApiKey.Title}
value={accessStore.siliconflowApiKey}
type="text"
placeholder={Locale.Settings.Access.SiliconFlow.ApiKey.Placeholder}
onChange={(e) => {
accessStore.update(
(access) => (access.siliconflowApiKey = e.currentTarget.value),
);
}}
/>
</ListItem>
</>
);
const stabilityConfigComponent = accessStore.provider ===
ServiceProvider.Stability && (
<>
@@ -1357,9 +1523,17 @@ export function Settings() {
{checkingUpdate ? (
<LoadingIcon />
) : hasNewVersion ? (
clientConfig?.isApp ? (
<IconButton
icon={<ResetIcon></ResetIcon>}
text={Locale.Settings.Update.GoToUpdate}
onClick={() => clientUpdate()}
/>
) : (
<Link href={updateUrl} target="_blank" className="link">
{Locale.Settings.Update.GoToUpdate}
</Link>
)
) : (
<IconButton
icon={<ResetIcon></ResetIcon>}
@@ -1509,6 +1683,22 @@ export function Settings() {
}
></input>
</ListItem>
<ListItem
title={Locale.Mask.Config.CodeFold.Title}
subTitle={Locale.Mask.Config.CodeFold.SubTitle}
>
<input
aria-label={Locale.Mask.Config.CodeFold.Title}
type="checkbox"
checked={config.enableCodeFold}
data-testid="enable-code-fold-checkbox"
onChange={(e) =>
updateConfig(
(config) => (config.enableCodeFold = e.currentTarget.checked),
)
}
></input>
</ListItem>
</List>

<SyncItems />
@@ -1626,8 +1816,12 @@ export function Settings() {
{alibabaConfigComponent}
{tencentConfigComponent}
{moonshotConfigComponent}
{deepseekConfigComponent}
{stabilityConfigComponent}
{lflytekConfigComponent}
{XAIConfigComponent}
{chatglmConfigComponent}
{siliconflowConfigComponent}
</>
)}
</>
@@ -1662,9 +1856,11 @@ export function Settings() {
<ListItem
title={Locale.Settings.Access.CustomModel.Title}
subTitle={Locale.Settings.Access.CustomModel.SubTitle}
vertical={true}
>
<input
aria-label={Locale.Settings.Access.CustomModel.Title}
style={{ width: "100%", maxWidth: "unset", textAlign: "left" }}
type="text"
value={config.customModels}
placeholder="model1,model2,model3"
@@ -1691,7 +1887,18 @@ export function Settings() {
{shouldShowPromptModal && (
<UserPromptModal onClose={() => setShowPromptModal(false)} />
)}
<List>
<RealtimeConfigList
realtimeConfig={config.realtimeConfig}
updateConfig={(updater) => {
const realtimeConfig = { ...config.realtimeConfig };
updater(realtimeConfig);
config.update(
(config) => (config.realtimeConfig = realtimeConfig),
);
}}
/>
</List>
<List>
<TTSConfigList
ttsConfig={config.ttsConfig}

View File

@@ -1,4 +1,4 @@
-import React, { useEffect, useRef, useMemo, useState, Fragment } from "react";
import React, { Fragment, useEffect, useMemo, useRef, useState } from "react";

import styles from "./home.module.scss";
@@ -9,6 +9,7 @@ import ChatGptIcon from "../icons/chatgpt.svg";
import AddIcon from "../icons/add.svg";
import DeleteIcon from "../icons/delete.svg";
import MaskIcon from "../icons/mask.svg";
import McpIcon from "../icons/mcp.svg";
import DragIcon from "../icons/drag.svg";
import DiscoveryIcon from "../icons/discovery.svg";
@@ -22,14 +23,21 @@ import {
MIN_SIDEBAR_WIDTH,
NARROW_SIDEBAR_WIDTH,
Path,
-PLUGINS,
REPO_URL,
} from "../constant";
import { Link, useNavigate } from "react-router-dom";
import { isIOS, useMobileScreen } from "../utils";
import dynamic from "next/dynamic";
-import { showConfirm, Selector } from "./ui-lib";
import { Selector, showConfirm } from "./ui-lib";
import clsx from "clsx";
import { isMcpEnabled } from "../mcp/actions";

const DISCOVERY = [
{ name: Locale.Plugin.Name, path: Path.Plugins },
{ name: "Stable Diffusion", path: Path.Sd },
{ name: Locale.SearchChat.Page.Title, path: Path.SearchChat },
];

const ChatList = dynamic(async () => (await import("./chat-list")).ChatList, {
loading: () => null,
@@ -127,6 +135,7 @@ export function useDragSideBar() {
shouldNarrow,
};
}

export function SideBarContainer(props: {
children: React.ReactNode;
onDragStart: (e: MouseEvent) => void;
@@ -141,9 +150,9 @@ export function SideBarContainer(props: {
const { children, className, onDragStart, shouldNarrow } = props;
return (
<div
-className={`${styles.sidebar} ${className} ${
-shouldNarrow && styles["narrow-sidebar"]
-}`}
className={clsx(styles.sidebar, className, {
[styles["narrow-sidebar"]]: shouldNarrow,
})}
style={{
// #3016 disable transition on ios mobile screen
transition: isMobileScreen && isIOSMobile ? "none" : undefined,
@@ -165,18 +174,24 @@ export function SideBarHeader(props: {
subTitle?: string | React.ReactNode;
logo?: React.ReactNode;
children?: React.ReactNode;
shouldNarrow?: boolean;
}) {
-const { title, subTitle, logo, children } = props;
const { title, subTitle, logo, children, shouldNarrow } = props;
return (
<Fragment>
-<div className={styles["sidebar-header"]} data-tauri-drag-region>
<div
className={clsx(styles["sidebar-header"], {
[styles["sidebar-header-narrow"]]: shouldNarrow,
})}
data-tauri-drag-region
>
<div className={styles["sidebar-title-container"]}>
<div className={styles["sidebar-title"]} data-tauri-drag-region>
{title}
</div>
<div className={styles["sidebar-sub-title"]}>{subTitle}</div>
</div>
-<div className={styles["sidebar-logo"] + " no-dark"}>{logo}</div>
<div className={clsx(styles["sidebar-logo"], "no-dark")}>{logo}</div>
</div>
{children}
</Fragment>
@@ -212,10 +227,21 @@ export function SideBarTail(props: {
export function SideBar(props: { className?: string }) {
useHotKey();
const { onDragStart, shouldNarrow } = useDragSideBar();
-const [showPluginSelector, setShowPluginSelector] = useState(false);
const [showDiscoverySelector, setshowDiscoverySelector] = useState(false);
const navigate = useNavigate();
const config = useAppConfig();
const chatStore = useChatStore();
const [mcpEnabled, setMcpEnabled] = useState(false);

useEffect(() => {
// Check whether MCP is enabled
const checkMcpStatus = async () => {
const enabled = await isMcpEnabled();
setMcpEnabled(enabled);
console.log("[SideBar] MCP enabled:", enabled);
};
checkMcpStatus();
}, []);

return (
<SideBarContainer
@@ -227,6 +253,7 @@ export function SideBar(props: { className?: string }) {
title="NextChat"
subTitle="Build your own AI assistant."
logo={<ChatGptIcon />}
shouldNarrow={shouldNarrow}
>
<div className={styles["sidebar-header-bar"]}>
<IconButton
@@ -242,25 +269,36 @@ export function SideBar(props: { className?: string }) {
}}
shadow
/>
{mcpEnabled && (
<IconButton
icon={<McpIcon />}
text={shouldNarrow ? undefined : Locale.Mcp.Name}
className={styles["sidebar-bar-button"]}
onClick={() => {
navigate(Path.McpMarket, { state: { fromHome: true } });
}}
shadow
/>
)}
<IconButton
icon={<DiscoveryIcon />}
text={shouldNarrow ? undefined : Locale.Discovery.Name}
className={styles["sidebar-bar-button"]}
-onClick={() => setShowPluginSelector(true)}
onClick={() => setshowDiscoverySelector(true)}
shadow
/>
</div>
-{showPluginSelector && (
{showDiscoverySelector && (
<Selector
items={[
-...PLUGINS.map((item) => {
...DISCOVERY.map((item) => {
return {
title: item.name,
value: item.path,
};
}),
]}
-onClose={() => setShowPluginSelector(false)}
onClose={() => setshowDiscoverySelector(false)}
onSelection={(s) => {
navigate(s[0], { state: { fromHome: true } });
}}
@@ -279,7 +317,7 @@ export function SideBar(props: { className?: string }) {
<SideBarTail
primaryAction={
<>
-<div className={styles["sidebar-action"] + " " + styles.mobile}>
<div className={clsx(styles["sidebar-action"], styles.mobile)}>
<IconButton
icon={<DeleteIcon />}
onClick={async () => {

View File

@@ -23,6 +23,8 @@ import React, {
useRef,
} from "react";
import { IconButton } from "./button";
import { Avatar } from "./emoji";
import clsx from "clsx";

export function Popover(props: {
children: JSX.Element;
@@ -45,7 +47,7 @@ export function Popover(props: {
export function Card(props: { children: JSX.Element[]; className?: string }) {
return (
-<div className={styles.card + " " + props.className}>{props.children}</div>
<div className={clsx(styles.card, props.className)}>{props.children}</div>
);
}
@@ -60,11 +62,13 @@ export function ListItem(props: {
}) {
return (
<div
-className={
-styles["list-item"] +
-` ${props.vertical ? styles["vertical"] : ""} ` +
-` ${props.className || ""}`
-}
className={clsx(
styles["list-item"],
{
[styles["vertical"]]: props.vertical,
},
props.className,
)}
onClick={props.onClick}
>
<div className={styles["list-header"]}>
@@ -135,9 +139,9 @@ export function Modal(props: ModalProps) {
return (
<div
-className={
-styles["modal-container"] + ` ${isMax && styles["modal-container-max"]}`
-}
className={clsx(styles["modal-container"], {
[styles["modal-container-max"]]: isMax,
})}
>
<div className={styles["modal-header"]}>
<div className={styles["modal-title"]}>{props.title}</div>
@@ -260,7 +264,7 @@ export function Input(props: InputProps) {
return (
<textarea
{...props}
-className={`${styles["input"]} ${props.className}`}
className={clsx(styles["input"], props.className)}
></textarea>
);
}
@@ -301,9 +305,13 @@ export function Select(
const { className, children, align, ...otherProps } = props;
return (
<div
-className={`${styles["select-with-icon"]} ${
-align === "left" ? styles["left-align-option"] : ""
-} ${className}`}
className={clsx(
styles["select-with-icon"],
{
[styles["left-align-option"]]: align === "left",
},
className,
)}
>
<select className={styles["select-with-icon-select"]} {...otherProps}>
{children}
@@ -509,12 +517,13 @@ export function Selector<T>(props: {
const selected = selectedValues.includes(item.value);
return (
<ListItem
-className={`${styles["selector-item"]} ${
-item.disable && styles["selector-item-disabled"]
-}`}
className={clsx(styles["selector-item"], {
[styles["selector-item-disabled"]]: item.disable,
})}
key={i}
title={item.title}
subTitle={item.subTitle}
icon={<Avatar model={item.value as string} />}
onClick={(e) => {
if (item.disable) {
e.stopPropagation();

View File

@@ -0,0 +1 @@
export * from "./voice-print";

View File

@@ -0,0 +1,11 @@
.voice-print {
width: 100%;
height: 60px;
margin: 20px 0;
canvas {
width: 100%;
height: 100%;
filter: brightness(1.2); // increase overall brightness
}
}

View File

@@ -0,0 +1,180 @@
import { useEffect, useRef, useCallback } from "react";
import styles from "./voice-print.module.scss";
interface VoicePrintProps {
frequencies?: Uint8Array;
isActive?: boolean;
}
export function VoicePrint({ frequencies, isActive }: VoicePrintProps) {
// Canvas ref, used to obtain the drawing context
const canvasRef = useRef<HTMLCanvasElement>(null);
// Historical frequency data, kept for smoothing
const historyRef = useRef<number[][]>([]);
// Number of history frames to keep; controls the smoothness
const historyLengthRef = useRef(10);
// Animation frame ID, stored for cleanup
const animationFrameRef = useRef<number>();
/**
 * Maintains a fixed-length history of frequency frames,
 * using a FIFO queue: the oldest frame is dropped first.
 */
const updateHistory = useCallback((freqArray: number[]) => {
historyRef.current.push(freqArray);
if (historyRef.current.length > historyLengthRef.current) {
historyRef.current.shift();
}
}, []);
useEffect(() => {
const canvas = canvasRef.current;
if (!canvas) return;
const ctx = canvas.getContext("2d");
if (!ctx) return;
/**
 * Adapt to high-DPI screens: raise the canvas's actual
 * render resolution to match the device pixel ratio.
 */
const dpr = window.devicePixelRatio || 1;
canvas.width = canvas.offsetWidth * dpr;
canvas.height = canvas.offsetHeight * dpr;
ctx.scale(dpr, dpr);
/**
 * Draw loop, driven by requestAnimationFrame for smooth animation:
 * 1. clear the canvas;
 * 2. smooth the current frequencies against the history;
 * 3. draw the mirrored waveform with a gradient fill;
 * 4. schedule the next frame.
 */
const draw = () => {
// Clear the canvas
ctx.clearRect(0, 0, canvas.width, canvas.height);
if (!frequencies || !isActive) {
historyRef.current = [];
return;
}
const freqArray = Array.from(frequencies);
updateHistory(freqArray);
// Draw the voice print
const points: [number, number][] = [];
const centerY = canvas.height / 2;
const width = canvas.width;
const sliceWidth = width / (frequencies.length - 1);
// Draw the main waveform
ctx.beginPath();
ctx.moveTo(0, centerY);
/**
 * Build the upper waveform point by point:
 * 1. smooth each frequency bin against its history;
 * 2. normalize the value and map it to a height;
 * 3. connect the points with quadratic curves so the line stays smooth;
 * 4. collect the points for the mirrored lower half.
 */
for (let i = 0; i < frequencies.length; i++) {
const x = i * sliceWidth;
let avgFrequency = frequencies[i];
/**
 * Smoothing: average the current value with the historical
 * values recorded for this frequency bin, so single-frame
 * spikes do not make the waveform jitter.
 */
if (historyRef.current.length > 0) {
const historicalValues = historyRef.current.map((h) => h[i] || 0);
avgFrequency =
(avgFrequency + historicalValues.reduce((a, b) => a + b, 0)) /
(historyRef.current.length + 1);
}
/**
 * Map the smoothed value to a vertical offset:
 * 1. normalize to the 0-1 range;
 * 2. scale to half the canvas height;
 * 3. add a time-based sine offset so the wave appears to flow.
 */
const normalized = avgFrequency / 255.0;
const height = normalized * (canvas.height / 2);
const y = centerY + height * Math.sin(i * 0.2 + Date.now() * 0.002);
points.push([x, y]);
if (i === 0) {
ctx.moveTo(x, y);
} else {
// Use quadratic Bézier curves to keep the waveform smooth
const prevPoint = points[i - 1];
const midX = (prevPoint[0] + x) / 2;
ctx.quadraticCurveTo(
prevPoint[0],
prevPoint[1],
midX,
(prevPoint[1] + y) / 2,
);
}
}
// Draw the mirrored lower half
for (let i = points.length - 1; i >= 0; i--) {
const [x, y] = points[i];
const symmetricY = centerY - (y - centerY);
if (i === points.length - 1) {
ctx.lineTo(x, symmetricY);
} else {
const nextPoint = points[i + 1];
const midX = (nextPoint[0] + x) / 2;
ctx.quadraticCurveTo(
nextPoint[0],
centerY - (nextPoint[1] - centerY),
midX,
centerY - ((nextPoint[1] + y) / 2 - centerY),
);
}
}
ctx.closePath();
/**
 * Fill with a horizontal gradient to give the waveform
 * a soft, luminous look.
 */
const gradient = ctx.createLinearGradient(0, 0, canvas.width, 0);
gradient.addColorStop(0, "rgba(100, 180, 255, 0.95)");
gradient.addColorStop(0.5, "rgba(140, 200, 255, 0.9)");
gradient.addColorStop(1, "rgba(180, 220, 255, 0.95)");
ctx.fillStyle = gradient;
ctx.fill();
animationFrameRef.current = requestAnimationFrame(draw);
};
// Start the animation loop
draw();
// Cleanup: cancel the pending animation frame on unmount
return () => {
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
}
};
}, [frequencies, isActive, updateHistory]);
return (
<div className={styles["voice-print"]}>
<canvas ref={canvasRef} />
</div>
);
}

View File

@@ -1,5 +1,6 @@
import md5 from "spark-md5";
import { DEFAULT_MODELS, DEFAULT_GA_ID } from "../constant";
import { isGPT4Model } from "../utils/model";

declare global {
namespace NodeJS {
@@ -22,6 +23,7 @@ declare global {
DISABLE_FAST_LINK?: string; // disallow parse settings from url or not
CUSTOM_MODELS?: string; // to control custom models
DEFAULT_MODEL?: string; // to control default model in every new chat window
VISION_MODELS?: string; // to control vision models

// stability only
STABILITY_URL?: string;
@@ -71,8 +73,25 @@ declare global {
IFLYTEK_API_KEY?: string;
IFLYTEK_API_SECRET?: string;

DEEPSEEK_URL?: string;
DEEPSEEK_API_KEY?: string;

// xai only
XAI_URL?: string;
XAI_API_KEY?: string;

// chatglm only
CHATGLM_URL?: string;
CHATGLM_API_KEY?: string;

// siliconflow only
SILICONFLOW_URL?: string;
SILICONFLOW_API_KEY?: string;

// custom template for preprocessing user input
DEFAULT_INPUT_TEMPLATE?: string;

ENABLE_MCP?: string; // enable mcp functionality
}
}
}
@@ -116,23 +135,17 @@ export const getServerSideConfig = () => {
const disableGPT4 = !!process.env.DISABLE_GPT4;
let customModels = process.env.CUSTOM_MODELS ?? "";
let defaultModel = process.env.DEFAULT_MODEL ?? "";
let visionModels = process.env.VISION_MODELS ?? "";

if (disableGPT4) {
if (customModels) customModels += ",";
-customModels += DEFAULT_MODELS.filter(
-(m) =>
-(m.name.startsWith("gpt-4") || m.name.startsWith("chatgpt-4o")) &&
-!m.name.startsWith("gpt-4o-mini"),
-)
customModels += DEFAULT_MODELS.filter((m) => isGPT4Model(m.name))
.map((m) => "-" + m.name)
.join(",");
-if (
-(defaultModel.startsWith("gpt-4") ||
-defaultModel.startsWith("chatgpt-4o")) &&
-!defaultModel.startsWith("gpt-4o-mini")
-)
if (defaultModel && isGPT4Model(defaultModel)) {
defaultModel = "";
}
}

const isStability = !!process.env.STABILITY_API_KEY;
@@ -146,6 +159,10 @@ export const getServerSideConfig = () => {
const isAlibaba = !!process.env.ALIBABA_API_KEY;
const isMoonshot = !!process.env.MOONSHOT_API_KEY;
const isIflytek = !!process.env.IFLYTEK_API_KEY;
const isDeepSeek = !!process.env.DEEPSEEK_API_KEY;
const isXAI = !!process.env.XAI_API_KEY;
const isChatGLM = !!process.env.CHATGLM_API_KEY;
const isSiliconFlow = !!process.env.SILICONFLOW_API_KEY;
// const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
// const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim());
// const randomIndex = Math.floor(Math.random() * apiKeys.length);
@@ -208,11 +225,27 @@ export const getServerSideConfig = () => {
iflytekApiKey: process.env.IFLYTEK_API_KEY,
iflytekApiSecret: process.env.IFLYTEK_API_SECRET,

isDeepSeek,
deepseekUrl: process.env.DEEPSEEK_URL,
deepseekApiKey: getApiKey(process.env.DEEPSEEK_API_KEY),

isXAI,
xaiUrl: process.env.XAI_URL,
xaiApiKey: getApiKey(process.env.XAI_API_KEY),

isChatGLM,
chatglmUrl: process.env.CHATGLM_URL,
chatglmApiKey: getApiKey(process.env.CHATGLM_API_KEY),

cloudflareAccountId: process.env.CLOUDFLARE_ACCOUNT_ID,
cloudflareKVNamespaceId: process.env.CLOUDFLARE_KV_NAMESPACE_ID,
cloudflareKVApiKey: getApiKey(process.env.CLOUDFLARE_KV_API_KEY),
cloudflareKVTTL: process.env.CLOUDFLARE_KV_TTL,

isSiliconFlow,
siliconFlowUrl: process.env.SILICONFLOW_URL,
siliconFlowApiKey: getApiKey(process.env.SILICONFLOW_API_KEY),

gtmId: process.env.GTM_ID,
gaId: process.env.GA_ID || DEFAULT_GA_ID,
@@ -229,6 +262,8 @@ export const getServerSideConfig = () => {
disableFastLink: !!process.env.DISABLE_FAST_LINK,
customModels,
defaultModel,
visionModels,
allowedWebDavEndpoints,
enableMcp: process.env.ENABLE_MCP === "true",
};
};
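
The refactor above swaps the inlined GPT-4 check for `isGPT4Model` from `app/utils/model`. Judging from the removed condition, the helper plausibly reads as follows; this is inferred from the deleted lines, not quoted from the actual module:

```ts
// Inferred from the inlined condition removed above: gpt-4* and chatgpt-4o*
// count as GPT-4-class models, while gpt-4o-mini is deliberately exempt.
export function isGPT4Model(modelName: string): boolean {
  return (
    (modelName.startsWith("gpt-4") || modelName.startsWith("chatgpt-4o")) &&
    !modelName.startsWith("gpt-4o-mini")
  );
}
```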

View File

@@ -11,7 +11,6 @@ export const RUNTIME_CONFIG_DOM = "danger-runtime-config";
export const STABILITY_BASE_URL = "https://api.stability.ai";
-export const DEFAULT_API_HOST = "https://api.nextchat.dev";
export const OPENAI_BASE_URL = "https://api.openai.com";
export const ANTHROPIC_BASE_URL = "https://api.anthropic.com";
@@ -29,6 +28,14 @@ export const TENCENT_BASE_URL = "https://hunyuan.tencentcloudapi.com";
export const MOONSHOT_BASE_URL = "https://api.moonshot.cn";
export const IFLYTEK_BASE_URL = "https://spark-api-open.xf-yun.com";

export const DEEPSEEK_BASE_URL = "https://api.deepseek.com";
export const XAI_BASE_URL = "https://api.x.ai";
export const CHATGLM_BASE_URL = "https://open.bigmodel.cn";
export const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn";

export const CACHE_URL_PREFIX = "/api/cache";
export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`;
@@ -44,6 +51,7 @@ export enum Path {
SdNew = "/sd-new",
Artifacts = "/artifacts",
SearchChat = "/search-chat",
McpMarket = "/mcp-market",
}

export enum ApiPath {
@@ -60,6 +68,10 @@ export enum ApiPath {
Iflytek = "/api/iflytek",
Stability = "/api/stability",
Artifacts = "/api/artifacts",
XAI = "/api/xai",
ChatGLM = "/api/chatglm",
DeepSeek = "/api/deepseek",
SiliconFlow = "/api/siliconflow",
}

export enum SlotID {
@@ -82,6 +94,7 @@ export enum StoreKey {
Update = "chat-update",
Sync = "sync",
SdList = "sd-list",
Mcp = "mcp-store",
}

export const DEFAULT_SIDEBAR_WIDTH = 300;
@@ -97,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;
export const STORAGE_KEY = "chatgpt-next-web";

export const REQUEST_TIMEOUT_MS = 60000;
export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
@@ -112,6 +126,10 @@ export enum ServiceProvider {
Moonshot = "Moonshot",
Stability = "Stability",
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
SiliconFlow = "SiliconFlow",
}

// Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings
@@ -134,6 +152,10 @@ export enum ModelProvider {
Hunyuan = "Hunyuan",
Moonshot = "Moonshot",
Iflytek = "Iflytek",
XAI = "XAI",
ChatGLM = "ChatGLM",
DeepSeek = "DeepSeek",
SiliconFlow = "SiliconFlow",
}

export const Stability = {
@@ -199,7 +221,12 @@ export const ByteDance = {
export const Alibaba = {
ExampleEndpoint: ALIBABA_BASE_URL,
-ChatPath: "v1/services/aigc/text-generation/generation",
ChatPath: (modelName: string) => {
if (modelName.includes("vl") || modelName.includes("omni")) {
return "v1/services/aigc/multimodal-generation/generation";
}
return `v1/services/aigc/text-generation/generation`;
},
};
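
`Alibaba.ChatPath` is now derived from the model name: vision (`vl`) and omni models are routed to the multimodal generation endpoint, while every other model keeps the plain text endpoint. A quick usage sketch of the constant defined above:

```ts
// Endpoint routing by model name, using the Alibaba constant above.
Alibaba.ChatPath("qwen-vl-plus"); // "v1/services/aigc/multimodal-generation/generation"
Alibaba.ChatPath("qwen-omni-turbo"); // "v1/services/aigc/multimodal-generation/generation"
Alibaba.ChatPath("qwen-max"); // "v1/services/aigc/text-generation/generation"
```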
export const Tencent = {
@@ -216,6 +243,29 @@ export const Iflytek = {
ChatPath: "v1/chat/completions",
};
export const DeepSeek = {
ExampleEndpoint: DEEPSEEK_BASE_URL,
ChatPath: "chat/completions",
};
export const XAI = {
ExampleEndpoint: XAI_BASE_URL,
ChatPath: "v1/chat/completions",
};
export const ChatGLM = {
ExampleEndpoint: CHATGLM_BASE_URL,
ChatPath: "api/paas/v4/chat/completions",
ImagePath: "api/paas/v4/images/generations",
VideoPath: "api/paas/v4/videos/generations",
};
export const SiliconFlow = {
ExampleEndpoint: SILICONFLOW_BASE_URL,
ChatPath: "v1/chat/completions",
ListModelPath: "v1/models?&sub_type=chat",
};
export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
// export const DEFAULT_SYSTEM_TEMPLATE = `
// You are ChatGPT, a large language model trained by {{ServiceProvider}}.
@@ -234,8 +284,133 @@ Latex inline: \\(x^2\\)
Latex block: $$e=mc^2$$
`;
export const MCP_TOOLS_TEMPLATE = `
[clientId]
{{ clientId }}
[tools]
{{ tools }}
`;
export const MCP_SYSTEM_TEMPLATE = `
You are an AI assistant with access to system tools. Your role is to help users by combining natural language understanding with tool operations when needed.
1. AVAILABLE TOOLS:
{{ MCP_TOOLS }}
2. WHEN TO USE TOOLS:
- ALWAYS USE TOOLS when they can help answer user questions
- DO NOT just describe what you could do - TAKE ACTION immediately
- If you're not sure whether to use a tool, USE IT
- Common triggers for tool use:
* Questions about files or directories
* Requests to check, list, or manipulate system resources
* Any query that can be answered with available tools
3. HOW TO USE TOOLS:
A. Tool Call Format:
- Use markdown code blocks with format: \`\`\`json:mcp:{clientId}\`\`\`
- Always include:
* method: "tools/call" (only this method is supported)
* params:
- name: must match an available primitive name
- arguments: required parameters for the primitive
B. Response Format:
- Tool responses will come as user messages
- Format: \`\`\`json:mcp-response:{clientId}\`\`\`
- Wait for response before making another tool call
C. Important Rules:
- Only use tools/call method
- Only ONE tool call per message
- ALWAYS TAKE ACTION instead of just describing what you could do
- Include the correct clientId in code block language tag
- Verify arguments match the primitive's requirements
4. INTERACTION FLOW:
A. When user makes a request:
- IMMEDIATELY use appropriate tool if available
- DO NOT ask if user wants you to use the tool
- DO NOT just describe what you could do
B. After receiving tool response:
- Explain results clearly
- Take next appropriate action if needed
C. If tools fail:
- Explain the error
- Try alternative approach immediately
5. EXAMPLE INTERACTION:
good example:
\`\`\`json:mcp:filesystem
{
"method": "tools/call",
"params": {
"name": "list_allowed_directories",
"arguments": {}
}
}
\`\`\`
\`\`\`json:mcp-response:filesystem
{
"method": "tools/call",
"params": {
"name": "write_file",
"arguments": {
"path": "/Users/river/dev/nextchat/test/joke.txt",
"content": "为什么数学书总是感到忧伤?因为它有太多的问题。"
}
}
}
\`\`\`
the following is a WRONG mcp json example:
\`\`\`json:mcp:filesystem
{
"method": "write_file",
"params": {
"path": "NextChat_Information.txt",
"content": "1"
}
}
\`\`\`
This is wrong because the method is not tools/call.
\`\`\`{
"method": "search_repositories",
"params": {
"query": "2oeee"
}
}
\`\`\`
This is wrong because the method is not tools/call!!!!!!!!!!!
the right format is:
\`\`\`json:mcp:filesystem
{
"method": "tools/call",
"params": {
"name": "search_repositories",
"arguments": {
"query": "2oeee"
}
}
}
\`\`\`
please follow the format strictly ONLY use tools/call method!!!!!!!!!!!
`;
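
The template above makes the model emit each tool call as a fenced `json:mcp:{clientId}` block. A hedged sketch of how a client could extract such a call from assistant output; the regex and the `McpToolCall` shape are illustrative, not the app's actual MCP parser:

```ts
// Illustrative extraction of an MCP tool call from assistant text.
// The fence format comes from MCP_SYSTEM_TEMPLATE above; this parser is a sketch.
interface McpToolCall {
  clientId: string;
  method: "tools/call";
  params: { name: string; arguments: Record<string, unknown> };
}

function extractMcpCall(text: string): McpToolCall | null {
  const match = text.match(/```json:mcp:([^\s`]+)\s*\n([\s\S]*?)```/);
  if (!match) return null;
  const [, clientId, body] = match;
  const payload = JSON.parse(body);
  if (payload.method !== "tools/call") return null; // the only allowed method
  return { clientId, method: payload.method, params: payload.params };
}
```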
export const SUMMARIZE_MODEL = "gpt-4o-mini";
export const GEMINI_SUMMARIZE_MODEL = "gemini-pro";
export const DEEPSEEK_SUMMARIZE_MODEL = "deepseek-chat";

export const KnowledgeCutOffDate: Record<string, string> = {
default: "2021-09",
@@ -245,16 +420,25 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"gpt-4o": "2023-10",
"gpt-4o-2024-05-13": "2023-10",
"gpt-4o-2024-08-06": "2023-10",
"gpt-4o-2024-11-20": "2023-10",
"chatgpt-4o-latest": "2023-10",
"gpt-4o-mini": "2023-10",
"gpt-4o-mini-2024-07-18": "2023-10",
"gpt-4-vision-preview": "2023-04",
"o1-mini-2024-09-12": "2023-10",
"o1-mini": "2023-10",
"o1-preview-2024-09-12": "2023-10",
"o1-preview": "2023-10",
"o1-2024-12-17": "2023-10",
o1: "2023-10",
"o3-mini-2025-01-31": "2023-10",
"o3-mini": "2023-10",
// After improvements,
// it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
"gemini-pro": "2023-12",
"gemini-pro-vision": "2023-12",
"deepseek-chat": "2024-07",
"deepseek-coder": "2024-07",
};

export const DEFAULT_TTS_ENGINE = "OpenAI-TTS";
@@ -271,7 +455,27 @@ export const DEFAULT_TTS_VOICES = [
"shimmer",
];
export const VISION_MODEL_REGEXES = [
/vision/,
/gpt-4o/,
/claude-3/,
/gemini-1\.5/,
/gemini-exp/,
/gemini-2\.0/,
/learnlm/,
/qwen-vl/,
/qwen2-vl/,
/gpt-4-turbo(?!.*preview)/, // Matches "gpt-4-turbo" but not "gpt-4-turbo-preview"
/^dall-e-3$/, // Matches exactly "dall-e-3"
/glm-4v/,
/vl/i,
];
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
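
The two lists form an allow-then-deny filter: a model counts as vision-capable when at least one `VISION_MODEL_REGEXES` entry matches and no `EXCLUDE_VISION_MODEL_REGEXES` entry does. The app's own helper also honors the `VISION_MODELS` env override; the sketch below covers just the regex part:

```ts
// Allow-then-deny check over the regex lists above (regex part only).
function matchesVisionRegexes(model: string): boolean {
  return (
    VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
    !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model))
  );
}

matchesVisionRegexes("qwen-vl-max"); // true, matches /qwen-vl/
matchesVisionRegexes("claude-3-5-haiku-20241022"); // false, explicitly excluded
```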
const openaiModels = [
// As of July 2024, gpt-4o-mini should be used in place of gpt-3.5-turbo,
// as it is cheaper, more capable, multimodal, and just as fast. gpt-3.5-turbo is still available for use in the API.
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
@@ -284,6 +488,7 @@ const openaiModels = [
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"chatgpt-4o-latest",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
@@ -293,13 +498,33 @@ const openaiModels = [
"dall-e-3",
"o1-mini",
"o1-preview",
"o3-mini",
];

const googleModels = [
-"gemini-1.0-pro",
"gemini-1.0-pro", // Deprecated on 2/15/2025
"gemini-1.5-pro-latest",
"gemini-1.5-pro",
"gemini-1.5-pro-002",
"gemini-1.5-pro-exp-0827",
"gemini-1.5-flash-latest",
-"gemini-pro-vision",
"gemini-1.5-flash-8b-latest",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-1.5-flash-002",
"gemini-1.5-flash-exp-0827",
"learnlm-1.5-pro-experimental",
"gemini-exp-1114",
"gemini-exp-1121",
"gemini-exp-1206",
"gemini-2.0-flash",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
];

const anthropicModels = [
@@ -308,8 +533,15 @@ const anthropicModels = [
"claude-2.1",
"claude-3-sonnet-20240229",
"claude-3-opus-20240229",
"claude-3-opus-latest",
"claude-3-haiku-20240307",
"claude-3-5-haiku-20241022",
"claude-3-5-haiku-latest",
"claude-3-5-sonnet-20240620",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-7-sonnet-latest",
];

const baiduModels = [
@@ -343,6 +575,9 @@ const alibabaModes = [
"qwen-max-0403",
"qwen-max-0107",
"qwen-max-longcontext",
"qwen-omni-turbo",
"qwen-vl-plus",
"qwen-vl-max",
];

const tencentModels = [
@@ -365,6 +600,56 @@ const iflytekModels = [
"4.0Ultra",
];
const deepseekModels = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"];
const xAIModes = [
"grok-beta",
"grok-2",
"grok-2-1212",
"grok-2-latest",
"grok-vision-beta",
"grok-2-vision-1212",
"grok-2-vision",
"grok-2-vision-latest",
];
const chatglmModels = [
"glm-4-plus",
"glm-4-0520",
"glm-4",
"glm-4-air",
"glm-4-airx",
"glm-4-long",
"glm-4-flashx",
"glm-4-flash",
"glm-4v-plus",
"glm-4v",
"glm-4v-flash", // free
"cogview-3-plus",
"cogview-3",
"cogview-3-flash", // free
// currently cannot be adapted to the polling task flow
// "cogvideox",
// "cogvideox-flash", // free
];
const siliconflowModels = [
"Qwen/Qwen2.5-7B-Instruct",
"Qwen/Qwen2.5-72B-Instruct",
"deepseek-ai/DeepSeek-R1",
"deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
"deepseek-ai/DeepSeek-R1-Distill-Llama-8B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
"deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
"deepseek-ai/DeepSeek-V3",
"meta-llama/Llama-3.3-70B-Instruct",
"THUDM/glm-4-9b-chat",
"Pro/deepseek-ai/DeepSeek-R1",
"Pro/deepseek-ai/DeepSeek-V3",
];
let seq = 1000; // built-in model sequence generator, starting at 1000

export const DEFAULT_MODELS = [
...openaiModels.map((name) => ({
@@ -477,6 +762,50 @@ export const DEFAULT_MODELS = [
sorted: 10,
},
})),
...xAIModes.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "xai",
providerName: "XAI",
providerType: "xai",
sorted: 11,
},
})),
...chatglmModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "chatglm",
providerName: "ChatGLM",
providerType: "chatglm",
sorted: 12,
},
})),
...deepseekModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "deepseek",
providerName: "DeepSeek",
providerType: "deepseek",
sorted: 13,
},
})),
...siliconflowModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "siliconflow",
providerName: "SiliconFlow",
providerType: "siliconflow",
sorted: 14,
},
})),
] as const;

export const CHAT_PAGE_SIZE = 15;
@@ -496,11 +825,6 @@ export const internalAllowedWebDavEndpoints = [
];

export const DEFAULT_GA_ID = "G-89WN60ZK2E";
-export const PLUGINS = [
-{ name: "Plugins", path: Path.Plugins },
-{ name: "Stable Diffusion", path: Path.Sd },
-{ name: "Search Chat", path: Path.SearchChat },
-];

-export const SAAS_CHAT_URL = "https://nextchat.dev/chat";
export const SAAS_CHAT_URL = "https://nextchat.club";
-export const SAAS_CHAT_UTM_URL = "https://nextchat.dev/chat?utm=github";
export const SAAS_CHAT_UTM_URL = "https://nextchat.club?utm=github";

app/global.d.ts
View File

@@ -26,6 +26,13 @@ declare interface Window {
isPermissionGranted(): Promise<boolean>;
sendNotification(options: string | Options): void;
};
updater: {
checkUpdate(): Promise<UpdateResult>;
installUpdate(): Promise<void>;
onUpdaterEvent(
handler: (status: UpdateStatusResult) => void,
): Promise<UnlistenFn>;
};
http: {
fetch<T>(
url: string,

app/icons/headphone.svg
View File

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="16" height="16" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M4 28C4 26.8954 4.89543 26 6 26H10V38H6C4.89543 38 4 37.1046 4 36V28Z" fill="none" />
<path d="M38 26H42C43.1046 26 44 26.8954 44 28V36C44 37.1046 43.1046 38 42 38H38V26Z"
fill="none" />
<path
d="M10 36V24C10 16.268 16.268 10 24 10C31.732 10 38 16.268 38 24V36M10 26H6C4.89543 26 4 26.8954 4 28V36C4 37.1046 4.89543 38 6 38H10V26ZM38 26H42C43.1046 26 44 26.8954 44 28V36C44 37.1046 43.1046 38 42 38H38V26Z"
stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M16 32H20L22 26L26 38L28 32H32" stroke="#333" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
</svg>


View File

@@ -0,0 +1,14 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>ChatGLM</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<defs>
<linearGradient id="lobe-icons-chatglm-fill" x1="-18.756%" x2="70.894%" y1="49.371%" y2="90.944%">
<stop offset="0%" stop-color="#504AF4"></stop>
<stop offset="100%" stop-color="#3485FF"></stop>
</linearGradient>
</defs>
<path d="M9.917 2c4.906 0 10.178 3.947 8.93 10.58-.014.07-.037.14-.057.21l-.003-.277c-.083-3-1.534-8.934-8.87-8.934-3.393 0-8.137 3.054-7.93 8.158-.04 4.778 3.555 8.4 7.95 8.332l.073-.001c1.2-.033 2.763-.429 3.1-1.657.063-.031.26.534.268.598.048.256.112.369.192.34.981-.348 2.286-1.222 1.952-2.38-.176-.61-1.775-.147-1.921-.347.418-.979 2.234-.926 3.153-.716.443.102.657.38 1.012.442.29.052.981-.2.96.242-1.5 3.042-4.893 5.41-8.808 5.41C3.654 22 0 16.574 0 11.737 0 5.947 4.959 2 9.917 2zM9.9 5.3c.484 0 1.125.225 1.38.585 3.669.145 4.313 2.686 4.694 5.444.255 1.838.315 2.3.182 1.387l.083.59c.068.448.554.737.982.516.144-.075.254-.231.328-.47a.2.2 0 01.258-.13l.625.22a.2.2 0 01.124.238 2.172 2.172 0 01-.51.92c-.878.917-2.757.664-3.08-.62-.14-.554-.055-.626-.345-1.242-.292-.621-1.238-.709-1.69-.295-.345.315-.407.805-.406 1.282L12.6 15.9a.9.9 0 01-.9.9h-1.4a.9.9 0 01-.9-.9v-.65a1.15 1.15 0 10-2.3 0v.65a.9.9 0 01-.9.9H4.8a.9.9 0 01-.9-.9l.035-3.239c.012-1.884.356-3.658 2.47-4.134.2-.045.252.13.29.342.025.154.043.252.053.294.701 3.058 1.75 4.299 3.144 3.722l.66-.331.254-.13c.158-.082.25-.131.276-.15.012-.01-.165-.206-.407-.464l-1.012-1.067a8.925 8.925 0 01-.199-.216c-.047-.034-.116.068-.208.306-.074.157-.251.252-.272.326-.013.058.108.298.362.72.164.288.22.508-.31.343-1.04-.8-1.518-2.273-1.684-3.725-.004-.035-.162-1.913-.162-1.913a1.2 1.2 0 011.113-1.281L9.9 5.3zm12.994 8.68c.037.697-.403.704-1.213.591l-1.783-.276c-.265-.053-.385-.099-.313-.147.47-.315 3.268-.93 3.31-.168zm-.915-.083l-.926.042c-.85.077-1.452.24.338.336l.103.003c.815.012 1.264-.359.485-.381zm1.667-3.601h.01c.79.398.067 1.03-.65 1.393-.14.07-.491.176-1.052.315-.241.04-.457.092-.333.16l.01.005c1.952.958-3.123 1.534-2.495 1.285l.38-.148c.68-.266 1.614-.682 1.666-1.337.038-.48 1.253-.442 1.493-.968.048-.106 0-.236-.144-.389-.05-.047-.094-.094-.107-.148-.073-.305.7-.431 1.222-.168zm-2.568-.474c-.135 1.198-2.479 4.192-1.949 2.863l.017-.042c.298-.717.376-2.221 1.337-3.221.25-.26.636.035.595.4zm-7.976-.253c.02-.694 1.002-.968 1.346-.347.01-1.274-1.941-.768-1.346.347z"
fill="url(#lobe-icons-chatglm-fill)" fill-rule="evenodd"></path>
</g>
</svg>

@@ -0,0 +1,8 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Claude</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<path d="M4.709 15.955l4.72-2.647.08-.23-.08-.128H9.2l-.79-.048-2.698-.073-2.339-.097-2.266-.122-.571-.121L0 11.784l.055-.352.48-.321.686.06 1.52.103 2.278.158 1.652.097 2.449.255h.389l.055-.157-.134-.098-.103-.097-2.358-1.596-2.552-1.688-1.336-.972-.724-.491-.364-.462-.158-1.008.656-.722.881.06.225.061.893.686 1.908 1.476 2.491 1.833.365.304.145-.103.019-.073-.164-.274-1.355-2.446-1.446-2.49-.644-1.032-.17-.619a2.97 2.97 0 01-.104-.729L6.283.134 6.696 0l.996.134.42.364.62 1.414 1.002 2.229 1.555 3.03.456.898.243.832.091.255h.158V9.01l.128-1.706.237-2.095.23-2.695.08-.76.376-.91.747-.492.584.28.48.685-.067.444-.286 1.851-.559 2.903-.364 1.942h.212l.243-.242.985-1.306 1.652-2.064.73-.82.85-.904.547-.431h1.033l.76 1.129-.34 1.166-1.064 1.347-.881 1.142-1.264 1.7-.79 1.36.073.11.188-.02 2.856-.606 1.543-.28 1.841-.315.833.388.091.395-.328.807-1.969.486-2.309.462-3.439.813-.042.03.049.061 1.549.146.662.036h1.622l3.02.225.79.522.474.638-.079.485-1.215.62-1.64-.389-3.829-.91-1.312-.329h-.182v.11l1.093 1.068 2.006 1.81 2.509 2.33.127.578-.322.455-.34-.049-2.205-1.657-.851-.747-1.926-1.62h-.128v.17l.444.649 2.345 3.521.122 1.08-.17.353-.608.213-.668-.122-1.374-1.925-1.415-2.167-1.143-1.943-.14.08-.674 7.254-.316.37-.729.28-.607-.461-.322-.747.322-1.476.389-1.924.315-1.53.286-1.9.17-.632-.012-.042-.14.018-1.434 1.967-2.18 2.945-1.726 1.845-.414.164-.717-.37.067-.662.401-.589 2.388-3.036 1.44-1.882.93-1.086-.006-.158h-.055L4.132 18.56l-1.13.146-.487-.456.061-.746.231-.243 1.908-1.312-.006.006z"
fill="#D97757" fill-rule="nonzero"></path>
</g>
</svg>

@@ -0,0 +1,8 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>DeepSeek</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(4, 4)">
<path d="M23.748 4.482c-.254-.124-.364.113-.512.234-.051.039-.094.09-.137.136-.372.397-.806.657-1.373.626-.829-.046-1.537.214-2.163.848-.133-.782-.575-1.248-1.247-1.548-.352-.156-.708-.311-.955-.65-.172-.241-.219-.51-.305-.774-.055-.16-.11-.323-.293-.35-.2-.031-.278.136-.356.276-.313.572-.434 1.202-.422 1.84.027 1.436.633 2.58 1.838 3.393.137.093.172.187.129.323-.082.28-.18.552-.266.833-.055.179-.137.217-.329.14a5.526 5.526 0 01-1.736-1.18c-.857-.828-1.631-1.742-2.597-2.458a11.365 11.365 0 00-.689-.471c-.985-.957.13-1.743.388-1.836.27-.098.093-.432-.779-.428-.872.004-1.67.295-2.687.684a3.055 3.055 0 01-.465.137 9.597 9.597 0 00-2.883-.102c-1.885.21-3.39 1.102-4.497 2.623C.082 8.606-.231 10.684.152 12.85c.403 2.284 1.569 4.175 3.36 5.653 1.858 1.533 3.997 2.284 6.438 2.14 1.482-.085 3.133-.284 4.994-1.86.47.234.962.327 1.78.397.63.059 1.236-.03 1.705-.128.735-.156.684-.837.419-.961-2.155-1.004-1.682-.595-2.113-.926 1.096-1.296 2.746-2.642 3.392-7.003.05-.347.007-.565 0-.845-.004-.17.035-.237.23-.256a4.173 4.173 0 001.545-.475c1.396-.763 1.96-2.015 2.093-3.517.02-.23-.004-.467-.247-.588zM11.581 18c-2.089-1.642-3.102-2.183-3.52-2.16-.392.024-.321.471-.235.763.09.288.207.486.371.739.114.167.192.416-.113.603-.673.416-1.842-.14-1.897-.167-1.361-.802-2.5-1.86-3.301-3.307-.774-1.393-1.224-2.887-1.298-4.482-.02-.386.093-.522.477-.592a4.696 4.696 0 011.529-.039c2.132.312 3.946 1.265 5.468 2.774.868.86 1.525 1.887 2.202 2.891.72 1.066 1.494 2.082 2.48 2.914.348.292.625.514.891.677-.802.09-2.14.11-3.054-.614zm1-6.44a.306.306 0 01.415-.287.302.302 0 01.2.288.306.306 0 01-.31.307.303.303 0 01-.304-.308zm3.11 1.596c-.2.081-.399.151-.59.16a1.245 1.245 0 01-.798-.254c-.274-.23-.47-.358-.552-.758a1.73 1.73 0 01.016-.588c.07-.327-.008-.537-.239-.727-.187-.156-.426-.199-.688-.199a.559.559 0 01-.254-.078c-.11-.054-.2-.19-.114-.358.028-.054.16-.186.192-.21.356-.202.767-.136 1.146.016.352.144.618.408 1.001.782.391.451.462.576.685.914.176.265.336.537.445.848.067.195-.019.354-.25.452z"
fill="#4D6BFE"></path>
</g>
</svg>

@@ -0,0 +1,27 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="30" height="30" fill="none"
viewBox="0 0 30 30">
<defs>
<rect id="path_0" width="30" height="30" x="0" y="0"/>
<rect id="path_1" width="20.455" height="20.455" x="0" y="0"/>
</defs>
<g opacity="1" transform="translate(0 0) rotate(0 14.999999999999998 14.999999999999998)">
<rect width="30" height="30" x="0" y="0" fill="#E7F8FF" opacity="1" rx="10"
transform="translate(0 0) rotate(0 14.999999999999998 14.999999999999998)"/>
<mask id="bg-mask-0" fill="#fff">
<use xlink:href="#path_0"/>
</mask>
<g mask="url(#bg-mask-0)">
<g opacity="1"
transform="translate(4.772727272727272 4.772727272727273) rotate(0 10.227272727272725 10.227272727272725)">
<mask id="bg-mask-1" fill="#fff">
<use xlink:href="#path_1"/>
</mask>
<g mask="url(#bg-mask-1)">
<path id="分组 1" fill-rule="evenodd" style="fill:#1f948c"
d="M19.11 8.37L19.11 8.37C19.28 7.85 19.37 7.31 19.37 6.76C19.37 5.86 19.13 4.97 18.66 4.19C17.73 2.59 16 1.6 14.13 1.6C13.76 1.6 13.4 1.64 13.04 1.71C12.06 0.62 10.65 0 9.17 0L9.14 0L9.13 0C6.86 0 4.86 1.44 4.16 3.57C2.7 3.86 1.44 4.76 0.71 6.04C0.24 6.83 0 7.72 0 8.63C0 9.9 0.48 11.14 1.35 12.08C1.17 12.6 1.08 13.15 1.08 13.69C1.08 14.6 1.33 15.49 1.79 16.27C2.92 18.21 5.2 19.21 7.42 18.74C8.4 19.83 9.8 20.45 11.28 20.45L11.31 20.45L11.33 20.45C13.59 20.45 15.6 19.01 16.3 16.88C17.76 16.59 19.01 15.69 19.75 14.41C20.21 13.63 20.45 12.74 20.45 11.83C20.45 10.55 19.97 9.32 19.11 8.37Z M8.94734 18.1579C8.90734 18.1879 8.86734 18.2079 8.82734 18.2279C9.52734 18.8079 10.3973 19.1179 11.3073 19.1179L11.3173 19.1179C13.4573 19.1179 15.1973 17.3979 15.1973 15.2879L15.1973 10.5279C15.1973 10.5079 15.1773 10.4879 15.1573 10.4779L13.4173 9.48792L13.4173 15.2379C13.4173 15.4679 13.2873 15.6879 13.0773 15.8079L8.94734 18.1579Z M8.27654 17.0048L12.4465 14.6248C12.4665 14.6148 12.4765 14.5948 12.4765 14.5748L12.4765 14.5748L12.4765 12.5848L7.43654 15.4548C7.22654 15.5748 6.96654 15.5748 6.75654 15.4548L2.62654 13.1048C2.58654 13.0848 2.53654 13.0448 2.50654 13.0348C2.46654 13.2448 2.44654 13.4648 2.44654 13.6848C2.44654 14.3548 2.62654 15.0148 2.96654 15.6048L2.96654 15.5948C3.66654 16.7848 4.94654 17.5148 6.33654 17.5148C7.01654 17.5148 7.68654 17.3348 8.27654 17.0048Z M3.90324 5.16818C3.90324 5.12818 3.90324 5.06818 3.90324 5.02818C3.05324 5.33818 2.33324 5.92818 1.88324 6.70818L1.88324 6.70818C1.54324 7.28818 1.36324 7.94818 1.36324 8.61818C1.36324 9.98818 2.10324 11.2582 3.30324 11.9482L7.47324 14.3182C7.49324 14.3282 7.51324 14.3282 7.53324 14.3182L9.28324 13.3182L4.24324 10.4482C4.03324 10.3382 3.90324 10.1182 3.90324 9.87818L3.90324 9.87818L3.90324 5.16818Z M17.1561 8.50521L12.9761 6.1252C12.9561 6.1252 12.9361 6.1252 12.9161 6.1352L11.1761 7.1252L16.2161 9.9952C16.4261 10.1152 16.5561 10.3352 16.5561 10.5752C16.5561 10.5752 16.5561 10.5752 16.5561 10.5752L16.5561 15.4252C18.0761 14.8652 19.0961 13.4352 19.0961 11.8252C19.0961 10.4552 18.3561 9.1952 17.1561 8.50521Z M8.01418 5.82927C7.99418 5.83927 7.98418 5.85927 7.98418 5.87927L7.98418 5.87927L7.98418 7.86927L13.0242 4.99927C13.1242 4.93927 13.2442 4.90927 13.3642 4.90927C13.4842 4.90927 13.5942 4.93927 13.7042 4.99927L17.8342 7.34927C17.8742 7.36927 17.9142 7.39927 17.9542 7.41927L17.9542 7.41927C17.9842 7.20927 18.0042 6.98927 18.0042 6.76927C18.0042 4.65927 16.2642 2.93927 14.1242 2.93927C13.4442 2.93927 12.7742 3.11927 12.1842 3.44927L8.01418 5.82927Z M9.14676 1.33731C6.99676 1.33731 5.25676 3.05731 5.25676 5.16731L5.25676 9.92731C5.25676 9.94731 5.27676 9.95731 5.28676 9.96731L7.03676 10.9673L7.03676 5.22731L7.03676 5.21731C7.03676 4.98731 7.16676 4.76731 7.37676 4.64731L11.5068 2.29731C11.5468 2.26731 11.5968 2.23731 11.6268 2.22731C10.9268 1.64731 10.0468 1.33731 9.14676 1.33731Z M7.98345 11.5093L10.2235 12.7793L12.4735 11.5093L12.4735 8.9493L10.2235 7.6693L7.98345 8.9493L7.98345 11.5093Z"
opacity="1" transform="translate(0 0) rotate(0 10.227272727272725 10.227272727272725)"/>
</g>
</g>
</g>
</g>
</svg>

@@ -0,0 +1,14 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Doubao</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<path d="M5.31 15.756c.172-3.75 1.883-5.999 2.549-6.739-3.26 2.058-5.425 5.658-6.358 8.308v1.12C1.501 21.513 4.226 24 7.59 24a6.59 6.59 0 002.2-.375c.353-.12.7-.248 1.039-.378.913-.899 1.65-1.91 2.243-2.992-4.877 2.431-7.974.072-7.763-4.5l.002.001z"
fill="#1E37FC"></path>
<path d="M22.57 10.283c-1.212-.901-4.109-2.404-7.397-2.8.295 3.792.093 8.766-2.1 12.773a12.782 12.782 0 01-2.244 2.992c3.764-1.448 6.746-3.457 8.596-5.219 2.82-2.683 3.353-5.178 3.361-6.66a2.737 2.737 0 00-.216-1.084v-.002z"
fill="#37E1BE"></path>
<path d="M14.303 1.867C12.955.7 11.248 0 9.39 0 7.532 0 5.883.677 4.545 1.807 2.791 3.29 1.627 5.557 1.5 8.125v9.201c.932-2.65 3.097-6.25 6.357-8.307.5-.318 1.025-.595 1.569-.829 1.883-.801 3.878-.932 5.746-.706-.222-2.83-.718-5.002-.87-5.617h.001z"
fill="#A569FF"></path>
<path d="M17.305 4.961a199.47 199.47 0 01-1.08-1.094c-.202-.213-.398-.419-.586-.622l-1.333-1.378c.151.615.648 2.786.869 5.617 3.288.395 6.185 1.898 7.396 2.8-1.306-1.275-3.475-3.487-5.266-5.323z"
fill="#1E37FC"></path>
</g>
</svg>

@@ -0,0 +1,15 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Gemini</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<defs>
<linearGradient id="lobe-icons-gemini-fill" x1="0%" x2="68.73%" y1="100%" y2="30.395%">
<stop offset="0%" stop-color="#1C7DFF"></stop>
<stop offset="52.021%" stop-color="#1C69FF"></stop>
<stop offset="100%" stop-color="#F0DCD6"></stop>
</linearGradient>
</defs>
<path d="M12 24A14.304 14.304 0 000 12 14.304 14.304 0 0012 0a14.305 14.305 0 0012 12 14.305 14.305 0 00-12 12"
fill="url(#lobe-icons-gemini-fill)" fill-rule="nonzero"></path>
</g>
</svg>

@@ -0,0 +1,15 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Gemma</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<defs>
<linearGradient id="lobe-icons-gemma-fill" x1="24.419%" x2="75.194%" y1="75.581%" y2="25.194%">
<stop offset="0%" stop-color="#446EFF"></stop>
<stop offset="36.661%" stop-color="#2E96FF"></stop>
<stop offset="83.221%" stop-color="#B1C5FF"></stop>
</linearGradient>
</defs>
<path d="M12.34 5.953a8.233 8.233 0 01-.247-1.125V3.72a8.25 8.25 0 015.562 2.232H12.34zm-.69 0c.113-.373.199-.755.257-1.145V3.72a8.25 8.25 0 00-5.562 2.232h5.304zm-5.433.187h5.373a7.98 7.98 0 01-.267.696 8.41 8.41 0 01-1.76 2.65L6.216 6.14zm-.264-.187H2.977v.187h2.915a8.436 8.436 0 00-2.357 5.767H0v.186h3.535a8.436 8.436 0 002.357 5.767H2.977v.186h2.976v2.977h.187v-2.915a8.436 8.436 0 005.767 2.357V24h.186v-3.535a8.436 8.436 0 005.767-2.357v2.915h.186v-2.977h2.977v-.186h-2.915a8.436 8.436 0 002.357-5.767H24v-.186h-3.535a8.436 8.436 0 00-2.357-5.767h2.915v-.187h-2.977V2.977h-.186v2.915a8.436 8.436 0 00-5.767-2.357V0h-.186v3.535A8.436 8.436 0 006.14 5.892V2.977h-.187v2.976zm6.14 14.326a8.25 8.25 0 005.562-2.233H12.34c-.108.367-.19.743-.247 1.126v1.107zm-.186-1.087a8.015 8.015 0 00-.258-1.146H6.345a8.25 8.25 0 005.562 2.233v-1.087zm-8.186-7.285h1.107a8.23 8.23 0 001.125-.247V6.345a8.25 8.25 0 00-2.232 5.562zm1.087.186H3.72a8.25 8.25 0 002.232 5.562v-5.304a8.012 8.012 0 00-1.145-.258zm15.47-.186a8.25 8.25 0 00-2.232-5.562v5.315c.367.108.743.19 1.126.247h1.107zm-1.086.186c-.39.058-.772.144-1.146.258v5.304a8.25 8.25 0 002.233-5.562h-1.087zm-1.332 5.69V12.41a7.97 7.97 0 00-.696.267 8.409 8.409 0 00-2.65 1.76l3.346 3.346zm0-6.18v-5.45l-.012-.013h-5.451c.076.235.162.468.26.696a8.698 8.698 0 001.819 2.688 8.698 8.698 0 002.688 1.82c.228.097.46.183.696.259zM6.14 17.848V12.41c.235.078.468.167.696.267a8.403 8.403 0 012.688 1.799 8.404 8.404 0 011.799 2.688c.1.228.19.46.267.696H6.152l-.012-.012zm0-6.245V6.326l3.29 3.29a8.716 8.716 0 01-2.594 1.728 8.14 8.14 0 01-.696.259zm6.257 6.257h5.277l-3.29-3.29a8.716 8.716 0 00-1.728 2.594 8.135 8.135 0 00-.259.696zm-2.347-7.81a9.435 9.435 0 01-2.88 1.96 9.14 9.14 0 012.88 1.94 9.14 9.14 0 011.94 2.88 9.435 9.435 0 011.96-2.88 9.14 9.14 0 012.88-1.94 9.435 9.435 0 01-2.88-1.96 9.434 9.434 0 01-1.96-2.88 9.14 9.14 0 01-1.94 2.88z"
fill="url(#lobe-icons-gemma-fill)" fill-rule="evenodd"></path>
</g>
</svg>

@@ -0,0 +1,8 @@
<svg fill="#333" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30"
width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Grok</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<path d="M6.469 8.776L16.512 23h-4.464L2.005 8.776H6.47zm-.004 7.9l2.233 3.164L6.467 23H2l4.465-6.324zM22 2.582V23h-3.659V7.764L22 2.582zM22 1l-9.952 14.095-2.233-3.163L17.533 1H22z"></path>
</g>
</svg>

@@ -0,0 +1,17 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Hunyuan</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<g fill="none" fill-rule="evenodd">
<circle cx="12" cy="12" fill="#0055E9" r="12"></circle>
<path d="M12 0c.518 0 1.028.033 1.528.096A6.188 6.188 0 0112.12 12.28l-.12.001c-2.99 0-5.242 2.179-5.554 5.11-.223 2.086.353 4.412 2.242 6.146C3.672 22.1 0 17.479 0 12 0 5.373 5.373 0 12 0z"
fill="#A8DFF5"></path>
<path d="M5.286 5a2.438 2.438 0 01.682 3.38c-3.962 5.966-3.215 10.743 2.648 15.136C3.636 22.056 0 17.452 0 12c0-1.787.39-3.482 1.09-5.006.253-.435.525-.872.817-1.311A2.438 2.438 0 015.286 5z"
fill="#0055E9"></path>
<path d="M12.98.04c.272.021.543.053.81.093.583.106 1.117.254 1.538.44 6.638 2.927 8.07 10.052 1.748 15.642a4.125 4.125 0 01-5.822-.358c-1.51-1.706-1.3-4.184.357-5.822.858-.848 3.108-1.223 4.045-2.441 1.257-1.634 2.122-6.009-2.523-7.506L12.98.039z"
fill="#00BCFF"></path>
<path d="M13.528.096A6.187 6.187 0 0112 12.281a5.75 5.75 0 00-1.71.255c.147-.905.595-1.784 1.321-2.501.858-.848 3.108-1.223 4.045-2.441 1.27-1.651 2.14-6.104-2.676-7.554.184.014.367.033.548.056z"
fill="#ECECEE"></path>
</g>
</g>
</svg>

@@ -0,0 +1,93 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Meta</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<defs>
<linearGradient id="lobe-icons-meta-fill-0" x1="75.897%" x2="26.312%" y1="89.199%" y2="12.194%">
<stop offset=".06%" stop-color="#0867DF"></stop>
<stop offset="45.39%" stop-color="#0668E1"></stop>
<stop offset="85.91%" stop-color="#0064E0"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-1" x1="21.67%" x2="97.068%" y1="75.874%" y2="23.985%">
<stop offset="13.23%" stop-color="#0064DF"></stop>
<stop offset="99.88%" stop-color="#0064E0"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-2" x1="38.263%" x2="60.895%" y1="89.127%" y2="16.131%">
<stop offset="1.47%" stop-color="#0072EC"></stop>
<stop offset="68.81%" stop-color="#0064DF"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-3" x1="47.032%" x2="52.15%" y1="90.19%" y2="15.745%">
<stop offset="7.31%" stop-color="#007CF6"></stop>
<stop offset="99.43%" stop-color="#0072EC"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-4" x1="52.155%" x2="47.591%" y1="58.301%" y2="37.004%">
<stop offset="7.31%" stop-color="#007FF9"></stop>
<stop offset="100%" stop-color="#007CF6"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-5" x1="37.689%" x2="61.961%" y1="12.502%" y2="63.624%">
<stop offset="7.31%" stop-color="#007FF9"></stop>
<stop offset="100%" stop-color="#0082FB"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-6" x1="34.808%" x2="62.313%" y1="68.859%" y2="23.174%">
<stop offset="27.99%" stop-color="#007FF8"></stop>
<stop offset="91.41%" stop-color="#0082FB"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-7" x1="43.762%" x2="57.602%" y1="6.235%" y2="98.514%">
<stop offset="0%" stop-color="#0082FB"></stop>
<stop offset="99.95%" stop-color="#0081FA"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-8" x1="60.055%" x2="39.88%" y1="4.661%" y2="69.077%">
<stop offset="6.19%" stop-color="#0081FA"></stop>
<stop offset="100%" stop-color="#0080F9"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-9" x1="30.282%" x2="61.081%" y1="59.32%" y2="33.244%">
<stop offset="0%" stop-color="#027AF3"></stop>
<stop offset="100%" stop-color="#0080F9"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-10" x1="20.433%" x2="82.112%" y1="50.001%" y2="50.001%">
<stop offset="0%" stop-color="#0377EF"></stop>
<stop offset="99.94%" stop-color="#0279F1"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-11" x1="40.303%" x2="72.394%" y1="35.298%" y2="57.811%">
<stop offset=".19%" stop-color="#0471E9"></stop>
<stop offset="100%" stop-color="#0377EF"></stop>
</linearGradient>
<linearGradient id="lobe-icons-meta-fill-12" x1="32.254%" x2="68.003%" y1="19.719%" y2="84.908%">
<stop offset="27.65%" stop-color="#0867DF"></stop>
<stop offset="100%" stop-color="#0471E9"></stop>
</linearGradient>
</defs>
<g fill="none" fill-rule="nonzero">
<path d="M6.897 4h-.024l-.031 2.615h.022c1.715 0 3.046 1.357 5.94 6.246l.175.297.012.02 1.62-2.438-.012-.019a48.763 48.763 0 00-1.098-1.716 28.01 28.01 0 00-1.175-1.629C10.413 4.932 8.812 4 6.896 4z"
fill="url(#lobe-icons-meta-fill-0)"></path>
<path d="M6.873 4C4.95 4.01 3.247 5.258 2.02 7.17a4.352 4.352 0 00-.01.017l2.254 1.231.011-.017c.718-1.083 1.61-1.774 2.568-1.785h.021L6.896 4h-.023z"
fill="url(#lobe-icons-meta-fill-1)"></path>
<path d="M2.019 7.17l-.011.017C1.2 8.447.598 9.995.274 11.664l-.005.022 2.534.6.004-.022c.27-1.467.786-2.828 1.456-3.845l.011-.017L2.02 7.17z"
fill="url(#lobe-icons-meta-fill-2)"></path>
<path d="M2.807 12.264l-2.533-.6-.005.022c-.177.918-.267 1.851-.269 2.786v.023l2.598.233v-.023a12.591 12.591 0 01.21-2.44z"
fill="url(#lobe-icons-meta-fill-3)"></path>
<path d="M2.677 15.537a5.462 5.462 0 01-.079-.813v-.022L0 14.468v.024a8.89 8.89 0 00.146 1.652l2.535-.585a4.106 4.106 0 01-.004-.022z"
fill="url(#lobe-icons-meta-fill-4)"></path>
<path d="M3.27 16.89c-.284-.31-.484-.756-.589-1.328l-.004-.021-2.535.585.004.021c.192 1.01.568 1.85 1.106 2.487l.014.017 2.018-1.745a2.106 2.106 0 01-.015-.016z"
fill="url(#lobe-icons-meta-fill-5)"></path>
<path d="M10.78 9.654c-1.528 2.35-2.454 3.825-2.454 3.825-2.035 3.2-2.739 3.917-3.871 3.917a1.545 1.545 0 01-1.186-.508l-2.017 1.744.014.017C2.01 19.518 3.058 20 4.356 20c1.963 0 3.374-.928 5.884-5.33l1.766-3.13a41.283 41.283 0 00-1.227-1.886z"
fill="#0082FB"></path>
<path d="M13.502 5.946l-.016.016c-.4.43-.786.908-1.16 1.416.378.483.768 1.024 1.175 1.63.48-.743.928-1.345 1.367-1.807l.016-.016-1.382-1.24z"
fill="url(#lobe-icons-meta-fill-6)"></path>
<path d="M20.918 5.713C19.853 4.633 18.583 4 17.225 4c-1.432 0-2.637.787-3.723 1.944l-.016.016 1.382 1.24.016-.017c.715-.747 1.408-1.12 2.176-1.12.826 0 1.6.39 2.27 1.075l.015.016 1.589-1.425-.016-.016z"
fill="#0082FB"></path>
<path d="M23.998 14.125c-.06-3.467-1.27-6.566-3.064-8.396l-.016-.016-1.588 1.424.015.016c1.35 1.392 2.277 3.98 2.361 6.971v.023h2.292v-.022z"
fill="url(#lobe-icons-meta-fill-7)"></path>
<path d="M23.998 14.15v-.023h-2.292v.022c.004.14.006.282.006.424 0 .815-.121 1.474-.368 1.95l-.011.022 1.708 1.782.013-.02c.62-.96.946-2.293.946-3.91 0-.083 0-.165-.002-.247z"
fill="url(#lobe-icons-meta-fill-8)"></path>
<path d="M21.344 16.52l-.011.02c-.214.402-.519.67-.917.787l.778 2.462a3.493 3.493 0 00.438-.182 3.558 3.558 0 001.366-1.218l.044-.065.012-.02-1.71-1.784z"
fill="url(#lobe-icons-meta-fill-9)"></path>
<path d="M19.92 17.393c-.262 0-.492-.039-.718-.14l-.798 2.522c.449.153.927.222 1.46.222.492 0 .943-.073 1.352-.215l-.78-2.462c-.167.05-.341.075-.517.073z"
fill="url(#lobe-icons-meta-fill-10)"></path>
<path d="M18.323 16.534l-.014-.017-1.836 1.914.016.017c.637.682 1.246 1.105 1.937 1.337l.797-2.52c-.291-.125-.573-.353-.9-.731z"
fill="url(#lobe-icons-meta-fill-11)"></path>
<path d="M18.309 16.515c-.55-.642-1.232-1.712-2.303-3.44l-1.396-2.336-.011-.02-1.62 2.438.012.02.989 1.668c.959 1.61 1.74 2.774 2.493 3.585l.016.016 1.834-1.914a2.353 2.353 0 01-.014-.017z"
fill="url(#lobe-icons-meta-fill-12)"></path>
</g>
</g>
</svg>

@@ -0,0 +1,15 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Mistral</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<g fill="none" fill-rule="nonzero">
<path d="M15 6v4h-2V6h2zm4-4v4h-2V2h2zM3 2H1h2zM1 2h2v20H1V2zm8 12h2v4H9v-4zm8 0h2v8h-2v-8z"
fill="#000"></path>
<path d="M19 2h4v4h-4V2zM3 2h4v4H3V2z" fill="#F7D046"></path>
<path d="M15 10V6h8v4h-8zM3 10V6h8v4H3z" fill="#F2A73B"></path>
<path d="M3 14v-4h20v4z" fill="#EE792F"></path>
<path d="M11 14h4v4h-4v-4zm8 0h4v4h-4v-4zM3 14h4v4H3v-4z" fill="#EB5829"></path>
<path d="M19 18h4v4h-4v-4zM3 18h4v4H3v-4z" fill="#EA3326"></path>
</g>
</g>
</svg>

@@ -0,0 +1,8 @@
<svg fill="#333" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30"
width="1em" xmlns="http://www.w3.org/2000/svg">
<title>MoonshotAI</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<path d="M1.052 16.916l9.539 2.552a21.007 21.007 0 00.06 2.033l5.956 1.593a11.997 11.997 0 01-5.586.865l-.18-.016-.044-.004-.084-.009-.094-.01a11.605 11.605 0 01-.157-.02l-.107-.014-.11-.016a11.962 11.962 0 01-.32-.051l-.042-.008-.075-.013-.107-.02-.07-.015-.093-.019-.075-.016-.095-.02-.097-.023-.094-.022-.068-.017-.088-.022-.09-.024-.095-.025-.082-.023-.109-.03-.062-.02-.084-.025-.093-.028-.105-.034-.058-.019-.08-.026-.09-.031-.066-.024a6.293 6.293 0 01-.044-.015l-.068-.025-.101-.037-.057-.022-.08-.03-.087-.035-.088-.035-.079-.032-.095-.04-.063-.028-.063-.027a5.655 5.655 0 01-.041-.018l-.066-.03-.103-.047-.052-.024-.096-.046-.062-.03-.084-.04-.086-.044-.093-.047-.052-.027-.103-.055-.057-.03-.058-.032a6.49 6.49 0 01-.046-.026l-.094-.053-.06-.034-.051-.03-.072-.041-.082-.05-.093-.056-.052-.032-.084-.053-.061-.039-.079-.05-.07-.047-.053-.035a7.785 7.785 0 01-.054-.036l-.044-.03-.044-.03a6.066 6.066 0 01-.04-.028l-.057-.04-.076-.054-.069-.05-.074-.054-.056-.042-.076-.057-.076-.059-.086-.067-.045-.035-.064-.052-.074-.06-.089-.073-.046-.039-.046-.039a7.516 7.516 0 01-.043-.037l-.045-.04-.061-.053-.07-.062-.068-.06-.062-.058-.067-.062-.053-.05-.088-.084a13.28 13.28 0 01-.099-.097l-.029-.028-.041-.042-.069-.07-.05-.051-.05-.053a6.457 6.457 0 01-.168-.179l-.08-.088-.062-.07-.071-.08-.042-.049-.053-.062-.058-.068-.046-.056a7.175 7.175 0 01-.027-.033l-.045-.055-.066-.082-.041-.052-.05-.064-.02-.025a11.99 11.99 0 01-1.44-2.402zm-1.02-5.794l11.353 3.037a20.468 20.468 0 00-.469 2.011l10.817 2.894a12.076 12.076 0 01-1.845 2.005L.657 15.923l-.016-.046-.035-.104a11.965 11.965 0 01-.05-.153l-.007-.023a11.896 11.896 0 01-.207-.741l-.03-.126-.018-.08-.021-.097-.018-.081-.018-.09-.017-.084-.018-.094c-.026-.141-.05-.283-.071-.426l-.017-.118-.011-.083-.013-.102a12.01 12.01 0 01-.019-.161l-.005-.047a12.12 12.12 0 01-.034-2.145zm1.593-5.15l11.948 3.196c-.368.605-.705 1.231-1.01 1.875l11.295 3.022c-.142.82-.368 1.612-.668 2.365l-11.55-3.09L.124 10.26l.015-.1.008-.049.01-.067.015-.087.018-.098c.026-.148.056-.295.088-.442l.028-.124.02-.085.024-.097c.022-.09.045-.18.07-.268l.028-.102.023-.083.03-.1.025-.082.03-.096.026-.082.031-.095a11.896 11.896 0 011.01-2.232zm4.442-4.4L17.352 4.59a20.77 20.77 0 00-1.688 1.721l7.823 2.093c.267.852.442 1.744.513 2.665L2.106 5.213l.045-.065.027-.04.04-.055.046-.065.055-.076.054-.072.064-.086.05-.065.057-.073.055-.07.06-.074.055-.069.065-.077.054-.066.066-.077.053-.06.072-.082.053-.06.067-.074.054-.058.073-.078.058-.06.063-.067.168-.17.1-.098.059-.056.076-.071a12.084 12.084 0 012.272-1.677zM12.017 0h.097l.082.001.069.001.054.002.068.002.046.001.076.003.047.002.06.003.054.002.087.005.105.007.144.011.088.007.044.004.077.008.082.008.047.005.102.012.05.006.108.014.081.01.042.006.065.01.207.032.07.012.065.011.14.026.092.018.11.022.046.01.075.016.041.01L14.7.3l.042.01.065.015.049.012.071.017.096.024.112.03.113.03.113.032.05.015.07.02.078.024.073.023.05.016.05.016.076.025.099.033.102.036.048.017.064.023.093.034.11.041.116.045.1.04.047.02.06.024.041.018.063.026.04.018.057.025.11.048.1.046.074.035.075.036.06.028.092.046.091.045.102.052.053.028.049.026.046.024.06.033.041.022.052.029.088.05.106.06.087.051.057.034.053.032.096.059.088.055.098.062.036.024.064.041.084.056.04.027.062.042.062.043.023.017c.054.037.108.075.161.114l.083.06.065.048.056.043.086.065.082.064.04.03.05.041.086.069.079.065.085.071c.712.6 1.353 1.283 1.909 
2.031L7.222.994l.062-.027.065-.028.081-.034.086-.035c.113-.045.227-.09.341-.131l.096-.035.093-.033.084-.03.096-.031c.087-.03.176-.058.264-.085l.091-.027.086-.025.102-.03.085-.023.1-.026L9.04.37l.09-.023.091-.022.095-.022.09-.02.098-.021.091-.02.095-.018.092-.018.1-.018.091-.016.098-.017.092-.014.097-.015.092-.013.102-.013.091-.012.105-.012.09-.01.105-.01c.093-.01.186-.018.28-.024l.106-.008.09-.005.11-.006.093-.004.1-.004.097-.002.099-.002.197-.002z"></path>
</g>
</svg>

@@ -0,0 +1,8 @@
<svg fill="#333" fill-rule="evenodd" height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30"
width="1em" xmlns="http://www.w3.org/2000/svg">
<title>OpenAI</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<path d="M21.55 10.004a5.416 5.416 0 00-.478-4.501c-1.217-2.09-3.662-3.166-6.05-2.66A5.59 5.59 0 0010.831 1C8.39.995 6.224 2.546 5.473 4.838A5.553 5.553 0 001.76 7.496a5.487 5.487 0 00.691 6.5 5.416 5.416 0 00.477 4.502c1.217 2.09 3.662 3.165 6.05 2.66A5.586 5.586 0 0013.168 23c2.443.006 4.61-1.546 5.361-3.84a5.553 5.553 0 003.715-2.66 5.488 5.488 0 00-.693-6.497v.001zm-8.381 11.558a4.199 4.199 0 01-2.675-.954c.034-.018.093-.05.132-.074l4.44-2.53a.71.71 0 00.364-.623v-6.176l1.877 1.069c.02.01.033.029.036.05v5.115c-.003 2.274-1.87 4.118-4.174 4.123zM4.192 17.78a4.059 4.059 0 01-.498-2.763c.032.02.09.055.131.078l4.44 2.53c.225.13.504.13.73 0l5.42-3.088v2.138a.068.068 0 01-.027.057L9.9 19.288c-1.999 1.136-4.552.46-5.707-1.51h-.001zM3.023 8.216A4.15 4.15 0 015.198 6.41l-.002.151v5.06a.711.711 0 00.364.624l5.42 3.087-1.876 1.07a.067.067 0 01-.063.005l-4.489-2.559c-1.995-1.14-2.679-3.658-1.53-5.63h.001zm15.417 3.54l-5.42-3.088L14.896 7.6a.067.067 0 01.063-.006l4.489 2.557c1.998 1.14 2.683 3.662 1.529 5.633a4.163 4.163 0 01-2.174 1.807V12.38a.71.71 0 00-.363-.623zm1.867-2.773a6.04 6.04 0 00-.132-.078l-4.44-2.53a.731.731 0 00-.729 0l-5.42 3.088V7.325a.068.068 0 01.027-.057L14.1 4.713c2-1.137 4.555-.46 5.707 1.513.487.833.664 1.809.499 2.757h.001zm-11.741 3.81l-1.877-1.068a.065.065 0 01-.036-.051V6.559c.001-2.277 1.873-4.122 4.181-4.12.976 0 1.92.338 2.671.954-.034.018-.092.05-.131.073l-4.44 2.53a.71.71 0 00-.365.623l-.003 6.173v.002zm1.02-2.168L12 9.25l2.414 1.375v2.75L12 14.75l-2.415-1.375v-2.75z"></path>
</g>
</svg>

@@ -0,0 +1,14 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Qwen</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<defs>
<linearGradient id="lobe-icons-qwen-fill" x1="0%" x2="100%" y1="0%" y2="0%">
<stop offset="0%" stop-color="#00055F" stop-opacity=".84"></stop>
<stop offset="100%" stop-color="#6F69F7" stop-opacity=".84"></stop>
</linearGradient>
</defs>
<path d="M12.604 1.34c.393.69.784 1.382 1.174 2.075a.18.18 0 00.157.091h5.552c.174 0 .322.11.446.327l1.454 2.57c.19.337.24.478.024.837-.26.43-.513.864-.76 1.3l-.367.658c-.106.196-.223.28-.04.512l2.652 4.637c.172.301.111.494-.043.77-.437.785-.882 1.564-1.335 2.34-.159.272-.352.375-.68.37-.777-.016-1.552-.01-2.327.016a.099.099 0 00-.081.05 575.097 575.097 0 01-2.705 4.74c-.169.293-.38.363-.725.364-.997.003-2.002.004-3.017.002a.537.537 0 01-.465-.271l-1.335-2.323a.09.09 0 00-.083-.049H4.982c-.285.03-.553-.001-.805-.092l-1.603-2.77a.543.543 0 01-.002-.54l1.207-2.12a.198.198 0 000-.197 550.951 550.951 0 01-1.875-3.272l-.79-1.395c-.16-.31-.173-.496.095-.965.465-.813.927-1.625 1.387-2.436.132-.234.304-.334.584-.335a338.3 338.3 0 012.589-.001.124.124 0 00.107-.063l2.806-4.895a.488.488 0 01.422-.246c.524-.001 1.053 0 1.583-.006L11.704 1c.341-.003.724.032.9.34zm-3.432.403a.06.06 0 00-.052.03L6.254 6.788a.157.157 0 01-.135.078H3.253c-.056 0-.07.025-.041.074l5.81 10.156c.025.042.013.062-.034.063l-2.795.015a.218.218 0 00-.2.116l-1.32 2.31c-.044.078-.021.118.068.118l5.716.008c.046 0 .08.02.104.061l1.403 2.454c.046.081.092.082.139 0l5.006-8.76.783-1.382a.055.055 0 01.096 0l1.424 2.53a.122.122 0 00.107.062l2.763-.02a.04.04 0 00.035-.02.041.041 0 000-.04l-2.9-5.086a.108.108 0 010-.113l.293-.507 1.12-1.977c.024-.041.012-.062-.035-.062H9.2c-.059 0-.073-.026-.043-.077l1.434-2.505a.107.107 0 000-.114L9.225 1.774a.06.06 0 00-.053-.031zm6.29 8.02c.046 0 .058.02.034.06l-.832 1.465-2.613 4.585a.056.056 0 01-.05.029.058.058 0 01-.05-.029L8.498 9.841c-.02-.034-.01-.052.028-.054l.216-.012 6.722-.012z"
fill="url(#lobe-icons-qwen-fill)" fill-rule="nonzero"></path>
</g>
</svg>

@@ -0,0 +1,18 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 30 30" width="1em" xmlns="http://www.w3.org/2000/svg">
<title>Wenxin</title>
<rect width="30" height="30" fill="#E7F8FF" rx="6"/>
<g transform="translate(3, 3)">
<defs>
<linearGradient id="lobe-icons-wenxin-fill" x1="9.155%" x2="90.531%" y1="75.177%" y2="25.028%">
<stop offset="0%" stop-color="#0A51C3"></stop>
<stop offset="100%" stop-color="#23A4FB"></stop>
</linearGradient>
</defs>
<g fill="none" fill-rule="nonzero">
<path d="M11.32 1.176a1.4 1.4 0 011.36 0l8.64 4.843c.421.234.68.67.68 1.141v9.68c0 .472-.259.908-.68 1.143l-8.64 4.84a1.4 1.4 0 01-1.36 0l-8.64-4.84A1.31 1.31 0 012 16.84V7.159c0-.471.259-.907.68-1.142l8.64-4.84zm7.42 13.839V8.227L12.002 12 12 19.551l6.059-3.394a1.31 1.31 0 00.68-1.142zM12.68 4.833a1.393 1.393 0 00-1.36 0L5.944 7.846c-.421.235-.68.67-.68 1.142v6.027c0 .47.259.905.68 1.142l2.795 1.566V11.09a1.546 1.546 0 00.221.79 1.527 1.527 0 01-.216-.834l.004-.094.02-.15.018-.084.017-.062.039-.117.062-.142.035-.065.081-.13.094-.122.084-.091.08-.075.125-.1.071-.048.134-.076 5.87-3.29-2.796-1.566z"
fill="url(#lobe-icons-wenxin-fill)"></path>
<path d="M12 11.088c0-.875-.73-1.584-1.631-1.584a1.66 1.66 0 00-.855.237c-.027.016-.055.033-.08.05a2.361 2.361 0 00-.123.093c-.022.02-.045.038-.066.059l-.048.045-.063.067c-.014.016-.028.031-.04.048a2.303 2.303 0 00-.094.125l-.042.069a1.7 1.7 0 00-.07.13l-.036.081a.764.764 0 00-.022.06c-.01.03-.02.058-.028.087l-.017.062a.883.883 0 00-.03.16c-.002.025-.007.05-.008.074a1.527 1.527 0 00.213.929c.302.508.85.792 1.414.792.277 0 .558-.068.814-.212l.815-.457v-.914L12 11.088z"
fill="#012F8D"></path>
</g>
</g>
</svg>

app/icons/mcp.svg
@@ -0,0 +1,15 @@
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 180 180" fill="none">
<g clip-path="url(#clip0_19_13)">
<path d="M18 84.8528L85.8822 16.9706C95.2548 7.59798 110.451 7.59798 119.823 16.9706V16.9706C129.196 26.3431 129.196 41.5391 119.823 50.9117L68.5581 102.177"
stroke="black" stroke-width="12" stroke-linecap="round"/>
<path d="M69.2652 101.47L119.823 50.9117C129.196 41.5391 144.392 41.5391 153.765 50.9117L154.118 51.2652C163.491 60.6378 163.491 75.8338 154.118 85.2063L92.7248 146.6C89.6006 149.724 89.6006 154.789 92.7248 157.913L105.331 170.52"
stroke="black" stroke-width="12" stroke-linecap="round"/>
<path d="M102.853 33.9411L52.6482 84.1457C43.2756 93.5183 43.2756 108.714 52.6482 118.087V118.087C62.0208 127.459 77.2167 127.459 86.5893 118.087L136.794 67.8822"
stroke="black" stroke-width="12" stroke-linecap="round"/>
</g>
<defs>
<clipPath id="clip0_19_13">
<rect width="180" height="180" fill="white"/>
</clipPath>
</defs>
</svg>

@@ -1 +1,3 @@
- <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="16" height="16" fill="none" viewBox="0 0 16 16"><defs><rect id="path_0" width="16" height="16" x="0" y="0"/></defs><g opacity="1" transform="translate(0 0) rotate(0 8 8)"><mask id="bg-mask-0" fill="#fff"><use xlink:href="#path_0"/></mask><g mask="url(#bg-mask-0)"><path id="路径 1" style="stroke:#333;stroke-width:1.3333333333333333;stroke-opacity:1;stroke-dasharray:0 0" d="M13.33,6.67C13.33,2.98 10.35,0 6.67,0C2.98,0 0,2.98 0,6.67C0,10.35 2.98,13.33 6.67,13.33C10.35,13.33 13.33,10.35 13.33,6.67Z" transform="translate(1.3333333333333333 1.3333333333333333) rotate(0 6.666666666666666 6.666666666666666)"/><path id="路径 2" style="stroke:#333;stroke-width:1.3333333333333333;stroke-opacity:1;stroke-dasharray:0 0" d="M0,0L0,4" transform="translate(6.333333333333333 6) rotate(0 0 2)"/><path id="路径 3" style="stroke:#333;stroke-width:1.3333333333333333;stroke-opacity:1;stroke-dasharray:0 0" d="M0,0L0,4" transform="translate(9.666666666666666 6) rotate(0 0 2)"/></g></g></svg>
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
<rect x="3" y="3" width="18" height="18" rx="2" ry="2"></rect>
</svg>

app/icons/play.svg
@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
<polygon points="5 3 19 12 5 21 5 3"></polygon>
</svg>

app/icons/power.svg
@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="24" height="24" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
d="M14.5 8C13.8406 8.37652 13.2062 8.79103 12.6 9.24051C11.5625 10.0097 10.6074 10.8814 9.75 11.8402C6.79377 15.1463 5 19.4891 5 24.2455C5 34.6033 13.5066 43 24 43C34.4934 43 43 34.6033 43 24.2455C43 19.4891 41.2062 15.1463 38.25 11.8402C37.3926 10.8814 36.4375 10.0097 35.4 9.24051C34.7938 8.79103 34.1594 8.37652 33.5 8"
stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M24 4V24" stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
</svg>

app/icons/tool.svg
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24"><path fill="currentColor" fill-rule="evenodd" d="M10.155 3.247c-.519.396-1.129 1.004-2.012 1.887s-1.49 1.493-1.887 2.012c-.383.502-.497.83-.497 1.14s.114.638.497 1.14c.397.52 1.004 1.13 1.887 2.012l4.419 4.419c.883.883 1.493 1.49 2.012 1.887c.502.383.83.497 1.14.497s.638-.114 1.14-.497c.519-.396 1.129-1.004 2.012-1.887s1.49-1.493 1.887-2.012c.383-.503.497-.83.497-1.14s-.114-.638-.497-1.14c-.396-.52-1.004-1.13-1.887-2.012l-4.419-4.419c-.883-.883-1.493-1.49-2.012-1.887c-.502-.383-.83-.497-1.14-.497s-.637.114-1.14.497m-.91-1.192c.636-.485 1.28-.805 2.05-.805s1.414.32 2.05.805c.609.464 1.29 1.145 2.125 1.98l.244.245c.239-.238.451-.44.685-.574a2.31 2.31 0 0 1 2.312 0c.267.154.505.393.787.675l.06.06l.061.061c.282.282.521.52.675.787a2.31 2.31 0 0 1 0 2.312c-.135.234-.336.446-.574.685l.245.244c.835.836 1.516 1.516 1.98 2.125c.485.636.805 1.28.805 2.05s-.32 1.414-.805 2.05c-.464.608-1.145 1.289-1.98 2.124l-.077.077c-.835.835-1.516 1.516-2.125 1.98c-.635.485-1.28.805-2.05.805c-.768 0-1.413-.32-2.049-.805c-.609-.464-1.29-1.145-2.125-1.98l-.244-.245l-4.993 4.994l-.06.06c-.282.282-.52.521-.787.675a2.31 2.31 0 0 1-2.312 0c-.267-.154-.505-.393-.787-.675l-.06-.06l-.061-.061c-.282-.282-.521-.52-.675-.787a2.31 2.31 0 0 1 0-2.312c.154-.266.393-.505.675-.786l.06-.061l4.994-4.993l-.245-.244c-.835-.836-1.516-1.516-1.98-2.125c-.485-.636-.805-1.28-.805-2.05s.32-1.414.805-2.05c.464-.608 1.145-1.289 1.98-2.124l.077-.077c.835-.835 1.516-1.516 2.125-1.98m-.896 11.71L3.356 18.76c-.376.376-.456.465-.497.536a.81.81 0 0 0 0 .812c.04.072.12.16.497.537c.377.376.466.456.537.497a.81.81 0 0 0 .812 0c.07-.04.16-.12.536-.497l4.994-4.993zm10.31-6.54c.24-.243.302-.314.336-.374a.81.81 0 0 0 0-.812c-.041-.071-.12-.16-.497-.537c-.377-.376-.466-.456-.537-.497a.81.81 0 0 0-.812 0c-.06.034-.131.096-.374.336z" clip-rule="evenodd"/></svg>

app/icons/voice-off.svg
@@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="24" height="24" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<path
d="M31 24V11C31 7.13401 27.866 4 24 4C20.134 4 17 7.13401 17 11V24C17 27.866 20.134 31 24 31C27.866 31 31 27.866 31 24Z"
stroke="#d0021b" stroke-width="4" stroke-linejoin="round" />
<path
d="M9 23C9 31.2843 15.7157 38 24 38C25.7532 38 27.4361 37.6992 29 37.1465M39 23C39 25.1333 38.5547 27.1626 37.7519 29"
stroke="#d0021b" stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M24 38V44" stroke="#d0021b" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
<path d="M42 42L6 6" stroke="#d0021b" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
</svg>

app/icons/voice.svg
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="24" height="24" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect x="17" y="4" width="14" height="27" rx="7" fill="none" stroke="#333" stroke-width="4"
stroke-linejoin="round" />
<path d="M9 23C9 31.2843 15.7157 38 24 38C32.2843 38 39 31.2843 39 23" stroke="#333"
stroke-width="4" stroke-linecap="round" stroke-linejoin="round" />
<path d="M24 38V44" stroke="#333" stroke-width="4" stroke-linecap="round"
stroke-linejoin="round" />
</svg>

@@ -5,9 +5,8 @@ import "./styles/highlight.scss";
import { getClientConfig } from "./config/client";
import type { Metadata, Viewport } from "next";
import { SpeedInsights } from "@vercel/speed-insights/next";
- import { getServerSideConfig } from "./config/server";
import { GoogleTagManager, GoogleAnalytics } from "@next/third-parties/google";
- const serverConfig = getServerSideConfig();
import { getServerSideConfig } from "./config/server";
export const metadata: Metadata = {
title: "NextChat",
@@ -33,6 +32,8 @@ export default function RootLayout({
}: {
children: React.ReactNode;
}) {
const serverConfig = getServerSideConfig();
return (
<html lang="en">
<head>

app/lib/audio.ts
@@ -0,0 +1,200 @@
export class AudioHandler {
private context: AudioContext;
private mergeNode: ChannelMergerNode;
private analyserData: Uint8Array;
public analyser: AnalyserNode;
private workletNode: AudioWorkletNode | null = null;
private stream: MediaStream | null = null;
private source: MediaStreamAudioSourceNode | null = null;
private recordBuffer: Int16Array[] = [];
private readonly sampleRate = 24000;
private nextPlayTime: number = 0;
private isPlaying: boolean = false;
private playbackQueue: AudioBufferSourceNode[] = [];
private playBuffer: Int16Array[] = [];
constructor() {
this.context = new AudioContext({ sampleRate: this.sampleRate });
// using ChannelMergerNode to get merged audio data, and then get analyser data.
this.mergeNode = new ChannelMergerNode(this.context, { numberOfInputs: 2 });
this.analyser = new AnalyserNode(this.context, { fftSize: 256 });
this.analyserData = new Uint8Array(this.analyser.frequencyBinCount);
this.mergeNode.connect(this.analyser);
}
getByteFrequencyData() {
this.analyser.getByteFrequencyData(this.analyserData);
return this.analyserData;
}
async initialize() {
await this.context.audioWorklet.addModule("/audio-processor.js");
}
async startRecording(onChunk: (chunk: Uint8Array) => void) {
try {
if (!this.workletNode) {
await this.initialize();
}
this.stream = await navigator.mediaDevices.getUserMedia({
audio: {
channelCount: 1,
sampleRate: this.sampleRate,
echoCancellation: true,
noiseSuppression: true,
},
});
await this.context.resume();
this.source = this.context.createMediaStreamSource(this.stream);
this.workletNode = new AudioWorkletNode(
this.context,
"audio-recorder-processor",
);
this.workletNode.port.onmessage = (event) => {
if (event.data.eventType === "audio") {
const float32Data = event.data.audioData;
const int16Data = new Int16Array(float32Data.length);
for (let i = 0; i < float32Data.length; i++) {
const s = Math.max(-1, Math.min(1, float32Data[i]));
int16Data[i] = s < 0 ? s * 0x8000 : s * 0x7fff;
}
const uint8Data = new Uint8Array(int16Data.buffer);
onChunk(uint8Data);
// save recordBuffer
// @ts-ignore
this.recordBuffer.push.apply(this.recordBuffer, int16Data);
}
};
this.source.connect(this.workletNode);
this.source.connect(this.mergeNode, 0, 0);
this.workletNode.connect(this.context.destination);
this.workletNode.port.postMessage({ command: "START_RECORDING" });
} catch (error) {
console.error("Error starting recording:", error);
throw error;
}
}
stopRecording() {
if (!this.workletNode || !this.source || !this.stream) {
throw new Error("Recording not started");
}
this.workletNode.port.postMessage({ command: "STOP_RECORDING" });
this.workletNode.disconnect();
this.source.disconnect();
this.stream.getTracks().forEach((track) => track.stop());
}
startStreamingPlayback() {
this.isPlaying = true;
this.nextPlayTime = this.context.currentTime;
}
stopStreamingPlayback() {
this.isPlaying = false;
this.playbackQueue.forEach((source) => source.stop());
this.playbackQueue = [];
this.playBuffer = [];
}
playChunk(chunk: Uint8Array) {
if (!this.isPlaying) return;
const int16Data = new Int16Array(chunk.buffer);
// @ts-ignore
this.playBuffer.push.apply(this.playBuffer, int16Data); // save playBuffer
const float32Data = new Float32Array(int16Data.length);
for (let i = 0; i < int16Data.length; i++) {
float32Data[i] = int16Data[i] / (int16Data[i] < 0 ? 0x8000 : 0x7fff);
}
const audioBuffer = this.context.createBuffer(
1,
float32Data.length,
this.sampleRate,
);
audioBuffer.getChannelData(0).set(float32Data);
const source = this.context.createBufferSource();
source.buffer = audioBuffer;
source.connect(this.context.destination);
source.connect(this.mergeNode, 0, 1);
const chunkDuration = audioBuffer.length / this.sampleRate;
source.start(this.nextPlayTime);
this.playbackQueue.push(source);
source.onended = () => {
const index = this.playbackQueue.indexOf(source);
if (index > -1) {
this.playbackQueue.splice(index, 1);
}
};
this.nextPlayTime += chunkDuration;
if (this.nextPlayTime < this.context.currentTime) {
this.nextPlayTime = this.context.currentTime;
}
}
_saveData(data: Int16Array, bytesPerSample = 16): Blob {
const headerLength = 44;
const numberOfChannels = 1;
const byteLength = data.buffer.byteLength;
const header = new Uint8Array(headerLength);
const view = new DataView(header.buffer);
view.setUint32(0, 1380533830, false); // RIFF identifier 'RIFF'
view.setUint32(4, 36 + byteLength, true); // file length minus RIFF identifier length and file description length
view.setUint32(8, 1463899717, false); // RIFF type 'WAVE'
view.setUint32(12, 1718449184, false); // format chunk identifier 'fmt '
view.setUint32(16, 16, true); // format chunk length
view.setUint16(20, 1, true); // sample format (raw)
view.setUint16(22, numberOfChannels, true); // channel count
view.setUint32(24, this.sampleRate, true); // sample rate
view.setUint32(28, this.sampleRate * 4, true); // byte rate (sample rate * block align)
view.setUint16(32, numberOfChannels * 2, true); // block align (channel count * bytes per sample)
view.setUint16(34, bytesPerSample, true); // bits per sample
view.setUint32(36, 1684108385, false); // data chunk identifier 'data'
view.setUint32(40, byteLength, true); // data chunk length
// using data.buffer, so no need to setUint16 to view.
return new Blob([view, data.buffer], { type: "audio/mpeg" });
}
savePlayFile() {
// @ts-ignore
return this._saveData(new Int16Array(this.playBuffer));
}
saveRecordFile(
audioStartMillis: number | undefined,
audioEndMillis: number | undefined,
) {
const startIndex = audioStartMillis
? Math.floor((audioStartMillis * this.sampleRate) / 1000)
: 0;
const endIndex = audioEndMillis
? Math.floor((audioEndMillis * this.sampleRate) / 1000)
: this.recordBuffer.length;
return this._saveData(
// @ts-ignore
new Int16Array(this.recordBuffer.slice(startIndex, endIndex)),
);
}
async close() {
this.recordBuffer = [];
this.workletNode?.disconnect();
this.source?.disconnect();
this.stream?.getTracks().forEach((track) => track.stop());
await this.context.close();
}
}
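
The AudioHandler above captures microphone input through an AudioWorklet as mono 16-bit PCM at 24 kHz, mirrors both the record and playback streams into an AnalyserNode for visualization, and can wrap either buffer in a 44-byte WAV header via _saveData. A minimal usage sketch of the recording path, assuming AudioHandler is imported from app/lib/audio.ts and the page serves the worklet module at /audio-processor.js; the five-second timing and logging are illustrative:

// Sketch only: record for five seconds, then return the WAV blob.
async function recordFiveSeconds(): Promise<Blob> {
  const handler = new AudioHandler();
  await handler.initialize(); // loads /audio-processor.js into the worklet

  await handler.startRecording((chunk) => {
    // Each chunk is little-endian 16-bit PCM, mono, 24 kHz; a realtime
    // client could forward it over a WebSocket here.
    console.log("pcm chunk bytes:", chunk.byteLength);
  });

  await new Promise((resolve) => setTimeout(resolve, 5000));
  handler.stopRecording();

  const wav = handler.saveRecordFile(undefined, undefined);
  await handler.close();
  return wav;
}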

@@ -106,6 +106,7 @@ const cn = {
copyLastMessage: "复制最后一个回复",
copyLastCode: "复制最后一个代码块",
showShortcutKey: "显示快捷方式",
clearContext: "清除上下文",
},
},
Export: {
@@ -176,7 +177,7 @@ const cn = {
},
},
Lang: {
- Name: "Language", // ATTENTION: if you wanna add a new translation, please do not translate this value, leave it as `Language`
Name: "Language", // 注意:如果要添加新的翻译,请不要翻译此值,将它保留为 `Language`
All: "所有语言",
},
Avatar: "头像",
@@ -205,6 +206,8 @@ const cn = {
IsChecking: "正在检查更新...",
FoundUpdate: (x: string) => `发现新版本:${x}`,
GoToUpdate: "前往更新",
Success: "更新成功!",
Failed: "更新失败",
},
SendKey: "发送键",
Theme: "主题",
@@ -460,6 +463,50 @@ const cn = {
SubTitle: "样例:",
},
},
DeepSeek: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义DeepSeek API Key",
Placeholder: "DeepSeek API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
XAI: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义XAI API Key",
Placeholder: "XAI API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
ChatGLM: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义 ChatGLM API Key",
Placeholder: "ChatGLM API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
SiliconFlow: {
ApiKey: {
Title: "接口密钥",
SubTitle: "使用自定义硅基流动 API Key",
Placeholder: "硅基流动 API Key",
},
Endpoint: {
Title: "接口地址",
SubTitle: "样例:",
},
},
Stability: {
ApiKey: {
Title: "接口密钥",
@@ -495,8 +542,8 @@ const cn = {
Model: "模型 (model)",
CompressModel: {
- Title: "压缩模型",
Title: "对话摘要模型",
- SubTitle: "用于压缩历史记录的模型",
SubTitle: "用于压缩历史记录、生成对话标题的模型",
},
Temperature: {
Title: "随机性 (temperature)",
@@ -538,6 +585,39 @@ const cn = {
SubTitle: "生成语音的速度",
},
},
Realtime: {
Enable: {
Title: "实时聊天",
SubTitle: "开启实时聊天功能",
},
Provider: {
Title: "模型服务商",
SubTitle: "切换不同的服务商",
},
Model: {
Title: "模型",
SubTitle: "选择一个模型",
},
ApiKey: {
Title: "API Key",
SubTitle: "API Key",
Placeholder: "API Key",
},
Azure: {
Endpoint: {
Title: "接口地址",
SubTitle: "接口地址",
},
Deployment: {
Title: "部署名称",
SubTitle: "部署名称",
},
},
Temperature: {
Title: "随机性 (temperature)",
SubTitle: "值越大,回复越随机",
},
},
},
Store: {
DefaultTopic: "新的聊天",
@@ -569,11 +649,14 @@ const cn = {
Discovery: {
Name: "发现",
},
Mcp: {
Name: "MCP",
},
FineTuned: {
Sysmessage: "你是一个助手",
},
SearchChat: {
- Name: "搜索",
Name: "搜索聊天记录",
Page: {
Title: "搜索聊天记录",
Search: "输入搜索关键词",
@@ -665,6 +748,10 @@ const cn = {
Title: "启用Artifacts",
SubTitle: "启用之后可以直接渲染HTML页面",
},
CodeFold: {
Title: "启用代码折叠",
SubTitle: "启用之后可以自动折叠/展开过长的代码块",
},
Share: {
Title: "分享此面具",
SubTitle: "生成此面具的直达链接",

Some files were not shown because too many files have changed in this diff.