Compare commits

..

1282 Commits

Author SHA1 Message Date
RiverRay 48469bd8ca
Merge pull request #6392 from ChatGPTNextWeb/Leizhenpeng-patch-6
Update README.md
2025-03-20 17:52:02 +08:00
RiverRay 5a5e887f2b
Update README.md 2025-03-20 17:51:47 +08:00
RiverRay b6f5d75656
Merge pull request #6344 from vangie/fix/jest-setup-esm
test: fix unit test failures
2025-03-14 20:04:56 +08:00
Vangie Du 0d41a17ef6 test: fix unit test failures 2025-03-07 14:49:17 +08:00
RiverRay f7cde17919
Merge pull request #6292 from Little-LittleProgrammer/feature/alibaba-omni-support
feat(alibaba): Added alibaba vision model and omni model support
2025-03-01 10:25:16 +08:00
RiverRay 570cbb34b6
Merge pull request #6310 from agi-dude/patch-1
Remove duplicate links
2025-03-01 10:24:38 +08:00
RiverRay 7aa9ae0a3e
Merge pull request #6311 from ChatGPTNextWeb/6305-bugthe-first-message-except-the-system-message-of-deepseek-reasoner-must-be-a-user-message-but-an-assistant-message-detected
fix: enforce that the first message (excluding system messages) is a …
2025-02-28 19:48:09 +08:00
Kadxy 2d4180f5be fix: update request payload to use filtered messages in Deepseek API 2025-02-28 13:59:30 +08:00
Kadxy 9f0182b55e fix: enforce that the first message (excluding system messages) is a user message in the Deepseek API 2025-02-28 13:54:58 +08:00
Mr. AGI ad6666eeaf
Update README.md 2025-02-28 10:47:52 +05:00
EvanWu a2c4e468a0 fix(app/utils/chat.ts): fix type error 2025-02-26 19:58:32 +08:00
RiverRay 2167076652
Merge pull request #6293 from hyiip/main
claude 3.7 support
2025-02-26 18:41:28 +08:00
RiverRay e123076250
Merge pull request #6295 from rexkyng/patch-1
Fix: Improve Mistral icon detection and remove redundant code.
2025-02-26 18:39:59 +08:00
Rex Ng ebcb4db245
Fix: Improve Mistral icon detection and remove redundant code.
- Added "codestral" to the list of acceptable names for the Mistral icon, ensuring proper detection.
- Removed duplicate `toLowerCase()` calls.
2025-02-25 14:30:18 +08:00
EvanWu 0a25a1a8cb refactor(app/utils/chat.ts): optimize function preProcessImageContentBase 2025-02-25 09:22:47 +08:00
hyiip f3154b20a5 claude 3.7 support 2025-02-25 03:55:24 +08:00
EvanWu b709ee3983 feat(alibaba): Added alibaba vision model and omni model support 2025-02-24 20:18:07 +08:00
RiverRay f5f3ce94f6
Update README.md 2025-02-21 08:56:43 +08:00
RiverRay 2b5f600308
Update README.md 2025-02-21 08:55:40 +08:00
RiverRay b966107117
Merge pull request #6235 from DBCDK/danish-locale
Translation to danish
2025-02-17 22:58:01 +08:00
river 377480b448 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-16 10:50:07 +08:00
river 8bd0d6a1a7 chore: Update NextChatAI domain from nextchat.dev to nextchat.club 2025-02-16 10:48:54 +08:00
Rasmus Erik Voel Jensen 90827fc593 danish rewording / improved button label 2025-02-15 13:08:58 +01:00
Rasmus Erik Voel Jensen 008e339b6d danish locale 2025-02-15 12:52:44 +01:00
RiverRay 12863f5213
Merge pull request #6204 from bestsanmao/ali_bytedance_reasoning_content
add 3 type of reasoning_content support (+deepseek-r1@OpenAI @Alibaba @ByteDance), parse <think></think> from SSE
2025-02-13 14:53:47 +08:00
suruiqiang cf140d4228 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into ali_bytedance_reasoning_content 2025-02-12 17:54:50 +08:00
suruiqiang 476d946f96 fix bug (trim eats space or \n mistakenly), optimize timeout by model 2025-02-12 17:49:54 +08:00
suruiqiang 9714258322 support deepseek-r1@OpenAI's reasoning_content, parse <think></think> from stream 2025-02-11 18:57:16 +08:00
RiverRay 48cd4b11b5
Merge pull request #6190 from siliconflow/refine-emoji-siliconflow
Fix model icon on SiliconFlow
2025-02-11 18:37:47 +08:00
RiverRay 77c78b230a
Merge pull request #6193 from siliconflow/get-models-siliconflow
Model listing of SiliconFlow
2025-02-11 18:37:22 +08:00
RiverRay b44686b887
Merge pull request #6189 from bestsanmao/bug_fix
fix avatar for export message preview and saved image
2025-02-11 18:36:50 +08:00
RiverRay 34bdd4b945
Merge pull request #6194 from siliconflow/vl-support-on-sf
Support VLM on SiliconFlow
2025-02-11 18:35:02 +08:00
suruiqiang b0758cccde optimization 2025-02-11 16:08:30 +08:00
suruiqiang 98a11e56d2 support alibaba and bytedance's reasoning_content 2025-02-11 12:46:46 +08:00
Shenghang Tsai 86f86962fb Support VLM on SiliconFlow 2025-02-10 13:39:06 +08:00
Shenghang Tsai 2137aa65bf Model listing of SiliconFlow 2025-02-10 11:03:49 +08:00
Shenghang Tsai 18fa2cc30d fix model icon on siliconflow 2025-02-09 18:49:26 +08:00
Shenghang Tsai 0bfc648085 fix model icon on siliconflow 2025-02-09 18:47:57 +08:00
suruiqiang 9f91c2d05c fix avatar for export message preview and saved image 2025-02-09 16:52:46 +08:00
RiverRay a029b4330b
Merge pull request #6188 from ChatGPTNextWeb/Leizhenpeng-patch-4
Update LICENSE
2025-02-09 11:05:43 +08:00
RiverRay 2842b264e0
Update LICENSE 2025-02-09 11:05:32 +08:00
RiverRay c2edfec16f
Merge pull request #6172 from bestsanmao/bug_fix
fix several bugs
2025-02-09 11:03:44 +08:00
RiverRay 6406ac99a3
Merge pull request #6175 from itsevin/main
Add other Xai model
2025-02-09 11:02:13 +08:00
suruiqiang 97a4aafc92 Merge remote-tracking branch 'remotes/origin/main' into bug_fix 2025-02-09 09:46:07 +08:00
GH Action - Upstream Sync d8f533e1f3 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-09 01:22:47 +00:00
RiverRay c6199dbf9f
Merge pull request #6186 from siliconflow/fix-truc-of-reasoning-model
Fix formatting of reasoning model on SiliconFlow
2025-02-08 23:40:39 +08:00
RiverRay 4273aa0803
Merge pull request #6185 from siliconflow/larger_timeout_for_siliconflow
Larger timeout for SiliconFlow
2025-02-08 23:39:49 +08:00
Shenghang Tsai acf75ce68f Remove unnecessary trimming 2025-02-08 16:34:17 +08:00
suruiqiang 1ae5fdbf01 mini optimizations 2025-02-08 16:15:10 +08:00
Shenghang Tsai 2a3996e0d6 Update siliconflow.ts 2025-02-08 14:38:12 +08:00
GH Action - Upstream Sync fdbaddde37 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2025-02-08 01:16:56 +00:00
suruiqiang d74f79e9c5 Merge remote-tracking branch 'remotes/origin/HEAD' into bug_fix 2025-02-08 08:29:34 +08:00
itsevin c4e9cb03a9 Add Xai model 2025-02-07 20:29:21 +08:00
RiverRay bf265d3375
Merge pull request #6164 from ZhangYichi-ZYc/main
Fix: Set consistent fill color for OpenAI/MoonShot/Grok SVG to prevent color inversion in dark mode
2025-02-07 20:25:20 +08:00
RiverRay 17f391d929
Merge pull request #6158 from dupl/main
update the latest Gemini models
2025-02-07 20:23:47 +08:00
RiverRay 78186c27fb
Merge pull request #6168 from xiexin12138/fix-env
Fix: 补充 env 中硅基流动的环境变量;追加硅基流动 2 个支持的付费模型
2025-02-07 20:23:01 +08:00
suruiqiang a5a9768245 change request timeout for thinking mode 2025-02-07 16:34:14 +08:00
suruiqiang 3fe55b4f7f fix bug that gemini has multiple candidates part 2025-02-07 16:20:07 +08:00
suruiqiang f156430cc5 fix emoji issue for doubao and glm's congview & congvideox 2025-02-07 16:18:15 +08:00
suruiqiang f30c6a4348 fix doubao and grok not upload image 2025-02-07 16:14:19 +08:00
xiexin12138 a780b39c17 fix: 补充硅基流动对 DeepSeek 支持的付费模型 2025-02-07 15:43:50 +08:00
xiexin12138 1010db834c fix: 补充硅基流动的 env 环境变量 2025-02-07 15:41:40 +08:00
ZhangYichi 51384ddc5f Fix: Set consistent fill color for OpenAI/MoonShot/Grok SVG to prevent color inversion in dark mode 2025-02-07 11:13:22 +08:00
dupl e5e5fde924
update the latest Gemini models 2025-02-07 06:50:31 +08:00
RiverRay add9ca200c
Merge pull request #6144 from Eric-2369/add-more-llm-icons
feat: add more llm icons
2025-02-06 18:08:08 +08:00
Eric-2369 5225a6e192
feat: add more llm icons 2025-02-05 12:34:00 +08:00
RiverRay 28cbe56cec
Merge pull request #6141 from siliconflow/provider_silicon
New provider SiliconFlow and Its Latest DeepSeek Models
2025-02-04 21:29:02 +08:00
Shenghang Tsai ad9ab9d45a New provider SiliconFlow and Its Latest DeepSeek Models
Update README.md

Update constant.ts

Update README_CN.md
2025-02-04 16:59:26 +08:00
RiverRay bb4832e6e7
Merge pull request #6129 from MonadMonAmi/update_knowledge_cutoff_date
chore: add knowledge cut off dates for o1 and o3
2025-02-04 09:38:04 +08:00
RiverRay 39b3487ea0
Merge branch 'main' into update_knowledge_cutoff_date 2025-02-04 09:37:55 +08:00
RiverRay 32b60909ae
Merge pull request #6132 from RetiredQQ/main
temporary fix for o3-mini
2025-02-04 09:35:43 +08:00
RiverRay 5db6775cb8
Merge pull request #6134 from zcong1993/main
fix: fix isModelNotavailableInServer logic for bytedance models
2025-02-04 09:34:43 +08:00
RiverRay b6881c7797
Merge pull request #6127 from dupl/main
add gemini-2.0-flash-thinking-exp, gemini-2.0-flash-thinking-exp-01-21
2025-02-04 09:33:13 +08:00
RiverRay 9943a52295
Update README.md 2025-02-04 09:31:16 +08:00
RiverRay 1db4d25370
Update README.md 2025-02-04 09:29:56 +08:00
zcong1993 92f57fb18f
fix: fix isModelNotavailableInServer logic for bytedance models 2025-02-03 16:58:42 +08:00
Sky 4c4d44e2f8 fix 2025-02-02 21:45:30 +00:00
Sky 8f12beb8f0 support o3-mini 2025-02-02 21:43:30 +00:00
AndrewS 2e7cac3218 chore: add knowledge cut off dates for o1 and o3 2025-02-02 19:44:53 +01:00
dupl 60fa358010
typo: OpanAI -> OpenAI 2025-02-02 23:27:45 +08:00
dupl 034b7d4655
add gemini-2.0-flash-thinking-exp, gemini-2.0-flash-thinking-exp-01-21 2025-02-02 23:11:07 +08:00
RiverRay 1e20b64048
Merge pull request #6121 from ChatGPTNextWeb/feat/support-openai-o3-mini
feat(model): add support for OpenAI o3-mini model
2025-02-02 20:57:21 +08:00
Kadxy 4f28fca506 feat: Support OpenAI o3-mini 2025-02-01 15:02:06 +08:00
RiverRay 3ef5993085
Merge pull request #6119 from ChatGPTNextWeb/Leizhenpeng-patch-3
Update README.md
2025-01-31 08:18:47 +08:00
RiverRay 09ad7c1875
Update README.md 2025-01-31 08:18:13 +08:00
RiverRay 31e52cb47e
更新 README.md 2025-01-31 06:53:39 +08:00
RiverRay 9a69c5bd7c
Merge pull request #6118 from ChatGPTNextWeb/feat/issue-6104-deepseek-reasoning-content 2025-01-31 06:48:00 +08:00
Kadxy be645aab37 fix: revert unintended changes 2025-01-31 00:59:03 +08:00
RiverRay c41e86faa6
Merge pull request #6116 from ChatGPTNextWeb/feat/issue-6104-deepseek-reasoning-content
Support DeepSeek API streaming reasoning content
2025-01-31 00:52:18 +08:00
river 143be69a7f chore: remove log 2025-01-31 00:50:03 +08:00
river 63b7626656 chore: change md 2025-01-31 00:49:09 +08:00
Kadxy dabb7c70d5 feat: Remove reasoning_content for DeepSeek API messages 2025-01-31 00:30:08 +08:00
Kadxy c449737127 feat: Support DeepSeek API streaming with thinking mode 2025-01-31 00:07:52 +08:00
RiverRay 553b8c9f28
Update .env.template 2025-01-27 13:05:17 +08:00
river 19314793b8 Merge branch 'bestsanmao-bug_fix' 2025-01-27 12:55:31 +08:00
river 8680182921 feat: Add DeepSeek API key and fix MCP environment variable parsing 2025-01-27 12:48:59 +08:00
suruiqiang 2173c82bb5 add deepseek-reasoner, and change deepseek's summary model to deepseek-chat 2025-01-23 18:47:22 +08:00
suruiqiang 0d5e66a9ae not insert mcpSystemPrompt if not ENABLE_MCP 2025-01-23 18:24:38 +08:00
RiverRay 2f9cb5a68f
Merge pull request #6084 from ChatGPTNextWeb/temp-fix
fix: missing mcp_config.json files required for building
2025-01-22 21:40:37 +08:00
Kadxy 55cacfb7e2 fix: missing files required for building 2025-01-22 21:28:29 +08:00
RiverRay 6a862372f7
Merge pull request #6082 from ChatGPTNextWeb/Leizhenpeng-patch-2
Update README_CN.md
2025-01-22 13:11:11 +08:00
RiverRay 81bd83eb44
Update README_CN.md 2025-01-22 13:08:33 +08:00
RiverRay b2b6fd81be
Merge pull request #6075 from Kadxy/main 2025-01-20 10:44:46 +08:00
Kadxy f22cfd7b33
Update chat.tsx 2025-01-20 10:10:52 +08:00
RiverRay 8111acff34
Update README.md 2025-01-20 00:17:47 +08:00
RiverRay 4cad55379d
Merge pull request #5974 from ChatGPTNextWeb/feat-mcp
Support MCP( WIP)
2025-01-20 00:07:41 +08:00
Kadxy a3d3ce3f4c
Merge branch 'main' into feat-mcp 2025-01-19 23:28:12 +08:00
Kadxy 611e97e641 docs: update README.md 2025-01-19 23:20:58 +08:00
Kadxy bfeea4ed49 fix: prevent MCP operations from blocking chat interface 2025-01-19 01:02:01 +08:00
Kadxy bc71ae247b feat: add ENABLE_MCP env var to toggle MCP feature globally and in Docker 2025-01-18 21:19:01 +08:00
Kadxy 0112b54bc7 fix: missing en translation 2025-01-16 22:35:26 +08:00
Kadxy 65810d918b feat: improve async operations and UI feedback 2025-01-16 21:31:19 +08:00
river 4d535b1cd0 chore: enhance mcp prompt 2025-01-16 20:54:24 +08:00
Kadxy 588d81e8f1 feat: remove unused files 2025-01-16 09:17:08 +08:00
Kadxy d4f499ee41 feat: adjust form style 2025-01-16 09:11:53 +08:00
Kadxy 4d63d73b2e feat: load MCP preset data from server 2025-01-16 09:00:57 +08:00
Kadxy 07c63497dc feat: support stop/start MCP servers 2025-01-16 08:52:54 +08:00
Kadxy e440ff56c8 fix: env not work 2025-01-15 18:47:05 +08:00
river c89e4883b2 chore: update icon 2025-01-15 17:31:18 +08:00
river ac3d940de8 Merge branch 'feat-mcp' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into feat-mcp 2025-01-15 17:29:43 +08:00
Kadxy be59de56f0 feat: Display the number of clients instead of the number of available tools. 2025-01-15 17:24:04 +08:00
river a70e9a3c01 chore:update mcp icon 2025-01-15 17:23:10 +08:00
Kadxy 8aa9a500fd feat: Optimize MCP configuration logic 2025-01-15 16:52:54 +08:00
RiverRay 93652db688
Update README.md 2025-01-13 16:57:50 +08:00
RiverRay 8421c483e8
Update README.md 2025-01-12 12:56:13 +08:00
Dogtiti 4ac27fdd4d
Merge pull request #6033 from lvguanjun/fix_fork_session
fix: prevent message sync between forked sessions by generating unique IDs
2025-01-11 16:19:02 +08:00
Dogtiti b6b2c501fd
Merge pull request #6034 from dupl/main
Correct the typos in user-manual-cn.md
2025-01-11 16:17:32 +08:00
Kadxy ce13cf61a7 feat: ignore mcp_config.json 2025-01-09 20:15:47 +08:00
Kadxy a3af563e89 feat: Reset mcp_config.json to empty 2025-01-09 20:13:16 +08:00
Kadxy e95c94d7be fix: inaccurate content 2025-01-09 20:10:10 +08:00
Kadxy 125a71fead fix: unnecessary initialization 2025-01-09 20:07:24 +08:00
Kadxy b410ec399c feat: auto scroll to bottom when MCP response 2025-01-09 20:02:27 +08:00
Kadxy 7d51bfd42e feat: MCP market 2025-01-09 19:51:01 +08:00
Kadxy 0c14ce6417 fix: MCP execution content matching failed. 2025-01-09 13:41:17 +08:00
Kadxy f2a2b40d2c feat: carry mcp primitives content as a system prompt 2025-01-09 10:20:56 +08:00
Kadxy 77be190d76 feat: carry mcp primitives content as a system prompt 2025-01-09 10:09:46 +08:00
dupl c56587c438
Correct the typos in user-manual-cn.md 2025-01-05 20:34:18 +08:00
lvguanjun 840c151ab9 fix: prevent message sync between forked sessions by generating unique IDs 2025-01-05 11:22:53 +08:00
RiverRay 0af04e0f2f
Merge pull request #5468 from DDMeaqua/feat-shortcutkey
feat: #5422 快捷键清除上下文
2024-12-31 16:23:10 +08:00
DDMeaqua d184eb6458 chore: cmd + shift+ backspace 2024-12-31 14:50:54 +08:00
DDMeaqua c5d9b1131e fix: merge bug 2024-12-31 14:38:58 +08:00
DDMeaqua e13408dd24 Merge branch 'main' into feat-shortcutkey 2024-12-31 14:30:09 +08:00
DDMeaqua aba4baf384 chore: update 2024-12-31 14:25:43 +08:00
DDMeaqua 6d84f9d3ae chore: update 2024-12-31 13:27:15 +08:00
Dogtiti 63c5baaa80
Merge pull request #6010 from code-october/fix-visionModels
修复 VISION_MDOELS 在 docker 运行阶段不生效的问题
2024-12-31 09:56:46 +08:00
Dogtiti defefba925
Merge pull request #6016 from bestsanmao/add_deepseek
fix issue #6009  add setting items for deepseek
2024-12-30 19:27:20 +08:00
suruiqiang 90c531c224 fix issue #6009 add setting items for deepseek 2024-12-30 18:23:18 +08:00
code-october 266e9efd2e rename the function 2024-12-30 09:13:12 +00:00
code-october 57c88c0717 修复 VISION_MDOELS 在 docker 运行阶段不生效的问题 2024-12-30 08:58:41 +00:00
DDMeaqua 5b5dea1c59 chore: 更换快捷键 2024-12-30 12:11:50 +08:00
Dogtiti d56566cd73
Merge pull request #6001 from bestsanmao/add_deepseek
docs: add DEEPSEEK_API_KEY and DEEPSEEK_URL in README
2024-12-30 09:42:22 +08:00
suruiqiang b5d104c908 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into add_deepseek 2024-12-30 09:04:40 +08:00
RiverRay f9e9129d52
Update README.md 2024-12-29 19:57:27 +08:00
suruiqiang 2a8a18391e docs: add DEEPSEEK_API_KEY and DEEPSEEK_URL in README 2024-12-29 15:31:50 +08:00
Dogtiti e1cb8e36fa
Merge pull request #5989 from bestsanmao/add_deepseek
since #5984, add DeepSeek as a new ModelProvider (with deepseek-chat&deepseek-coder models), so that user can use openai and deepseek at same time with different api url & key
2024-12-29 12:35:21 +08:00
suruiqiang b948d6bf86 bug fix 2024-12-29 11:24:57 +08:00
Kadxy fe67f79050 feat: MCP message type 2024-12-29 09:24:52 +08:00
suruiqiang 67338ff9b7 add KnowledgeCutOffDate for deepseek 2024-12-29 08:58:45 +08:00
suruiqiang 7380c8a2c1 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web into add_deepseek 2024-12-29 08:43:25 +08:00
Kadxy e1ba8f1b0f feat: Send MCP response as a user 2024-12-29 08:29:02 +08:00
Dogtiti c0062ff280
Merge pull request #5998 from dupl/main
Use regular expressions to make the code more concise.
2024-12-29 00:22:13 +08:00
dupl 39e593da48 Use regular expressions to make the code more concise. 2024-12-28 23:49:28 +08:00
Dogtiti f8b10ad8b1
Merge pull request #5997 from ChatGPTNextWeb/feature/glm-4v
feature: support glm-4v
2024-12-28 23:34:44 +08:00
Dogtiti 8a22c9d6db feature: support glm-4v 2024-12-28 23:33:06 +08:00
RiverRay 5f96804f3b
Merge pull request #5920 from fishshi/i18n
Use i18n for DISCOVERY
2024-12-28 22:05:37 +08:00
RiverRay 13430ea3e2
Merge pull request #5965 from zmhuanf/temp
Fix issue #5964: Prevents character loss in gemini-2.0-flash-thinking-exp-1219 responses
2024-12-28 22:02:02 +08:00
Kadxy 664879b9df feat: Create all MCP Servers at startup 2024-12-28 21:06:26 +08:00
Dogtiti 9df24e568b
Merge pull request #5996 from ChatGPTNextWeb/feature/cogview
Feature/cogview
2024-12-28 20:25:25 +08:00
Dogtiti bc322be448 fix: type error 2024-12-28 20:24:08 +08:00
Dogtiti a867adaf04 fix: size 2024-12-28 20:23:51 +08:00
Dogtiti 0cb186846a feature: support glm Cogview 2024-12-28 20:23:44 +08:00
Dogtiti e467ce028d
Merge pull request #5994 from ConnectAI-E/fix/failed-test
fix: failed unit test
2024-12-28 17:55:29 +08:00
Dogtiti cdfe907fb5 fix: failed unit test 2024-12-28 17:54:21 +08:00
Dogtiti d91af7f983
Merge pull request #5883 from code-october/fix/model-leak
fix model leak issue
2024-12-28 14:47:35 +08:00
Kadxy c3108ad333 feat: simple MCP example 2024-12-28 14:31:43 +08:00
suruiqiang 081daf937e since #5984, add DeepSeek as a new ModelProvider (with deepseek-chat&deepseek-coder models), so that user can use openai and deepseek at same time with different api url&key 2024-12-27 16:57:26 +08:00
RiverRay 0c3d4462ca
Merge pull request #5976 from ChatGPTNextWeb/Leizhenpeng-patch-1
Update README.md
2024-12-23 22:47:59 +08:00
RiverRay 3c859fc29f
Update README.md 2024-12-23 22:47:16 +08:00
river e1c7c54dfa chore: change md 2024-12-23 22:32:36 +08:00
zmhuanf 87b5e3bf62 修复bug; 2024-12-22 15:44:47 +08:00
Dogtiti 1d15666713
Merge pull request #5919 from Yiming3/feature/flexible-visual-model
feat: runtime configuration of vision-capable models
2024-12-22 10:37:57 +08:00
Yiming Zhang a127ae1fb4 docs: add VISION_MODELS section to README files 2024-12-21 13:12:41 -05:00
Yiming Zhang ea1329f73e fix: add optional chaining to prevent errors when accessing visionModels 2024-12-21 04:07:58 -05:00
Yiming Zhang 149d732cb7 Merge remote-tracking branch 'upstream/main' into feature/flexible-visual-model 2024-12-21 03:53:05 -05:00
Yiming Zhang 210b29bfbe refactor: remove NEXT_PUBLIC_ prefix from VISION_MODELS env var 2024-12-21 03:51:54 -05:00
Dogtiti acc2e97aab
Merge pull request #5959 from dupl/gemini
add gemini-exp-1206, gemini-2.0-flash-thinking-exp-1219
2024-12-21 16:30:09 +08:00
dupl 93ac0e5017
Reorganized the Gemini model 2024-12-21 15:26:33 +08:00
Yiming Zhang ed8c3580c8 test: add unit tests for isVisionModel utility function 2024-12-20 19:07:00 -05:00
dupl 0a056a7c5c add gemini-exp-1206, gemini-2.0-flash-thinking-exp-1219 2024-12-21 08:00:37 +08:00
Yiming Zhang 74c4711cdd Merge remote-tracking branch 'upstream/main' into feature/flexible-visual-model 2024-12-20 18:34:07 -05:00
Dogtiti eceec092cf
Merge pull request #5932 from fengzai6/update-google-models
Update google models to add gemini-2.0
2024-12-21 00:43:02 +08:00
Dogtiti 42743410a8
Merge pull request #5940 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/react-16.1.0
chore(deps-dev): bump @testing-library/react from 16.0.1 to 16.1.0
2024-12-21 00:41:45 +08:00
Dogtiti 0f04756d4c
Merge pull request #5936 from InitialXKO/main
面具“以文搜图”改成“AI文生图”,微调提示让图片生成更稳定无水印
2024-12-21 00:40:45 +08:00
dependabot[bot] acdded8161
chore(deps-dev): bump @testing-library/react from 16.0.1 to 16.1.0
Bumps [@testing-library/react](https://github.com/testing-library/react-testing-library) from 16.0.1 to 16.1.0.
- [Release notes](https://github.com/testing-library/react-testing-library/releases)
- [Changelog](https://github.com/testing-library/react-testing-library/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/react-testing-library/compare/v16.0.1...v16.1.0)

---
updated-dependencies:
- dependency-name: "@testing-library/react"
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-16 10:57:34 +00:00
InitialXKO e939ce5a02
面具“以文搜图”改成“AI文生图”,微调提示让图片生成更稳定无水印 2024-12-13 22:29:14 +08:00
Nacho.L 46a0b100f7 Update versionKeywords 2024-12-13 08:29:43 +08:00
Nacho.L e27e8fb0e1 Update google models 2024-12-13 07:22:16 +08:00
fishshi 93c5320bf2 Use i18n for DISCOVERY 2024-12-10 15:56:04 +08:00
Yiming Zhang a433d1606c feat: use regex patterns for vision models and allow adding capabilities to models through env var NEXT_PUBLIC_VISION_MODELS. 2024-12-10 00:22:45 -05:00
code-october cc5e16b045 update unit test 2024-11-30 07:30:52 +00:00
code-october 54f6feb2d7 update unit test 2024-11-30 07:28:38 +00:00
code-october e1ac0538b8 add unit test 2024-11-30 07:22:24 +00:00
code-october 1a678cb4d8 fix model leak issue 2024-11-29 15:47:28 +00:00
Dogtiti 83cea3a90d
Merge pull request #5879 from frostime/textline-custom-model
🎨 style(setting): Place custom-model's input a separated row.
2024-11-28 12:02:42 +08:00
frostime 759a09a76c 🎨 style(setting): Place custom-model's input a separated row. 2024-11-27 13:11:18 +08:00
Dogtiti 2623a92763
Merge pull request #5850 from code-october/fix-o1
Fix o1
2024-11-25 12:31:36 +08:00
Dogtiti 3932c594c7
Merge pull request #5861 from code-october/update-model
update new model for gpt-4o and gemini-exp
2024-11-22 20:59:30 +08:00
code-october b7acb89096 update new model for gpt-4o and gemini-exp 2024-11-22 09:48:50 +00:00
code-october ef24d3e633 use stream when request o1 2024-11-21 03:46:10 +00:00
code-october 23350c842b fix o1 in disableGPT4 2024-11-21 03:45:07 +00:00
Dogtiti a2adfbbd32
Merge pull request #5821 from Sherlocksuper/scroll
feat: support more user-friendly scrolling
2024-11-16 15:24:46 +08:00
Lloyd Zhou f22cec1eb4
Merge pull request #5827 from ConnectAI-E/fix/markdown-embed-codeblock
fix: 代码块嵌入小代码块时渲染错误
2024-11-15 16:03:27 +08:00
opchips e56216549e fix: 代码块嵌入小代码块时渲染错误 2024-11-15 11:56:26 +08:00
Sherlock 19facc7c85 feat: support more user-friendly scrolling 2024-11-14 21:31:45 +08:00
Lloyd Zhou b08ce5630c
Merge pull request #5819 from ConnectAI-E/fix-gemini-summary
Fix gemini summary
2024-11-13 15:17:44 +08:00
DDMeaqua b41c012d27 chore: shouldStream 2024-11-13 15:12:46 +08:00
Lloyd Zhou a392daab71
Merge pull request #5816 from ConnectAI-E/feature/artifacts-svg
artifacts support svg
2024-11-13 14:58:33 +08:00
DDMeaqua 0628ddfc6f chore: update 2024-11-13 14:27:41 +08:00
DDMeaqua 7eda14f138 fix: [#5308] gemini对话总结 2024-11-13 14:24:44 +08:00
opchips 9a86c42c95 update 2024-11-12 16:33:55 +08:00
Lloyd Zhou 819d249a09
Merge pull request #5815 from LovelyGuYiMeng/main
更新视觉模型匹配关键词
2024-11-12 15:04:11 +08:00
LovelyGuYiMeng 8d66fedb1f
Update visionKeywords 2024-11-12 14:28:11 +08:00
Lloyd Zhou 7cf89b53ce
Merge pull request #5812 from ConnectAI-E/fix/rerender-chat
fix: use current session id to trigger rerender
2024-11-12 13:49:51 +08:00
Dogtiti 459c373f13
Merge pull request #5807 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/jest-dom-6.6.3
chore(deps-dev): bump @testing-library/jest-dom from 6.6.2 to 6.6.3
2024-11-11 20:59:56 +08:00
Dogtiti 1d14a991ee fix: use current session id to trigger rerender 2024-11-11 20:30:59 +08:00
dependabot[bot] 05ef5adfa7
chore(deps-dev): bump @testing-library/jest-dom from 6.6.2 to 6.6.3
Bumps [@testing-library/jest-dom](https://github.com/testing-library/jest-dom) from 6.6.2 to 6.6.3.
- [Release notes](https://github.com/testing-library/jest-dom/releases)
- [Changelog](https://github.com/testing-library/jest-dom/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/jest-dom/compare/v6.6.2...v6.6.3)

---
updated-dependencies:
- dependency-name: "@testing-library/jest-dom"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-11 10:53:00 +00:00
lloydzhou 38fa3056df update version 2024-11-11 13:26:08 +08:00
Lloyd Zhou 289aeec8af
Merge pull request #5786 from ConnectAI-E/feature/realtime-chat
Feature/realtime chat
2024-11-11 13:19:26 +08:00
lloydzhou 7d71da938f remove close-24 svg 2024-11-11 13:15:09 +08:00
Lloyd Zhou f8f6954115
Merge pull request #5779 from ConnectAI-E/feature/model/claude35haiku
add claude35haiku & not support vision
2024-11-11 13:13:09 +08:00
Lloyd Zhou 6e03f32871
Merge pull request #5795 from JingSyue/main
fix: built-in plugin dalle3 error #5787
2024-11-11 13:10:00 +08:00
JingSyue 18a6571883
Update proxy.ts
Update proxy.ts
2024-11-11 12:59:29 +08:00
Dogtiti 14f444e1f0 doc: realtime chat 2024-11-11 11:47:41 +08:00
JingSyue 2b0f2e5f9d fix: built-in plugin dalle3 error #5787 2024-11-10 10:28:25 +08:00
Dogtiti 4629b39c29 chore: comment context history 2024-11-09 16:22:01 +08:00
Dogtiti d33e772fa5 feat: voice print 2024-11-08 22:39:17 +08:00
Dogtiti 89136fba32 feat: voice print 2024-11-08 22:18:39 +08:00
Dogtiti 8b4ca133fd feat: voice print 2024-11-08 22:02:31 +08:00
lloydzhou a4c9eaf6cd do not save empty audio file 2024-11-08 13:43:13 +08:00
lloydzhou 50e63109a3 merge code and get analyser data 2024-11-08 13:21:40 +08:00
Dogtiti 48a1e8a584 chore: i18n 2024-11-07 21:32:47 +08:00
Dogtiti e44ebe3f0e feat: realtime config 2024-11-07 21:28:23 +08:00
Lloyd Zhou 108069a0c6
Merge pull request #5788 from ConnectAI-E/fix-o1-maxtokens
chore: o1模型使用max_completion_tokens
2024-11-07 20:06:30 +08:00
DDMeaqua d5bda2904d chore: o1模型使用max_completion_tokens 2024-11-07 19:45:27 +08:00
lloydzhou 283caba8ce stop streaming play after get input audio. 2024-11-07 18:57:57 +08:00
lloydzhou b78e5db817 add temperature config 2024-11-07 17:55:51 +08:00
lloydzhou 46c469b2d7 add voice config 2024-11-07 17:47:55 +08:00
lloydzhou c00ebbea4f update 2024-11-07 17:40:03 +08:00
lloydzhou c526ff80b5 update 2024-11-07 17:23:20 +08:00
lloydzhou 0037b0c944 ts error 2024-11-07 17:03:04 +08:00
lloydzhou 6f81bb3b8a add context after connected 2024-11-07 16:56:15 +08:00
lloydzhou 7bdc45ed3e connect realtime model when open panel 2024-11-07 16:41:24 +08:00
Dogtiti 88cd3ac122 fix: ts error 2024-11-07 12:16:11 +08:00
Dogtiti 4988d2ee26 fix: ts error 2024-11-07 11:56:58 +08:00
lloydzhou 8deb7a92ee hotfix for update target session 2024-11-07 11:53:01 +08:00
lloydzhou db060d732a upload save record wav file 2024-11-07 11:45:38 +08:00
lloydzhou 522627820a upload save wav file logic 2024-11-07 09:36:22 +08:00
lloydzhou cf46d5ad63 upload response audio, and update audio_url to session message 2024-11-07 01:12:08 +08:00
Dogtiti a4941521d0 feat: audio to message 2024-11-06 22:30:02 +08:00
Dogtiti f6e1f8398b wip 2024-11-06 22:07:33 +08:00
Dogtiti d544eead38 feat: realtime chat ui 2024-11-06 21:14:45 +08:00
Lloyd Zhou fbb9385f23
Merge pull request #5782 from ConnectAI-E/style/classname
style: improve classname by clsx
2024-11-06 20:33:51 +08:00
Dogtiti 18144c3d9c chore: clsx 2024-11-06 20:16:38 +08:00
opchips 64aa760e58 update claude rank 2024-11-06 19:18:05 +08:00
Dogtiti e0bbb8bb68 style: improve classname by clsx 2024-11-06 16:58:26 +08:00
opchips 6667ee1c7f merge main 2024-11-06 15:08:18 +08:00
Lloyd Zhou 6ded4e96e7
Merge pull request #5778 from ConnectAI-E/fix/5436
fix: botMessage reply date
2024-11-06 15:04:46 +08:00
Dogtiti 85cdcab850 fix: botMessage reply date 2024-11-06 14:53:08 +08:00
Lloyd Zhou f4c9410c29
Merge pull request #5776 from ConnectAI-E/feat-glm
fix: glm chatpath
2024-11-06 14:02:20 +08:00
DDMeaqua adf7d8200b fix: glm chatpath 2024-11-06 13:55:57 +08:00
opchips 3086a2fa77 add claude35haiku not vision 2024-11-06 12:56:24 +08:00
Lloyd Zhou f526d6f560
Merge pull request #5774 from ConnectAI-E/feature/update-target-session
fix: updateCurrentSession => updateTargetSession
2024-11-06 11:16:33 +08:00
Dogtiti 106461a1e7 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/update-target-session 2024-11-06 11:08:41 +08:00
Dogtiti c4e19dbc59 fix: updateCurrentSession => updateTargetSession 2024-11-06 11:06:18 +08:00
Dogtiti f3603e59fa
Merge pull request #5769 from ryanhex53/fix-model-multi@
Custom model names can include the `@` symbol by itself.
2024-11-06 10:49:28 +08:00
ryanhex53 8e2484fcdf Refactor: Replace all provider split occurrences with getModelProvider utility method 2024-11-05 13:52:54 +00:00
lloydzhou 00d6cb27f7 update version 2024-11-05 17:42:55 +08:00
ryanhex53 b844045d23 Custom model names can include the `@` symbol by itself.
To specify the model's provider, append it after the model name using `@` as before.

This format supports cases like `google vertex ai` with a model name like `claude-3-5-sonnet@20240620`.

For instance, `claude-3-5-sonnet@20240620@vertex-ai` will be split by `split(/@(?!.*@)/)` into:

`[ 'claude-3-5-sonnet@20240620', 'vertex-ai' ]`, where the former is the model name and the latter is the custom provider.
2024-11-05 07:44:12 +00:00
Lloyd Zhou e49fe976d9
Merge pull request #5765 from ConnectAI-E/feature/onfinish
feat: update real 'currentSession'
2024-11-05 15:07:52 +08:00
Dogtiti 14f751965f
Merge pull request #5767 from ConnectAI-E/feat-glm
chore: update readme
2024-11-05 11:07:52 +08:00
DDMeaqua 0ec423389f chore: update readme 2024-11-05 11:06:20 +08:00
Dogtiti 820ab54e2d
Merge pull request #5766 from ConnectAI-E/feature/add-claude-haiku3.5
Feature/add claude haiku3.5
2024-11-05 10:54:52 +08:00
lloydzhou a6c1eb27a8 add claude 3.5 haiku 2024-11-05 10:23:15 +08:00
Lloyd Zhou 0dc4071ccc
Merge pull request #5464 from endless-learner/main
Added 1-click deployment link for Alibaba Cloud.
2024-11-05 01:10:06 +08:00
Lloyd Zhou 4d3949718a
merge main 2024-11-05 01:09:27 +08:00
Dogtiti aef535f1a7
Merge pull request #5753 from ChatGPTNextWeb/feat-bt-doc
Feat bt doc
2024-11-04 21:41:11 +08:00
Dogtiti 686a80e727
Merge pull request #5764 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/react-16.0.1
chore(deps-dev): bump @testing-library/react from 16.0.0 to 16.0.1
2024-11-04 21:37:34 +08:00
Dogtiti e49466fa05 feat: update real 'currentSession' 2024-11-04 21:25:56 +08:00
dependabot[bot] 4b93370814
chore(deps-dev): bump @testing-library/react from 16.0.0 to 16.0.1
Bumps [@testing-library/react](https://github.com/testing-library/react-testing-library) from 16.0.0 to 16.0.1.
- [Release notes](https://github.com/testing-library/react-testing-library/releases)
- [Changelog](https://github.com/testing-library/react-testing-library/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/react-testing-library/compare/v16.0.0...v16.0.1)

---
updated-dependencies:
- dependency-name: "@testing-library/react"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-04 10:24:30 +00:00
Dogtiti 5733e3c588
Merge pull request #5759 from ConnectAI-E/feature/onfinish
Feature/onfinish
2024-11-04 17:16:44 +08:00
Dogtiti 44fc5b5cbf fix: onfinish responseRes 2024-11-04 17:00:45 +08:00
Dogtiti 2d3f7c922f fix: vision model dalle3 2024-11-04 15:51:04 +08:00
GH Action - Upstream Sync fe8cca3730 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-11-02 01:12:09 +00:00
weige fbb7a1e853 fix 2024-11-01 18:20:16 +08:00
weige fb2c15567d fix 2024-11-01 17:45:50 +08:00
weige c2c52a1f60 fix 2024-11-01 17:35:34 +08:00
weige 106ddc17cd fix 2024-11-01 17:35:09 +08:00
weige 17d5209738 add bt install doc 2024-11-01 17:28:20 +08:00
Dogtiti d66bfc6352
Merge pull request #5752 from ConnectAI-E/feat-glm
fix: ts error
2024-11-01 14:16:50 +08:00
DDMeaqua 4d75b23ed1 fix: ts error 2024-11-01 14:15:12 +08:00
Dogtiti 36bfa2ef7c
Merge pull request #5741 from ConnectAI-E/feat-glm
feat: [#5714] 支持GLM
2024-11-01 13:57:30 +08:00
DDMeaqua afe12c212e chore: update 2024-11-01 13:53:43 +08:00
GH Action - Upstream Sync adf97c6d8b Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-11-01 01:18:59 +00:00
DDMeaqua 7a8d557ea3 chore: 开启插件 2024-10-31 11:37:19 +08:00
DDMeaqua d3f0a77830 chore: update Provider 2024-10-31 11:23:06 +08:00
Dogtiti 0581e37236
Merge pull request #5744 from mrcore/main
add  claude-3-5-sonnet-latest and claude-3-opus-latest
2024-10-31 11:19:34 +08:00
Core 44383a8b33
add claude-3-5-sonnet-latest and claude-3-opus-latest
add  claude-3-5-sonnet-latest and claude-3-opus-latest
2024-10-31 11:00:45 +08:00
GH Action - Upstream Sync 7c466c9b9c Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-10-31 01:14:28 +00:00
Dogtiti a0fa4d7e72
Merge pull request #5737 from hyiip/claude3.5
add constant to claude 3.5 sonnet 20241022
2024-10-31 00:13:16 +08:00
DDMeaqua d357b45e84 feat: [#5714] 支持GLM 2024-10-30 19:24:03 +08:00
Lloyd Zhou d0bd1bf8fd
Merge pull request #5740 from yuxuan-ctrl/main
feat: 新增阿里系模型代码配置
2024-10-30 16:56:53 +08:00
yuxuan-ctrl 86ffa1e643 feat: 新增阿里系模型代码配置 2024-10-30 16:30:01 +08:00
endless-learner b0d28eb77e
Merge branch 'main' into main 2024-10-29 14:38:49 -07:00
hyiip 736cbdbdd1 add constant to claude 3.5 sonnet 20241022 2024-10-30 02:18:41 +08:00
Dogtiti 613d67eada
Merge pull request #5729 from ConnectAI-E/feature/jest
chore: improve jest
2024-10-29 19:39:59 +08:00
Dogtiti 89cea18955 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/jest 2024-10-29 19:26:52 +08:00
Dogtiti 56bc77d20b
Merge pull request #5731 from ChatGPTNextWeb/dependabot/npm_and_yarn/testing-library/jest-dom-6.6.2
Bump @testing-library/jest-dom from 6.4.8 to 6.6.2
2024-10-28 21:52:08 +08:00
Dogtiti 6d93d37963
Merge pull request #5732 from ChatGPTNextWeb/dependabot/npm_and_yarn/types/jest-29.5.14
Bump @types/jest from 29.5.13 to 29.5.14
2024-10-28 21:51:59 +08:00
dependabot[bot] 24df85cf9d
Bump @types/jest from 29.5.13 to 29.5.14
Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.13 to 29.5.14.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest)

---
updated-dependencies:
- dependency-name: "@types/jest"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:31:34 +00:00
dependabot[bot] a4d7a2c6e3
Bump @testing-library/jest-dom from 6.4.8 to 6.6.2
Bumps [@testing-library/jest-dom](https://github.com/testing-library/jest-dom) from 6.4.8 to 6.6.2.
- [Release notes](https://github.com/testing-library/jest-dom/releases)
- [Changelog](https://github.com/testing-library/jest-dom/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testing-library/jest-dom/compare/v6.4.8...v6.6.2)

---
updated-dependencies:
- dependency-name: "@testing-library/jest-dom"
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:31:27 +00:00
Dogtiti 49d42bb45d chore: improve jest 2024-10-28 16:47:05 +08:00
Lloyd Zhou 4f49626303
Merge pull request #5722 from ElricLiu/main
Update README.md
2024-10-26 12:09:09 +08:00
ElricLiu 45db20c1c3
Update README.md 2024-10-26 11:16:43 +08:00
Lloyd Zhou 82994843f5
Merge pull request #5719 from ConnectAI-E/hotfix/status_text_error
hotfix for statusText is non ISO-8859-1 #5717
2024-10-25 20:34:15 +08:00
Dogtiti 1110a087a0
Merge pull request #5720 from ConnectAI-E/hotfix/gemini_invald_argument
hotfix for gemini invalid argument #5715
2024-10-25 18:25:46 +08:00
lloydzhou f0b3e10a6c hotfix for gemini invalid argument #5715 2024-10-25 18:19:22 +08:00
lloydzhou f89872b833 hotfix for gemini invalid argument #5715 2024-10-25 18:12:09 +08:00
lloydzhou 90ced92876 update 2024-10-25 18:05:29 +08:00
lloydzhou 2c74559010 hotfix 2024-10-25 18:02:51 +08:00
lloydzhou e3ca7e8b44 hotfix for statusText is non ISO-8859-1 #5717 2024-10-25 17:52:08 +08:00
lloydzhou 4745706c42 update version to v2.15.6 2024-10-24 15:32:27 +08:00
lloydzhou 801dc412f9 add claude-3.5-haiku 2024-10-24 15:28:05 +08:00
Dogtiti c7c2c0211a
Merge pull request #5704 from ConnectAI-E/feature/xai
xAi support
2024-10-23 14:13:17 +08:00
lloydzhou 65bb962fc0 hotfix 2024-10-23 12:00:59 +08:00
lloydzhou e791cd441d add xai 2024-10-23 11:55:25 +08:00
lloydzhou 8455fefc8a add xai 2024-10-23 11:40:06 +08:00
Lloyd Zhou 06f897f32f
Merge pull request #5679 from ConnectAI-E/fix/fetch
fix: use tauri fetch
2024-10-16 22:02:16 +08:00
Dogtiti deb1e76c41 fix: use tauri fetch 2024-10-16 21:57:07 +08:00
lloydzhou 463fa743e9 update version 2024-10-15 16:10:44 +08:00
Dogtiti cda4494cec
Merge pull request #5632 from ConnectAI-E/feature/H0llyW00dzZ-updater
Feature/h0lly w00dz z updater
2024-10-15 14:31:49 +08:00
lloydzhou 87d85c10c3 update 2024-10-14 21:48:36 +08:00
Dogtiti 22f83c9e11
Merge pull request #5666 from ChatGPTNextWeb/dependabot/npm_and_yarn/types/jest-29.5.13
Bump @types/jest from 29.5.12 to 29.5.13
2024-10-14 20:36:53 +08:00
dependabot[bot] 7f454cbcec
Bump @types/jest from 29.5.12 to 29.5.13
Bumps [@types/jest](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/jest) from 29.5.12 to 29.5.13.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/jest)

---
updated-dependencies:
- dependency-name: "@types/jest"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-14 10:49:46 +00:00
lloydzhou 426269d795 Merge remote-tracking branch 'connectai/main' into feature/H0llyW00dzZ-updater 2024-10-14 17:12:08 +08:00
Lloyd Zhou 370f143157
Merge pull request #5661 from ChatGPTNextWeb/remove-pr-preview
update test run target
2024-10-14 17:11:26 +08:00
lloydzhou 103106bb93 update test run target 2024-10-14 17:10:02 +08:00
lloydzhou 2419083adf Merge remote-tracking branch 'connectai/main' into feature/H0llyW00dzZ-updater 2024-10-14 17:04:12 +08:00
Lloyd Zhou c25903bfb4
Merge pull request #5658 from ccq18/main
fix o1系列模型超时时间改为4分钟,
2024-10-14 16:57:29 +08:00
Lloyd Zhou e34c266438
Merge pull request #5660 from ChatGPTNextWeb/remove-pr-preview
update deploy_preview run target
2024-10-14 16:55:48 +08:00
lloydzhou 8c39a687b5 update deploy_preview run target 2024-10-14 16:53:46 +08:00
ccq18 592f62005b 仅修改o1的超时时间为4分钟,减少o1系列模型请求失败的情况 2024-10-14 16:31:17 +08:00
ccq18 12e7caa209 fix 默认超时时间改为3分钟,支持o1-mini 2024-10-14 16:03:01 +08:00
Lloyd Zhou b016771555
Merge pull request #5599 from ConnectAI-E/feature/allow-send-image-only
feat: allow send image only
2024-10-14 15:11:28 +08:00
Dogtiti a84383f919
Merge pull request #5647 from code-october/fix/setting-locale
修改“压缩模型”名称,增加“生成对话标题”的功能提示
2024-10-13 01:49:51 +08:00
code-october 7f68fb1ff2 修改“压缩模型”名称,增加“生成对话标题”的功能提示 2024-10-12 16:49:24 +00:00
Dogtiti 8d2003fe68
Merge pull request #5644 from ConnectAI-E/fix/siderbar-style
fix: sidebar style
2024-10-12 14:56:01 +08:00
Dogtiti 9961b513cc fix: sidebar style 2024-10-12 14:54:22 +08:00
Dogtiti 819238acaf fix: i18n 2024-10-11 20:49:43 +08:00
Dogtiti ad49916b1c
Merge pull request #5638 from ConnectAI-E/chore/test-action
chore: improve test
2024-10-11 20:44:20 +08:00
Dogtiti d18bd8a48a
Merge pull request #5640 from code-october/feature/enableCodeFold
支持前端使能/禁用代码折叠
2024-10-11 20:43:43 +08:00
code-october 4a1319f2c0 代码安全优化 2024-10-11 11:57:23 +00:00
code-october 8fd843d228 参考coderabbitai建议规范代码 2024-10-11 11:38:52 +00:00
code-october 6792d6e475 支持前端使能/禁用代码折叠 2024-10-11 11:19:36 +00:00
Lloyd Zhou c139038e01
Merge pull request #5639 from code-october/fix/auth-ui
优化访问码输入框
2024-10-11 19:11:35 +08:00
code-october 4a7fd3a380 优化首页 api 输入框 2024-10-11 10:36:11 +00:00
code-october c98dc31cdf 优化访问码输入框 2024-10-11 09:03:20 +00:00
Dogtiti bd43af3a8d chore: cache node_modules 2024-10-11 15:41:46 +08:00
Dogtiti be98aa2078 chore: improve test 2024-10-11 15:17:38 +08:00
lloydzhou a0d4a04192 update 2024-10-11 11:52:24 +08:00
lloydzhou bd9de4dc4d fix version compare 2024-10-11 11:42:36 +08:00
lloydzhou 2eebfcf6fe client app tauri updater #2966 2024-10-11 11:29:22 +08:00
Lloyd Zhou c5074f0aa4
Merge pull request #5581 from ConnectAI-E/feature/gemini-functioncall
google gemini support function call
2024-10-10 21:02:36 +08:00
Lloyd Zhou ba58018a15
Merge pull request #5211 from ConnectAI-E/feature/jest
feat: jest
2024-10-10 21:02:05 +08:00
Lloyd Zhou 63ab83c3c8
Merge pull request #5621 from ConnectAI-E/hotfix/plugin-result
hotfix plugin result is not string #5614
2024-10-10 12:48:55 +08:00
lloydzhou 268cf3b606 hotfix plugin result is not string #5614 2024-10-10 12:47:25 +08:00
Lloyd Zhou fbc68fa776
Merge pull request #5602 from PeterDaveHello/ImproveTwLocale
i18n: improve tw Traditional Chinese locale
2024-10-09 19:38:06 +08:00
lloydzhou 4ae34ea3ee merge main 2024-10-09 18:27:23 +08:00
Lloyd Zhou 96273fd75e
Merge pull request #5611 from ConnectAI-E/feature/tauri-fetch-update
make sure get request_id before body chunk
2024-10-09 16:18:37 +08:00
lloydzhou 3e63d405c1 update 2024-10-09 16:12:01 +08:00
Lloyd Zhou 19b42aac5d
Merge pull request #5608 from ConnectAI-E/fix-readme
fix: [#5574] readme
2024-10-09 14:49:34 +08:00
Lloyd Zhou b67a23200e
Merge pull request #5610 from ChatGPTNextWeb/lloydzhou-patch-1
Update README.md
2024-10-09 14:48:55 +08:00
Lloyd Zhou 1dac02e4d6
Update README.md 2024-10-09 14:48:43 +08:00
Lloyd Zhou acad5b1d08
Merge pull request #5609 from ElricLiu/main
Update README.md
2024-10-09 14:45:27 +08:00
ElricLiu 4e9bb51d2f
Update README.md 2024-10-09 14:43:49 +08:00
DDMeaqua c0c8cdbbf3 fix: [#5574] 文档错误 2024-10-09 14:36:58 +08:00
Lloyd Zhou cbdc611b54
Merge pull request #5607 from ConnectAI-E/hotfix/summarize-model
fix compressModel, related #5426, fix #5606 #5603 #5575
2024-10-09 14:08:13 +08:00
lloydzhou 93ca303b6c fix ts error 2024-10-09 13:49:33 +08:00
lloydzhou a925b424a8 fix compressModel, related #5426, fix #5606 #5603 #5575 2024-10-09 13:42:25 +08:00
Lloyd Zhou 5b4d423b58
Merge pull request #5565 from ConnectAI-E/feature/using-tauri-fetch
Feat: using tauri fetch api in App
2024-10-09 13:03:01 +08:00
lloydzhou 6c1cbe120c update 2024-10-09 11:46:49 +08:00
Peter Dave Hello 77a58bc4b0 i18n: improve tw Traditional Chinese locale 2024-10-09 03:14:38 +08:00
Dogtiti 7d55a6d0e4 feat: allow send image only 2024-10-08 21:31:01 +08:00
Dogtiti 8ad63a6c25
Merge pull request #5586 from little-huang/patch-1
fix: correct typo in variable name from ALLOWD_PATH to ALLOWED_PATH
2024-10-08 15:26:41 +08:00
Dogtiti acf9fa36f9 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/jest 2024-10-08 10:30:47 +08:00
Dogtiti 461154bb03 fix: format package 2024-10-08 10:29:42 +08:00
little_huang cd75461f9e
fix: correct typo in variable name from ALLOWD_PATH to ALLOWED_PATH 2024-10-07 10:30:25 +08:00
Dogtiti 2bac174e6f
Merge pull request #4393 from ChatGPTNextWeb/dean-delete-escapeDollarNumber
bugfix: Delete the escapeDollarNumber function, which causes errors i…
2024-10-06 12:41:03 +08:00
Lloyd Zhou 65f80f81ad
Merge branch 'main' into dean-delete-escapeDollarNumber 2024-10-04 14:31:00 +08:00
lloydzhou 450766a44b google gemini support function call 2024-10-03 20:28:15 +08:00
Lloyd Zhou 05e6e4bffb
Merge pull request #5578 from code-october/fix/safe-equal
use safe equal operation
2024-10-03 10:59:32 +08:00
code-october fbb66a4a5d use safe equal operation 2024-10-03 02:08:10 +00:00
lloydzhou d51d31a559 update 2024-10-01 14:40:23 +08:00
lloydzhou 919ee51dca hover show errorMsg when plugin run error 2024-10-01 13:58:50 +08:00
lloydzhou 9c577ad9d5 hotfix for plugin runtime 2024-10-01 12:55:57 +08:00
lloydzhou 953114041b add connect timeout 2024-10-01 12:02:29 +08:00
lloydzhou d830c23dab hotfix for run plugin call post api 2024-09-30 15:38:13 +08:00
lloydzhou fd3568c459 hotfix for run plugin call post api 2024-09-30 15:33:40 +08:00
lloydzhou 3029dcb2f6 hotfix for run plugin call post api 2024-09-30 15:32:47 +08:00
lloydzhou 35e03e1bca remove code 2024-09-30 13:44:01 +08:00
Lloyd Zhou cea5b91f96
Merge pull request #5567 from ChatGPTNextWeb/fix-readme
update  readme
2024-09-30 13:31:34 +08:00
lyf d2984db6e7 fix readme 2024-09-30 13:28:14 +08:00
lyf deb215ccd1 fix readme 2024-09-30 13:23:24 +08:00
lloydzhou 7173cf2184 update 2024-09-30 13:07:06 +08:00
Lloyd Zhou 0c697e123d
Merge pull request #5564 from code-october/fix/html-code
fix quoteEnd extract regex
2024-09-30 13:06:52 +08:00
lloydzhou edfa6d14ee update 2024-09-30 10:23:24 +08:00
lloydzhou b6d9ba93fa update 2024-09-30 10:18:30 +08:00
lloydzhou 6293b95a3b update default api base url 2024-09-30 10:13:11 +08:00
lloydzhou ef4665cd8b update 2024-09-30 02:57:51 +08:00
lloydzhou 8030e71a5a update 2024-09-30 02:33:02 +08:00
lloydzhou f42488d4cb using stream fetch replace old tauri http fetch 2024-09-30 02:28:19 +08:00
lloydzhou af49ed4fdc update 2024-09-30 01:51:14 +08:00
lloydzhou b174a40634 update 2024-09-30 01:44:27 +08:00
lloydzhou 3c01738c29 update 2024-09-30 01:37:16 +08:00
lloydzhou 9be58f3eb4 fix ts error 2024-09-30 01:30:20 +08:00
lloydzhou a50c282d01 remove DEFAULT_API_HOST 2024-09-30 01:19:20 +08:00
lloydzhou 5141145e4d revert plugin runtime using tarui/api/http, not using fetch_stream 2024-09-30 00:58:50 +08:00
lloydzhou b5f6e5a598 update 2024-09-30 00:38:30 +08:00
lloydzhou 7df308d655 Merge remote-tracking branch 'connectai/main' into feature/using-tauri-fetch 2024-09-29 23:36:17 +08:00
code-october f5ad51a35e fix quoteEnd extract regex 2024-09-29 14:29:42 +00:00
lloydzhou f9d4105170 stash code 2024-09-29 21:47:38 +08:00
lloydzhou 9e6ee50fa6 using stream_fetch in App 2024-09-29 20:32:36 +08:00
lloydzhou dd77ad5d74 Merge remote-tracking branch 'connectai/main' into feature/using-tauri-fetch 2024-09-29 19:44:28 +08:00
lloydzhou 3898c507c4 using stream_fetch in App 2024-09-29 19:44:09 +08:00
Lloyd Zhou fcba50f041
Merge pull request #5547 from ConnectAI-E/hotfix/plugin-opration-id
Hotfix/plugin opration
2024-09-29 16:15:02 +08:00
Lloyd Zhou 452fc86ad1
Merge pull request #5562 from ChatGPTNextWeb/hotfix-google-api
hotfix for `x-goog-api-key`
2024-09-29 15:57:20 +08:00
lloydzhou 5bdf411399 hotfix for `x-goog-api-key` 2024-09-29 15:51:28 +08:00
lloydzhou 2d920f7ccc using stream: schema to fetch in App 2024-09-28 15:05:41 +08:00
lloydzhou d84d51b475 using sse: schema to fetch in App 2024-09-28 01:19:39 +08:00
Dogtiti f9d6f4f9da
Merge pull request #5553 from ConnectAI-E/fix/default-model
fix: default model
2024-09-27 21:13:26 +08:00
Lloyd Zhou a13bd624e8
Merge pull request #5552 from joetsuihk/hotfix/upstream-sync-doc
docs: Hotfix/upstream sync doc update
2024-09-27 20:36:16 +08:00
Joe 8fb019b2e2
revert, leave sync.yml untouched
revert commit 19c4ed4463
2024-09-27 17:34:38 +08:00
Joe 2f3457e73d
Update correct links to manually code update section (JP) 2024-09-27 17:33:02 +08:00
Dogtiti c6ebd6e73c fix: default model 2024-09-27 17:00:24 +08:00
Joe 2333a47c55
Update links in doc to manual code update section (CN) 2024-09-27 16:50:51 +08:00
Joe b35895b551
Update correct links to manually code update section 2024-09-27 16:49:08 +08:00
Joe 19c4ed4463
docs links updated sync.yml
https://github.com/Yidadaa/ChatGPT-Next-Web is renamed to https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/
2024-09-27 16:43:50 +08:00
lloydzhou 22aa1698b4 try using method and path when operationId is undefined #5525 2024-09-27 13:31:49 +08:00
lloydzhou 07d089a2bd try using method and path when operationId is undefined #5525 2024-09-27 13:31:07 +08:00
Dogtiti 870ad913cc
Merge pull request #5545 from ConnectAI-E/hotfix/google-auth-header
fix: build error
2024-09-27 11:43:43 +08:00
Dogtiti 3fb389551b fix: build error 2024-09-27 11:42:16 +08:00
Dogtiti d12a4adfb5
Merge pull request #5541 from ConnectAI-E/hotfix/google-auth-header
google api using `x-google-api-key` header
2024-09-27 11:04:10 +08:00
lloydzhou 702e17c96b google api using `x-google-api-key` header 2024-09-26 23:21:42 +08:00
Lloyd Zhou 93ff7d26cc
Merge pull request #5529 from Leizhenpeng/support-saas-readme
Support saas version in readme
2024-09-25 16:34:25 +08:00
river 13777786c4 chore: ja 2024-09-25 16:30:26 +08:00
river 6655c64e55 chore: cn 2024-09-25 16:29:59 +08:00
Dogtiti 7f3f6f1aaf
Merge pull request #5528 from ChatGPTNextWeb/add_tip_top
fix url i18
2024-09-25 16:21:33 +08:00
mayfwl ea04595c5e
Merge branch 'main' into add_tip_top 2024-09-25 16:19:55 +08:00
lyf 13c68bd810 fix url utm 2024-09-25 16:11:57 +08:00
lyf 1d2f44fba8 fix url 2024-09-25 16:00:48 +08:00
lloydzhou 68702bfb1f update version 2024-09-25 15:49:20 +08:00
lyf e83f61e74d fix 2024-09-25 15:21:17 +08:00
lyf 10d472e79e fix 2024-09-25 14:41:41 +08:00
lyf a6b920d9af fix 2024-09-25 14:35:09 +08:00
endless-learner 064e964d75
Updated link to deploy on Alibaba Cloud, readable when not logged in, also, able to choose region. 2024-09-24 23:05:32 -07:00
endless-learner 47fb40d572
Merge branch 'ChatGPTNextWeb:main' into main 2024-09-24 23:03:03 -07:00
river b7892b58f5 chore: support saas 2024-09-25 13:34:04 +08:00
lyf 77f037a3c4 add maidian 2024-09-25 13:08:03 +08:00
lyf 248d27680d fix style 2024-09-25 11:15:00 +08:00
Lloyd Zhou 4c84182e7a
Merge pull request #5522 from ConnectAI-E/fix/5494
fix: prevent title update on invalid message response
2024-09-25 10:53:00 +08:00
endless-learner 9e18cc260b
Update README.md
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2024-09-24 13:55:00 -07:00
Dogtiti e8581c8f3c fix: prevent title update on invalid message response 2024-09-25 00:37:37 +08:00
lyf fe4cba8baf fix style slect button 2024-09-24 22:25:36 +08:00
Dogtiti 9bbd7d3185
Merge pull request #5519 from ConnectAI-E/feature-play-audio-and-video
Feature play audio and video
2024-09-24 22:03:55 +08:00
lloydzhou dbabb2c403 auto play video/audio 2024-09-24 18:52:54 +08:00
lloydzhou 6c37d04591 auto play video/audio 2024-09-24 18:52:48 +08:00
Dogtiti 649c5be64e
Merge pull request #5508 from ConnectAI-E/feature-buildin-plugin
add buildin plugin
2024-09-24 17:28:48 +08:00
Dogtiti fc0042a799
Merge pull request #5515 from DDMeaqua/config-artifacts
Config artifacts
2024-09-24 17:27:33 +08:00
DDMeaqua 269d064e0a fix: #5450 2024-09-24 15:21:27 +08:00
DDMeaqua 6c8143b7de feat: 全局设置是否启用artifacts 2024-09-24 15:15:08 +08:00
lloydzhou f9f99639db update 2024-09-24 12:59:21 +08:00
lyf 6d5bf490ab fix media 2024-09-24 11:36:26 +08:00
Dogtiti 46fc2a5012
Merge pull request #5498 from DDMeaqua/fix-plugin-css
Fix plugin css
2024-09-24 10:23:06 +08:00
lloydzhou 90e7b5aecf try using openai api key for dalle-3 plugin 2024-09-23 20:20:20 +08:00
lloydzhou ed20fd2962 1. add buildin plugin; 2. remove `usingProxy` 2024-09-23 20:00:07 +08:00
Lloyd Zhou 4c3fd55a75
Merge pull request #5495 from ConnectAI-E/Fix-code-duplication
Fix code duplication
2024-09-23 16:06:59 +08:00
lyf d95d509046 fex 2024-09-23 15:43:36 +08:00
lyf c15c852668 fex media 2024-09-23 15:12:45 +08:00
DDMeaqua 4a60512ae7 chore: css 2024-09-23 14:18:32 +08:00
DDMeaqua 0e210cf8de fix: #5486 plugin样式优化 2024-09-23 14:13:09 +08:00
lyf 35aa2c7270 Fix code duplication 2024-09-23 11:34:20 +08:00
lyf 518e0d90a5 fex url 2024-09-23 11:11:36 +08:00
lyf 51f7b02b27 fex en 2024-09-23 10:56:43 +08:00
Lloyd Zhou 23f2b6213c
Merge pull request #5489 from ConnectAI-E/feature-fix-openai-function-call
Feature fix openai function call
2024-09-22 19:08:35 +08:00
lloydzhou 3a969054e3 hotfix openai function call tool_calls no index 2024-09-22 18:59:49 +08:00
lloydzhou 4d1f9e49d4 hotfix openai function call tool_calls no index 2024-09-22 18:53:51 +08:00
lyf 702f5bd362 fex setCookie 2024-09-20 10:22:22 +08:00
Dogtiti 2474d5b6d2
Merge pull request #5304 from dustookk/main
fix no max_tokens in payload when the vision model name does not cont…
2024-09-19 20:34:23 +08:00
lyf 9858d1f958 add top tip 2024-09-19 18:21:26 +08:00
lyf 62efab987b add fanyi add top tip 2024-09-19 16:58:51 +08:00
DDMeaqua 4c63ee23cd feat: #5422 快捷键清除上下文 2024-09-19 15:13:33 +08:00
Dogtiti c75d9e3de4
Merge pull request #5463 from yudshj/main
修正了typo,WHITE_WEBDEV_ENDPOINTS -> WHITE_WEBDAV_ENDPOINTS
2024-09-19 14:26:48 +08:00
Yudong df222ded12 修正了typo, WebDev -> WebDav 2024-09-19 14:15:31 +08:00
river 7dc0f81d3f chore: change placeholder 2024-09-19 13:48:59 +08:00
river 23793e834d chore: change placeholder 2024-09-19 12:53:43 +08:00
river f4f3c6ad5a chore: change placeholder 2024-09-19 12:47:09 +08:00
river 775794e0e4 chore: add setting 2024-09-19 12:38:46 +08:00
endless-learner 03268ce4d8
Added 1-click deployment link for Alibaba Cloud. 2024-09-18 20:38:20 -07:00
Yudong 212d15fdd0 修正了typo,WHITE_WEBDEV_ENDPOINTS -> WHITE_WEBDAV_ENDPOINTS 2024-09-19 11:20:18 +08:00
river 065f015f7b feat: add error tip 2024-09-19 09:53:00 +08:00
Dogtiti b5ba05dd83
Merge pull request #5462 from JuliusMoehring/main
fix: Avoid fetching prompts.json serverside
2024-09-19 09:50:21 +08:00
river e4fda6cacf feat: add auth tip 2024-09-19 08:41:09 +08:00
JuliusMoehring accb526cd6 Avoid fetching prompts.json serverside 2024-09-18 18:07:10 +02:00
river 2f0d94a46b chore: add auth tip 2024-09-19 00:05:06 +08:00
river 8dc24403d8 chore: Update Chinese translation for API key placeholder 2024-09-18 22:05:51 +08:00
Dogtiti a8c70d84a9
Merge pull request #5459 from DDMeaqua/tts
add tts
2024-09-18 15:42:16 +08:00
DDMeaqua 10d7a64f88 fix: error 2024-09-18 15:37:21 +08:00
Dogtiti d51bbb4a81
Merge pull request #5444 from skymkmk/pr-fix-model-config-hydration
fix: config hydration and default model forced to set gpt-3.5-turbo
2024-09-18 15:26:47 +08:00
Dogtiti 848f794149
Merge pull request #5402 from DDMeaqua/fix-selector-css
fix: selector css
2024-09-18 15:19:08 +08:00
DDMeaqua 7f1b44befe fix: css 2024-09-18 15:04:41 +08:00
Dogtiti 9ddd5a0566
Merge pull request #5432 from ConnectAI-E/Feature-fork
feat fork
2024-09-18 15:04:27 +08:00
DDMeaqua a3b664763e chore: default header 2024-09-18 14:57:43 +08:00
lyf fd47bc1dc3 Add English copy 2024-09-18 13:56:44 +08:00
DDMeaqua dfaafe3adb Merge branch 'main' into tts 2024-09-18 13:48:28 +08:00
Dogtiti b4dc4d34eb
Merge pull request #5112 from DDDDD12138/remove-unused-imports
Chore: Remove Unused Imports and Add ESLint Rules
2024-09-18 11:37:47 +08:00
DDMeaqua 3ae8ec1af6 feat: tts 2024-09-18 11:24:25 +08:00
Lloyd Zhou 5c34666334
Merge pull request #5454 from SukkaW/ci-bump
ci: bump `actions/cache` to v4
2024-09-18 11:13:59 +08:00
DDMeaqua 212605a7e3 Merge branch 'main' into tts-stt 2024-09-18 10:39:56 +08:00
SukkaW 4ddfa9af8d ci: bump `actions/cache` to v4 2024-09-17 22:28:13 +08:00
skymkmk 36a0c7b8a3
fix: default is forced to set gpt-3.5-turbo if no server default model have been set 2024-09-16 02:07:22 +08:00
skymkmk 9e1e0a7252
fix: persisted available models are not updated after source code has been updated 2024-09-16 02:06:17 +08:00
Dogtiti 027e5adf67
Merge pull request #5442 from DDDDD12138/fix-typo
chore: correct typo
2024-09-15 22:12:59 +08:00
DDDDD12138 e986088bec chore: correct typo 2024-09-15 21:59:21 +08:00
DDDDD12138 63ffd473d5 chore: remove unused imports 2024-09-15 20:17:02 +08:00
Dogtiti 9e5d92dc58
Merge pull request #5438 from DDMeaqua/fix-AnthropicCros
fix: ts error
2024-09-15 14:23:29 +08:00
Meaqua 8ac9141a29 fix: ts error 2024-09-15 14:21:27 +08:00
Dogtiti 313c942350
Merge pull request #5435 from DDMeaqua/fix-AnthropicCros
fix: #5429 Anthropic authentication_error CORS
2024-09-15 10:07:24 +08:00
Dogtiti 26c3edd023
Merge pull request #5430 from skymkmk/pr-manual-regen-title
feat: menual regen title
2024-09-14 18:10:32 +08:00
DDMeaqua 9a5a3d4ce4 fix: #5429 Anthropic authentication_error CORS 2024-09-14 16:06:18 +08:00
lyf 6e79b9a7a2 add fork 2024-09-14 14:19:11 +08:00
Dogtiti b32d82e6c1
Merge pull request #5426 from skymkmk/pr-summarize-customization
feat: summarize model customization
2024-09-14 14:18:25 +08:00
wuzhiqing a3585685df chore: add ESLint plugin and rules to remove unused imports
- Installed eslint-plugin-unused-imports
- Updated .eslintrc.json to include rules for detecting unused imports
2024-09-14 13:54:51 +08:00
Dogtiti f379865e2c
Merge pull request #5431 from tuanzisama/tuanzisama-patch-1
feat: Improve setting.model selector
2024-09-14 10:05:38 +08:00
Dogtiti 4eb4c31438
Merge pull request #5428 from Dakai/main
Add a space between model and provider in ModelSelector to improve readability.
2024-09-14 09:57:52 +08:00
evenwan 84a7afcd94 feat: Improve setting.model selector 2024-09-14 09:31:05 +08:00
skymkmk fa48ace39b
fix: prevent users from setting a extremly short history that resulting in no content being sent for the title summary 2024-09-14 07:49:26 +08:00
skymkmk 1b869d9305
translation: translations by claude for new writings 2024-09-14 07:42:06 +08:00
skymkmk 93bc2f5870
feat: now user can choose their own summarize model 2024-09-14 07:41:27 +08:00
skymkmk 37c0cfe1e9
translation: translations by claude for manual refresh 2024-09-14 07:28:07 +08:00
skymkmk fc27441561
feat: manual refresh for title 2024-09-14 07:21:19 +08:00
MatrixDynamo 79cfbac11f
Add a space between model and provider in ModelSelector to improve readability. 2024-09-14 03:19:24 +08:00
lloydzhou df62736ff6 update version 2024-09-13 17:36:32 +08:00
Dogtiti 6a464b3e5f
Merge pull request #5418 from ConnectAI-E/hotfix/artifact
fixed: html codeblock include 2 newline
2024-09-13 17:32:21 +08:00
Lloyd Zhou 57fcda80df
Merge pull request #5419 from DDMeaqua/feat-shortcutkey
chore: 手机端隐藏快捷键展示
2024-09-13 17:31:22 +08:00
DDMeaqua db39fbc419 chore: 手机端隐藏快捷键展示 2024-09-13 16:56:06 +08:00
lloydzhou 3dabe47c78 fixed: html codeblock include 2 newline 2024-09-13 16:27:02 +08:00
Dogtiti affc194cde
Merge pull request #5416 from skymkmk/pr-add-o1
feat: add o1 model
2024-09-13 16:26:42 +08:00
skymkmk 03fa580a55
fix: give o1 some time to think twice 2024-09-13 16:25:04 +08:00
skymkmk d0dce654bf
fix: shouldstream is not depend on iso1 2024-09-13 14:18:18 +08:00
Dogtiti 169323e238
Merge pull request #5415 from skymkmk/pr-fix-incorrect-vision-model-judgement
fix: remove the visual model judgment method that checks if the model…
2024-09-13 13:57:37 +08:00
skymkmk 71df415b14
feat: add o1 model 2024-09-13 13:34:49 +08:00
skymkmk 6bb01bc564
fix: remove the visual model judgment method that checks if the model name contains 'preview' from the openai api to prevent models like o1-preview from being classified as visual models 2024-09-13 12:56:28 +08:00
Dogtiti 07c6fe5975
Merge pull request #5406 from ChatGPTNextWeb/river
fix: #4240 remove tip when 0 context
2024-09-12 21:48:11 +08:00
Dogtiti 5964181cb2
Merge pull request #5405 from xmcp/patch-1
fix typo in UI
2024-09-12 21:47:39 +08:00
river 4b8288a2b2 fix: #4240 remove tip when 0 context 2024-09-12 21:05:02 +08:00
xmcp 88b1c1c6a5
fix typo 2024-09-12 19:54:16 +08:00
Dogtiti 1234deabfa
Merge pull request #5396 from DDMeaqua/feat-shortcutkey
feat: add shortcut key
2024-09-12 13:57:24 +08:00
DDMeaqua ebaeb5a0d5 fix: selector css 2024-09-11 17:54:48 +08:00
DDMeaqua 45306bbb6c fix: ts error 2024-09-10 14:37:58 +08:00
DDMeaqua 18e2403b01 chore: 更换icon 2024-09-10 14:30:51 +08:00
DDMeaqua e578c5f3ad chore: 样式更新 2024-09-10 12:01:51 +08:00
DDMeaqua 61245e3d7e fix: dark theme css 2024-09-09 19:29:10 +08:00
DDMeaqua 7804182d0d fix: type error 2024-09-09 19:18:12 +08:00
DDMeaqua f2195154f6 feat: add shortcut key 2024-09-09 18:55:37 +08:00
Dogtiti 35f77f45a2
Merge pull request #5386 from ConnectAI-E/feature/safeLocalStorage
fix: safaLocalStorage
2024-09-09 16:48:25 +08:00
Dogtiti 992c3a5d3a fix: safaLocalStorage 2024-09-08 13:23:40 +08:00
mayfwl d51d7b6797
Merge pull request #5376 from MrrDrr/add_chatgpt_4o_latest
add chatgpt-4o-latest
2024-09-08 10:15:41 +08:00
lloydzhou 23ac2efd89 hotfix and update version 2024-09-07 22:12:42 +08:00
Lloyd Zhou daeffb2dc6
Merge pull request #5383 from SukkaW/fix-5378
fix(#5378): default plugin ids to empty array
2024-09-07 22:09:35 +08:00
SukkaW db58ca6c1d fix(#5378): default plugin ids to empty array 2024-09-07 21:32:18 +08:00
Lloyd Zhou 2ff292cbfa
Merge pull request #5381 from reggiezhang/patch-1
Add crossOrigin="use-credentials" for site.webmanifest
2024-09-07 16:58:07 +08:00
Reggie Zhang 5a81393863
Add crossOrigin="use-credentials" for site.webmanifest
Add `crossOrigin="use-credentials"` to the `<link>` element for `site.webmanifest` when the site is behind a proxy with authentication.
2024-09-07 16:24:52 +08:00
Lloyd Zhou 116a73d398
Merge pull request #5377 from ConnectAI-E/hotfix/mermaid
hotfix Mermaid can not render. close #5374
2024-09-07 13:01:36 +08:00
lloydzhou cf0c057164 hotfix Mermaid can not render. close #5374 2024-09-07 13:00:55 +08:00
Lloyd Zhou fe5a4f4447
Merge pull request #5375 from Kosette/fix-version
fix: update package version
2024-09-07 11:55:23 +08:00
l.tingting c1b74201e4 add chatgpt-4o-latest 2024-09-07 01:42:56 +08:00
kosette 27828d9ca8 fix: update package version 2024-09-06 23:07:01 +08:00
Dogtiti 2bd799fac6
Merge pull request #5331 from ConnectAI-E/feature/plugin
Feature plugin (GPTs like action based on function call)
2024-09-06 20:06:50 +08:00
lloydzhou 9275f2d753 add awesome plugin repo url 2024-09-06 19:37:24 +08:00
lloydzhou 7455978ee5 default enable artifact 2024-09-06 09:26:06 +08:00
lloydzhou 7c0acc7b77 hotfix tools empty array 2024-09-05 22:02:06 +08:00
Lloyd Zhou f32dd69acf
Merge pull request #7 from ConnectAI-E/feature/plugin-artifact
Feature/plugin artifact
2024-09-05 14:54:01 +08:00
lloydzhou 80b8f956a9 move artifacts into mask settings 2024-09-05 14:49:11 +08:00
lloydzhou caf50b6e6c move artifacts into mask settings 2024-09-05 14:46:16 +08:00
lloydzhou b590d0857c disable nextjs proxy, then can using dalle as plugin 2024-09-05 13:51:59 +08:00
Lloyd Zhou 982019307c
Merge pull request #6 from ConnectAI-E/feature/plugin-app-cors
using tauri http api run plugin to fixed cors in App
2024-09-04 22:49:35 +08:00
lloydzhou 09aec7b22e using tauri http api run plugin to fixed cors in App 2024-09-04 21:32:22 +08:00
lloydzhou f9a047aad4 using tauri http api run plugin to fixed cors in App 2024-09-04 21:04:13 +08:00
Lloyd Zhou 85704570f3
Merge pull request #5356 from ConnectAI-E/feature/indexdb
fix: hydrated for indexedDB
2024-09-04 19:14:18 +08:00
lloydzhou 53dcae9e9c update 2024-09-04 13:00:18 +08:00
lloydzhou 04e1ab63bb update readme 2024-09-04 11:47:42 +08:00
Dogtiti ed9aae531e fix: hydrated 2024-09-03 20:29:01 +08:00
lloydzhou 6ab6b3dbca remove no need code 2024-09-03 20:21:37 +08:00
lloydzhou 7180ed9a60 hotfix 2024-09-03 19:56:22 +08:00
lloydzhou 0a5522d28c update 2024-09-03 19:35:36 +08:00
lloydzhou c7bc93b32b Merge remote-tracking branch 'connectai/main' into feature/plugin 2024-09-03 19:02:40 +08:00
lloydzhou d30351e7b0 update readme 2024-09-03 17:18:43 +08:00
Dogtiti 886ffc0af8 fix: hydrated for indexedDB 2024-09-03 17:12:48 +08:00
lloydzhou 4fdd997108 hotfix 2024-09-03 16:23:54 +08:00
lloydzhou 236736deea remove no need code 2024-09-03 15:37:23 +08:00
lloydzhou 2b317f60c8 add config auth location 2024-09-03 12:00:55 +08:00
lloydzhou 3ec67f9f47 add load from url 2024-09-03 00:45:11 +08:00
lloydzhou 6435e7a30e update readme 2024-09-02 23:42:56 +08:00
lloydzhou 078305f5ac kimi support function call 2024-09-02 21:55:17 +08:00
lloydzhou 801b62543a claude support function call 2024-09-02 21:45:47 +08:00
lloydzhou 877668b629 hotfix 2024-09-02 18:29:00 +08:00
lloydzhou f652f73260 plugin add auth config 2024-09-02 18:11:19 +08:00
lloydzhou b2965e1deb update 2024-08-31 00:16:47 +08:00
lloydzhou 2214689920 add gapier proxy 2024-08-30 23:51:03 +08:00
lloydzhou 9326ff9d08 ts error 2024-08-30 23:39:08 +08:00
lloydzhou 271f58d9cf stash code 2024-08-30 17:31:20 +08:00
lloydzhou cac99e3908 add Plugin page 2024-08-30 13:02:03 +08:00
Dogtiti 97a4a910e0
Merge pull request #5335 from ConnectAI-E/fix/right-click
fix: right click
2024-08-29 20:49:31 +08:00
Dogtiti 19c7a84548 fix: right click 2024-08-29 20:48:04 +08:00
lloydzhou 571ce11e53 stash code 2024-08-29 19:55:09 +08:00
lloydzhou d2cb984ced add processToolMessage callback 2024-08-29 17:28:15 +08:00
lloydzhou 7fc0d11931 create common function stream for fetchEventSource 2024-08-29 17:14:23 +08:00
lloydzhou 341a52a615 stash code 2024-08-29 01:35:41 +08:00
lloydzhou d58b99d602 stash code 2024-08-29 01:00:16 +08:00
lloydzhou f7a5f836db stash code 2024-08-29 00:56:20 +08:00
lloydzhou d212df8b95 stash code 2024-08-29 00:39:51 +08:00
lloydzhou f3f6dc57c3 stash code 2024-08-29 00:32:35 +08:00
lloydzhou 29b5cd9436 ts error 2024-08-29 00:21:26 +08:00
lloydzhou f5209fc344 stash code 2024-08-28 23:58:46 +08:00
DDMeaqua c5168c2132 fix: i18n 2024-08-28 13:15:52 +08:00
DDMeaqua 318e0989a2 fix: transcription headers 2024-08-28 13:13:41 +08:00
DDMeaqua d8b1781d7b Merge branch 'tts-stt' of https://github.com/DDMeaqua/ChatGPT-Next-Web into tts-stt 2024-08-28 12:40:20 +08:00
DDMeaqua ed5aea0521 fix: bug 2024-08-28 12:37:19 +08:00
Meaqua e9f90a4d82 fix: i18n 2024-08-27 21:49:00 +08:00
DDMeaqua f86b220c92 feat: add voice action 2024-08-27 19:50:16 +08:00
Lloyd Zhou b6bb1673d4
Merge pull request #5324 from ConnectAI-E/feature/indexdb
feat: add indexDB
2024-08-27 18:07:20 +08:00
DDMeaqua 93f1762e6c chore: wip 2024-08-27 17:02:44 +08:00
DDMeaqua 2f410fc09f feat: add tts stt 2024-08-27 16:21:02 +08:00
Dogtiti 7b6fe66f2a feat: try catch indexedDB error 2024-08-27 10:05:37 +08:00
Dogtiti c2fc0b4979 feat: try catch indexedDB error 2024-08-27 09:57:07 +08:00
Dogtiti 0b758941a4 feat: clear indexDB 2024-08-26 21:23:21 +08:00
Dogtiti 492b55c893 feat: add indexDB 2024-08-26 21:20:07 +08:00
Dogtiti 4060e367ad feat: add indexDB 2024-08-26 21:13:35 +08:00
lloydzhou c99cd31b6b add openapi-client-axios 2024-08-24 15:34:33 +08:00
Lloyd Zhou 718782f5b1
Merge pull request #5309 from ElricLiu/main
Update README.md
2024-08-24 12:11:46 +08:00
ElricLiu 0c3fb5b2ce
Update README.md
add monica sponsored
2024-08-23 17:28:00 +08:00
mayfwl 4ec6b067e7
fix: artifact render error (#5306)
fix: artifact render error
2024-08-21 18:48:44 +08:00
Dogtiti 1748dd6a3b
Merge pull request #5303 from ConnectAI-E/Modifylang
Modify View All Languages
2024-08-21 16:28:36 +08:00
mayfwl 95332e50ed
Merge branch 'main' into Modifylang 2024-08-21 15:45:04 +08:00
yihang3 56eb9d1430 fix no max_tokens in payload when the vision model name does not contain 'vision'. 2024-08-21 15:22:31 +08:00
lyf 8496980cf9 Modify View All Languages 2024-08-21 14:28:39 +08:00
Lloyd Zhou ffe32694b0
Merge pull request #5300 from ConnectAI-E/hotfix/hide-button
Hotfix/hide button
2024-08-21 14:12:03 +08:00
lloydzhou 3d5b21154b update 2024-08-21 12:05:03 +08:00
lloydzhou 4b9697e336 fix: typescript error 2024-08-21 11:54:48 +08:00
lloydzhou b0e9a542ba frat: add reload button 2024-08-21 11:17:00 +08:00
lloydzhou 8b67536c23 fix: 修复多余的查看全部 2024-08-21 10:28:34 +08:00
lloydzhou cd49c12181 fix: 修复查看全部按钮导致artifacts失效 2024-08-21 10:27:37 +08:00
Lloyd Zhou a6b14c7910
Merge pull request #5274 from Movelocity/feat/search-history
feat: add a page to search chat history
2024-08-21 00:58:01 +08:00
heweikang e275abdb9c match the origin format 2024-08-20 19:49:47 +08:00
heweikang 09a90665d5 Merge branch 'feat/search-history' of https://github.com/Movelocity/ChatGPT-Next-Web into feat/search-history 2024-08-20 19:47:41 +08:00
heweikang 6649fbdfd0 remove an empty line 2024-08-20 19:47:36 +08:00
heweikang 64a0ffee7b Merge branch 'main' into feat/search-history 2024-08-20 19:47:03 +08:00
Hollway b529118f31
Merge branch 'ChatGPTNextWeb:main' into feat/search-history 2024-08-20 19:46:08 +08:00
heweikang 39d7d9f13a migrate the search button to plugins discovery 2024-08-20 19:44:22 +08:00
heweikang fcd55df969 wrap doSearch with useCallback 2024-08-20 09:45:34 +08:00
Dogtiti 1e59948358
Merge pull request #5288 from zhangjian10/main
fix: Determine if Tencent is authorized
2024-08-19 09:20:58 +08:00
zhangjian10 1102ef6e6b
fix: Determine if Tencent is authorized 2024-08-18 23:47:23 +08:00
heweikang 7ce2e8f4c4 resolve a warning 2024-08-17 11:26:38 +08:00
heweikang fd1c656bdd add all translations for SearchChat 2024-08-17 11:08:38 +08:00
heweikang 82298a760a Merge branch 'main' into feat/search-history 2024-08-17 10:15:49 +08:00
heweikang b84bb72e07 add null check for search content 2024-08-17 10:06:56 +08:00
lloydzhou 495b321d0a Merge remote-tracking branch 'origin/main' 2024-08-16 16:35:31 +08:00
lloydzhou 8a38cdc1d7 update version 2024-08-16 16:35:13 +08:00
Lloyd Zhou e210db7bd9
Merge pull request #5235 from ConnectAI-E/feature/access
代码折叠
2024-08-16 16:34:13 +08:00
Lloyd Zhou f64763b74b
Merge pull request #5093 from DDMeaqua/feat-i18n
feat: update i18n
2024-08-16 16:20:31 +08:00
Lloyd Zhou e033bef544
Merge branch 'main' into feat-i18n 2024-08-16 16:13:13 +08:00
DDMeaqua d88cc575e5 chore: update 2024-08-16 16:10:31 +08:00
heweikang e3f499be0c hide search button text 2024-08-16 10:23:27 +08:00
Dogtiti 87325fad74
Merge pull request #5270 from DDMeaqua/main
feat: add model name to conversation page
2024-08-15 20:12:33 +08:00
Hollway 86220573b6
Update yarn.lock to match origin version 2024-08-15 17:27:14 +08:00
heweikang 65ed6b02a4 Merge branch 'main' into feat/search-history 2024-08-15 17:24:17 +08:00
heweikang 98093a1f31 make search result item easier to click 2024-08-15 17:21:39 +08:00
heweikang 00990dc195 use yarn instead of npm 2024-08-15 17:09:41 +08:00
Lloyd Zhou 122aa94c9f
Merge pull request #5273 from ElricLiu/main
Update README.md
2024-08-15 14:35:48 +08:00
heweikang 3da5284a07 优化搜索算法,更新图标 2024-08-15 12:38:20 +08:00
heweikang cd920364f8 Add page to search chat history 2024-08-14 22:28:05 +08:00
ElricLiu e2e8a45104
Update README.md 2024-08-14 20:38:52 +08:00
DDMeaqua fb5fc13f72 feat: add model name 2024-08-13 19:58:44 +08:00
Dogtiti edb92f7bfb
Merge pull request #5245 from MrrDrr/dalle3_add_options
dall-e-3 adds 'quality' and 'style' options
2024-08-13 15:31:18 +08:00
lyf 1980f43b9f feat(auth): xg feature 2024-08-12 16:31:12 +08:00
lyf c3c3dd5154 feature add zhedie 2024-08-12 14:06:21 +08:00
lyf 356155fd30 feature add zhedie 2024-08-12 13:58:53 +08:00
lyf 6f75ef8f0a addcd 2024-08-10 12:45:50 +08:00
l.tingting ca865a80dc dall-e-3 adds 'quality' and 'style' options 2024-08-10 11:09:07 +08:00
lyf 8f759d1c3e 代码折叠 2024-08-09 14:55:25 +08:00
lyf 44787637f2 tj 2024-08-08 21:07:03 +08:00
Dogtiti cf1c8e8f2a
Merge pull request #5233 from ConnectAI-E/feature/bangpt
Feature/bangpt
2024-08-08 17:27:50 +08:00
lyf d948be2372 Merge branch 'feature/bangpt' of ssh://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/bangpt 2024-08-08 16:25:49 +08:00
lyf cc28aef625 add gpt-4o-2024-08-06 2024-08-08 16:23:40 +08:00
Dogtiti 036358de7c
Merge pull request #5219 from ConnectAI-E/feature/access
修复ChatGPTNextWeb页面的无障碍问题
2024-08-08 13:59:16 +08:00
lyf 0958b9ee12 tsxiugai 2024-08-08 13:08:56 +08:00
Dogtiti aff1d7ecd6
Merge pull request #5223 from 122cygf/main
修复拼写错误
2024-08-07 21:29:43 +08:00
lzz 42fdbd9bb8
Fix spelling errors 2024-08-07 17:22:32 +08:00
lyf 034c82e514 修改设置的无障碍 2024-08-07 13:39:23 +08:00
lyf 14ff46b5cd 解决按钮无障碍 2024-08-07 13:01:08 +08:00
lyf c9099ca0a5 无障碍按钮和链接 2024-08-07 10:55:02 +08:00
Dogtiti 58b144b345
Merge pull request #5218 from ConnectAI-E/feature/bangpt
解决禁止gpt4时禁止gtp4o-mini问题
2024-08-07 10:45:38 +08:00
lyf af21c57e77 ban gpt4 2024-08-07 10:15:39 +08:00
lloydzhou 624e4dbaaf update version 2024-08-06 22:41:35 +08:00
Dogtiti 9bbb4f396b
Merge pull request #5181 from webws/feature/support-iflytek-spark
Feature/support iflytek spark
2024-08-06 19:32:43 +08:00
Dogtiti 1287e39cc6 feat: run test before build 2024-08-06 19:24:47 +08:00
Dogtiti 1ef2aa35e9 feat: jest 2024-08-06 18:03:27 +08:00
webws b2c1644d69 feat: add support for iFLYTEK Spark API (接入讯飞星火模型) 2024-08-06 17:54:30 +08:00
Lloyd Zhou 5629f842da
Merge pull request #5174 from ahzmr/main
style: Fixed an issue where the sample of the reply content was displayed out of order
2024-08-06 16:52:43 +08:00
HyiKi f900283b09 fix: tencent InvalidParameter error
fix "Messages 中 system 角色必须位于列表的最开始"
2024-08-06 15:50:12 +08:00
HyiKi b667eff6bd fix: baidu error_code 336006
change the summary role from system to user
2024-08-06 15:50:12 +08:00
HyiKi 54fdf40f5a fix: baidu error_code 336006 2024-08-06 15:50:12 +08:00
Lloyd Zhou 690542145d
Merge pull request #5194 from HyiKi/main
fix: baidu error_code 336006
2024-08-06 13:56:44 +08:00
Lloyd Zhou 94c4cf0624
Merge pull request #5205 from ConnectAI-E/fix/azure-summary
fix: azure summary
2024-08-06 11:23:00 +08:00
Dogtiti 3da717d9fc fix: azure summary 2024-08-06 11:20:03 +08:00
HyiKi 0902efc719 Merge branch 'fix-role-error' 2024-08-06 10:53:32 +08:00
HyiKi d7e2ee63d8 fix: tencent InvalidParameter error
fix "Messages 中 system 角色必须位于列表的最开始"
2024-08-06 10:45:25 +08:00
GH Action - Upstream Sync 7deb36ee1f Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-08-06 01:33:38 +00:00
GH Action - Upstream Sync bfe4e88246 Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-08-06 01:00:29 +00:00
HyiKi 9ab45c3969 fix: baidu error_code 336006
change the summary role from system to user
2024-08-05 20:50:36 +08:00
Dogtiti fec80c6c51
Merge pull request #5173 from ConnectAI-E/feature/dalle
add dalle3 model
2024-08-05 20:49:37 +08:00
Dogtiti a6b7432358
Merge pull request #5180 from frostime/contrib-modellist
 feat: 调整模型列表,将自定义模型放在前面显示
2024-08-05 20:49:07 +08:00
frostime 3486954e07 🐛 fix(openai): 上次 commit 后 openai.ts 文件中出现类型不匹配的 bug 2024-08-05 20:26:48 +08:00
frostime 150fc84b9b feat(model): 增加 sorted 字段,并使用该字段对模型列表进行排序
1. 在 Model 和 Provider 类型中增加 sorted 字段(api.ts)
2. 默认模型在初始化的时候,自动设置默认 sorted 字段,从 1000 开始自增长(constant.ts)
3. 自定义模型更新的时候,自动分配 sorted 字段(model.ts)
2024-08-05 19:43:32 +08:00
frostime b023a00445 🔨 refactor(model): 更改原先的实现方法,在 collect table 函数后面增加额外的 sort 处理 2024-08-05 16:37:22 +08:00
HyiKi d0e296adf8 fix: baidu error_code 336006 2024-08-05 15:41:13 +08:00
Dogtiti aa40015e9b
Merge pull request #5190 from ConnectAI-E/feaure/hmac
reduce cloudflare functions build size
2024-08-05 14:07:55 +08:00
lloydzhou 141ce2c99a reduce cloudflare functions build size 2024-08-05 12:59:27 +08:00
lloydzhou 4a95dcb6e9 hotfix get wrong llm 2024-08-05 12:45:25 +08:00
lloydzhou 1610675c8f remove hash.js 2024-08-05 11:36:35 +08:00
GH Action - Upstream Sync 724c814bfe Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web 2024-08-04 01:04:56 +00:00
Dogtiti 764c0cb865
Merge pull request #5179 from frostime/contrib-font
 feat(config): Config the font-family of chat content
2024-08-03 21:04:43 +08:00
frostime 8a4b8a84d6 feat: 调整模型列表,将自定义模型放在前面显示 2024-08-03 17:16:05 +08:00
frostime 8ec6acc55a 🎨 i18n: add locale about "font family config"
Note: The language is translated by GPT
2024-08-03 16:24:51 +08:00
frostime b6a022b0ef feat(config): Set markdown's font-family 2024-08-03 16:04:20 +08:00
李超 716899c030 fix: Fixed the issue that WebDAV synchronization could not check the status and failed during the first backup 2024-08-03 12:40:48 +08:00
Dogtiti d9e407fd2b
Merge pull request #5175 from frostime/upstream-main
 feat: 为命令前缀( `:` )增加对中文符号 `:`的支持
2024-08-03 10:37:51 +08:00
frostime deb140de73 feat: 为命令前缀( `:` )支持中文符号 `:` 2024-08-02 22:38:31 +08:00
织梦人 3c1e5e7978
Merge branch 'ChatGPTNextWeb:main' into main 2024-08-02 22:26:28 +08:00
Dogtiti 4a8e85c28a fix: empty response 2024-08-02 22:16:08 +08:00
李超 8498cadae8 fix: Fixed an issue where the sample of the reply content was displayed out of order 2024-08-02 21:06:03 +08:00
lloydzhou 8c83fe23a1 using b64_json for dall-e-3 2024-08-02 20:58:21 +08:00
Lloyd Zhou a8c65e3d27
Merge pull request #5126 from DDDDD12138/fix-watch-mask
fix: replace '&' with concurrently for cross-platform compatibility
2024-08-02 19:22:57 +08:00
Lloyd Zhou 324d30bef9
Merge pull request #5136 from frostime/contrib-txtcode
 feat(markdown): 对纯文本的代码块内容进行折行处理
2024-08-02 19:22:13 +08:00
lloydzhou 46cb48023e fix typescript error 2024-08-02 18:50:48 +08:00
lloydzhou 1c24ca58c7 add dalle3 model 2024-08-02 18:03:19 +08:00
Lloyd Zhou 9193a9a0e0
Merge pull request #5147 from ConnectAI-E/fix-locales
fix: locales => Locale
2024-08-02 18:02:13 +08:00
Lloyd Zhou 957244ba2e
Merge pull request #5154 from ConnectAI-E/fix-sidebar-style
fix: sidebar title style
2024-08-02 18:01:51 +08:00
lloydzhou ac599aa47c add dalle3 model 2024-08-02 18:00:42 +08:00
Dogtiti 67a90ffb76
Merge pull request #5171 from ConnectAI-E/feature/tencent
Feature/tencent
2024-08-02 16:43:00 +08:00
lloydzhou feaa6f9bf0 Merge remote-tracking branch 'connectai/feature/tencent' into feature/tencent 2024-08-02 15:56:25 +08:00
lloydzhou 753bf3b924 hotfix for cf deploy error #5168 2024-08-02 15:55:26 +08:00
Dogtiti b3219f57c8
Merge pull request #5157 from ConnectAI-E/feature/tencent
Feature/tencent
2024-08-01 23:56:27 +08:00
Dogtiti a17df037af feat: add lodash-es 2024-08-01 23:54:14 +08:00
lloydzhou dfc36e5210 update 2024-08-01 19:02:40 +08:00
lloydzhou c359b92ddc update 2024-08-01 18:58:07 +08:00
lloydzhou e1d6131f13 update 2024-08-01 16:49:55 +08:00
lloydzhou 6a0bda00f5 merge main 2024-08-01 15:39:54 +08:00
lloydzhou f85ec95877 hotfix for tencent sign 2024-08-01 15:33:48 +08:00
lloydzhou a024980c03 revert package.json 2024-08-01 13:21:36 +08:00
lloydzhou fd9e94e078 merge main 2024-08-01 13:18:33 +08:00
Dogtiti f6a6c51d15
Merge pull request #5156 from ConnectAI-E/feature/moonshot
add moonshot api
2024-08-01 12:28:10 +08:00
lloydzhou 966db1e4be add moonshot settings 2024-08-01 12:03:22 +08:00
lloydzhou b8bbc37b8e merge main 2024-08-01 11:57:23 +08:00
lloydzhou 40cbabc330 add moonshot api 2024-08-01 11:55:22 +08:00
wuzhiqing 04a4e1b39a fix: replace '&' with concurrently for cross-platform compatibility
- Used `concurrently` to run parallel commands in package.json scripts
- Added `concurrently` as a devDependency in package.json
2024-08-01 10:05:21 +08:00
Dogtiti 99f3160aa2 fix: sidebar title style 2024-07-31 22:10:24 +08:00
Dogtiti 8cb72d8452 fix: locales => Locale 2024-07-30 21:25:33 +08:00
Dogtiti c9eb9f3eda
Merge pull request #5141 from ConnectAI-E/hotfix-google-api-in-app
hotfix for using google api in app
2024-07-30 15:42:30 +08:00
lloydzhou 64c3dcd732 hotfix for using google api in app 2024-07-30 12:03:06 +08:00
frostime d49ececcc5 feat(markdown): wrap the plain text code block 2024-07-29 19:10:00 +08:00
Lloyd Zhou 90e1fadb1e
Merge pull request #5124 from ElricLiu/main
Update README.md
2024-07-29 14:58:07 +08:00
ElricLiu 071391ddff
Update README.md 2024-07-29 14:57:46 +08:00
ElricLiu d70d46b4d5
Update README.md 2024-07-29 14:56:39 +08:00
ElricLiu 3ef596b215
Update README.md
Update redame,add Artifacts & SD
2024-07-29 14:42:30 +08:00
Lloyd Zhou 35c5518668
Merge pull request #5110 from ConnectAI-E/fix/owner
chore: update owner
2024-07-26 19:28:45 +08:00
lloydzhou 8b513537b7 release v2.14.0 2024-07-26 19:24:16 +08:00
ElricLiu b27f394995
Merge pull request #5092 from ConnectAI-E/feature-artifacts
[Artifacts] add preview html code
2024-07-26 17:28:49 +08:00
Dogtiti 3f9f556e1c fix: iframe bg 2024-07-26 17:06:10 +08:00
Dogtiti 1772d5c4b6 chore: update owner 2024-07-26 16:00:25 +08:00
Dogtiti 715d1dc02f fix: default enable artifacts 2024-07-26 15:55:01 +08:00
Dogtiti 6737f016f5 chore: artifact => artifacts 2024-07-26 15:50:26 +08:00
Dogtiti f2d2622172 fix: uploading loading 2024-07-26 13:49:15 +08:00
Dogtiti 72d6f97024 fix: ts error 2024-07-26 11:21:51 +08:00
Lloyd Zhou a0f0b4ff9e
Merge pull request #5 from ConnectAI-E/feature/artifacts-style
Feature/artifacts style
2024-07-25 23:36:34 +08:00
Dogtiti c27ef6ffbf feat: artifacts style 2024-07-25 23:29:29 +08:00
Lloyd Zhou f5499ff699
Merge pull request #5071 from ZTH7/main
Fix defaultModel undefined error
2024-07-25 21:02:39 +08:00
Mr. Z c4334d4e5f
Update model.ts 2024-07-25 20:03:54 +08:00
Dogtiti 51e8f0440d Merge branch 'feature-artifacts' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/artifacts-style 2024-07-25 19:43:20 +08:00
lloydzhou 5ec0311f84 fix typescript error 2024-07-25 19:38:18 +08:00
lloydzhou 556d563ba0 update 2024-07-25 19:31:16 +08:00
lloydzhou 6a083b24c4 fix typescript error 2024-07-25 19:22:18 +08:00
lloydzhou 825929fdc8 merge main 2024-07-25 19:18:45 +08:00
Lloyd Zhou 941a03ed6c
Merge pull request #4983 from OpenAI-Next/dev-sd
[Feature] Stable Diffusion
2024-07-25 19:13:37 +08:00
Lloyd Zhou cf63619182
Merge pull request #5103 from ConnectAI-E/hotfix-cache-upload-image
hotfix cache upload image
2024-07-25 19:01:10 +08:00
Mr. Z 5c04d3c5ea
Change method 2024-07-25 17:59:15 +08:00
Mr. Z 46a47db2d8
Merge branch 'ChatGPTNextWeb:main' into main 2024-07-25 17:55:53 +08:00
Dogtiti 21ef9a4567 feat: artifacts style 2024-07-25 17:37:21 +08:00
lloydzhou 6f0846b2af hotfix cache upload image 2024-07-25 17:26:16 +08:00
lloydzhou ecd78b3bdd hotfix cache upload image 2024-07-25 17:23:21 +08:00
lloydzhou d8afd1af88 add expiration_ttl for kv storage 2024-07-25 16:56:08 +08:00
lloydzhou 7c1bc1f1a1 hotfix: auto set height 2024-07-25 15:27:44 +08:00
lloydzhou 763fc89b29 add fullscreen button on artifact component 2024-07-25 15:10:17 +08:00
lloydzhou 47b33f2b17 hotfix: auto set height 2024-07-25 14:15:16 +08:00
lloydzhou 9f0e16b045 hotfix: ts check 2024-07-25 13:48:21 +08:00
lloydzhou 2efedb1736 update 2024-07-25 13:34:59 +08:00
lloydzhou 044116c14c add plugin selector on chat 2024-07-25 13:29:39 +08:00
lloydzhou b4bf11d648 add loading icon when upload artifact content 2024-07-25 12:49:19 +08:00
lloydzhou 6cc0a5a1a4 remove code 2024-07-25 12:36:48 +08:00
lloydzhou 8f14de5108 hotfix: ts check 2024-07-25 12:34:35 +08:00
lloydzhou 8f6e5d73a2 hotfix: can send sd task in client 2024-07-25 12:31:30 +08:00
lloydzhou ab9f5382b2 fix typescript 2024-07-24 20:51:33 +08:00
Dogtiti fd441d9303 feat: discovery icon 2024-07-24 20:41:41 +08:00
lloydzhou e31bec3aff save artifact content to cloudflare workers kv 2024-07-24 20:36:11 +08:00
Dogtiti 2a1c05a028 fix: bugs 2024-07-24 20:04:22 +08:00
lloydzhou 421bf33c0e save artifact content to cloudflare workers kv 2024-07-24 20:02:37 +08:00
DDMeaqua 9c7bacc65e chore: update 2024-07-24 10:45:17 +08:00
Dogtiti 3935c725c9 feat: sd setting 2024-07-23 22:44:09 +08:00
Dogtiti 908ee0060f chore: remove sd new 2024-07-23 22:23:34 +08:00
Dogtiti 82e6fd7bb5 feat: move sd config to store 2024-07-23 21:43:55 +08:00
Dogtiti 6b98b14179 fix: sd mobile 2024-07-23 20:24:56 +08:00
lloydzhou 1ecefd88f7 hotfix 2024-07-23 17:57:41 +08:00
lloydzhou 2e9e20ce7c auto height for html preview 2024-07-23 17:54:07 +08:00
lloydzhou fb60fbb217 auto height for html preview 2024-07-23 17:51:44 +08:00
lloydzhou 4199e17da0 auto height for html preview 2024-07-23 17:44:15 +08:00
DDMeaqua 86b8bfcb1f feat: update i18n 2024-07-23 15:21:38 +08:00
lloydzhou dfd089132d add preview html code 2024-07-23 14:39:31 +08:00
lloydzhou 3a10f58b28 add preview html code 2024-07-23 14:33:41 +08:00
Dogtiti 9d55adbaf2 refator: sd 2024-07-23 00:51:58 +08:00
Lloyd Zhou 00be2be24f
Merge pull request #5088 from ChatGPTNextWeb/revert-5080-refactor-components
Revert "feat: improve components structure"
2024-07-22 22:19:05 +08:00
Lloyd Zhou 5b126c7e52
Revert "feat: improve components structure" 2024-07-22 22:18:51 +08:00
Lloyd Zhou 1943f3b53f
Merge pull request #5080 from ConnectAI-E/refactor-components
feat: improve components structure
2024-07-22 19:08:02 +08:00
Dogtiti 4a0bef9afb
Merge pull request #5081 from consistent-k/main
chore: Remove useless judgment conditions
2024-07-22 17:31:00 +08:00
consistent-k dfd2a53129 chore: Remove useless judgment conditions 2024-07-22 08:50:15 +00:00
Mr. Z aa4e855012
Compatibility changes 2024-07-22 16:41:11 +08:00
Dogtiti d6089e6309 fix: mask json 2024-07-22 16:33:03 +08:00
Dogtiti 038e6df8f0 feat: improve components structure 2024-07-22 16:02:45 +08:00
Mr. Z 2fd68bcac3
Fix defaultModel undefined error 2024-07-21 16:42:34 +08:00
lloydzhou e468fecf12 update 2024-07-20 20:44:19 +08:00
lloydzhou fc31d8e5d1 merge origin/main 2024-07-20 15:15:46 +08:00
Dogtiti 115f357a07
Merge pull request #5067 from ConnectAI-E/feature-setting-page
refactor setting page
2024-07-20 14:53:20 +08:00
lloydzhou ac04a1cac8 resolve conflicts 2024-07-20 14:41:42 +08:00
lloydzhou 87a286ef07 refactor setting page 2024-07-20 14:36:47 +08:00
Dogtiti 622d8a4edb
Merge pull request #5063 from ChenglongWang/summary_model
Change gpt summary model to gpt-4o-mini
2024-07-19 23:53:00 +08:00
Dogtiti b44086f0dc
Merge pull request #4847 from yeung66/main
add google api safety settings by Settings page
2024-07-19 23:41:04 +08:00
Chenglong Wang 0236e13187
Change gpt summary model to gpt-4o-mini. 2024-07-19 23:36:57 +08:00
YeungYeah a3d4a7253f Merge remote-tracking branch 'source/main' 2024-07-19 21:38:25 +08:00
lloydzhou e079f1b31a release v2.13.1 2024-07-19 18:16:05 +08:00
Dogtiti 9a78a72eb3
Merge pull request #5061 from ConnectAI-E/feature-cache-storage
using cache storage store image data #5013
2024-07-19 17:37:14 +08:00
lloydzhou 862c2e8810 hotfix for code review 2024-07-19 17:22:54 +08:00
Lloyd Zhou 12cad4c418
Merge pull request #5062 from yaway/main
Add README_JA.md
2024-07-19 17:14:32 +08:00
yaway 89b9d3a7f7
Update README_JA.md 2024-07-19 16:41:46 +08:00
yaway 57831d4880
Update and rename README_JP.md to README_JA.md 2024-07-19 16:31:12 +08:00
lloydzhou 052004d70e using compressImage when serviceWorker register error 2024-07-19 16:03:22 +08:00
lloydzhou a765237441 reload page when sw installed. 2024-07-19 15:40:14 +08:00
lloydzhou ac470a6d07 Merge remote-tracking branch 'connectai/feature-cache-storage' into feature-cache-storage 2024-07-19 15:39:51 +08:00
Dogtiti 7237d33be3 fix: ts type 2024-07-19 14:55:47 +08:00
lloydzhou 1610b480af remove console.log 2024-07-19 13:54:33 +08:00
lloydzhou 287fa0a39c feat: 1. using cache storage store image data; 2. get base64image before chat to api #5013 2024-07-19 13:50:10 +08:00
Dogtiti afa1a4303b
Merge pull request #5060 from DDMeaqua/main
readme_cn 补充锚点
2024-07-19 11:28:04 +08:00
DDMeaqua 28cedb1493 feat: 中文版锚点 2024-07-19 11:24:13 +08:00
Dogtiti a280e25ee7
Merge pull request #5046 from PeterDaveHello/Add-zh-tw-prompts
Add Traditional Chinese prompts converted from Simplified Chinese version
2024-07-19 10:54:54 +08:00
Dogtiti 8464ca8931
Merge pull request #5045 from ConnectAI-E/fix/google
refactor: google
2024-07-19 10:19:04 +08:00
Dogtiti 44340f277d
Merge pull request #5054 from Leizhenpeng/main
chore: update nav link to #企业版
2024-07-19 09:51:32 +08:00
Dogtiti 74bf99f9ea
Merge pull request #5057 from PeterDaveHello/support-gpt-4o-mini
Add GPT-4o mini support
2024-07-19 09:48:48 +08:00
Peter Dave Hello 9caf820758 Add GPT-4o mini support
Reference:
- https://platform.openai.com/docs/models/gpt-4o-mini
- https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence/
2024-07-19 03:21:40 +08:00
YeungYeah 26c2598f56 fix: fix bug in generating wrong gemini request url 2024-07-18 23:41:20 +08:00
RiverRay 6d9abf261c
chore: update md 2024-07-18 23:24:29 +08:00
Lloyd Zhou b16d0185dd
Merge pull request #5033 from arloor/main
doc: fix Azure docs
2024-07-18 21:06:59 +08:00
Lloyd Zhou ca51c2e93d
Merge pull request #5035 from DDDDD12138/code-cleanup
chore: remove unused imports and correct typos
2024-07-18 21:05:52 +08:00
Lloyd Zhou da254975cd
Merge pull request #5052 from LiuElric/main
Add enterprise edition introduction
2024-07-18 21:05:03 +08:00
LiuElric 86bae6be3a
Merge pull request #1 from yaway/patch-2
Update README.md
2024-07-18 17:59:45 +08:00
yaway fc8c7ef18d
Update README.md 2024-07-18 17:56:37 +08:00
LiuElric f654629c6a
Update README.md 2024-07-18 17:37:47 +08:00
LiuElric d4a87c561a
Update README.md 2024-07-18 17:34:27 +08:00
LiuElric d8872d48b3
Update README_CN.md
Added Chinese introduction to enterprise edition
2024-07-18 17:30:56 +08:00
LiuElric ed16c2c18d
Update README.md
Add enterprise version description
2024-07-18 17:20:47 +08:00
yaway 0478a6ce3b
Create README_JP.md 2024-07-18 17:20:21 +08:00
LiuElric 68b60e82ba
Add ent cover 2024-07-18 17:09:32 +08:00
DDDDD12138 8edc0989e2 chore: remove unused imports and correct typos 2024-07-18 01:51:56 +08:00
Dogtiti fbf3551bbe
Merge pull request #5036 from DDDDD12138/update-env-template
chore: update .env.template to align with README.md
2024-07-17 22:55:49 +08:00
Peter Dave Hello 238d3122c4 Add Traditional Chinese prompts converted from Simplified Chinese version 2024-07-17 22:38:43 +08:00
YeungYeah ee22fba448
Merge branch 'main' into main 2024-07-17 22:16:30 +08:00
Dogtiti f8a2a28bff
Merge pull request #5041 from DDMeaqua/feat-scrollbar-css
scrollbar width change
2024-07-17 20:09:43 +08:00
Dogtiti b3cfaf1420 refator: google 2024-07-17 20:05:34 +08:00
Dogtiti 2e9f701bb7
Merge pull request #5040 from consistent-k/main
update constant for baidu  add ENIRE Speed  ENIRE Lite model
2024-07-17 19:05:31 +08:00
consistent-k 9aabc4ad6a update constant for baidu add ernie-tiny model 2024-07-17 10:41:31 +00:00
consistent-k 5dc731bc77 update constant for baidu add ernie-speed model 2024-07-17 09:29:43 +00:00
DDMeaqua 32d05c9855 feat: scrollbar width change 2024-07-17 17:01:50 +08:00
consistent-k 5a0d0c0b75 update constant for baidu add ernie-speed model 2024-07-17 08:54:53 +00:00
wuzhiqing 17d4a8fb26 chore: update .env.template to align with README.md 2024-07-17 11:12:26 +08:00
arloor 348c1a7d5f
doc: fix Azure docs 2024-07-17 09:15:20 +08:00
lloydzhou 49151dabf5 hotfix 2024-07-16 23:18:34 +08:00
lloydzhou eb7c7cdcb6 更新serviceworker逻辑 2024-07-16 16:37:55 +08:00
Dogtiti ec95292209
Merge pull request #5025 from jiangying000/main
fix typo in app/components/chat.tsx
2024-07-16 16:13:07 +08:00
Lloyd Zhou 4b84fb328c
Merge pull request #2 from OpenAI-Next/dev-sd-test
using indexdb store image data
2024-07-16 15:12:10 +08:00
jiangying 47d27c1f41 fix typo 2024-07-16 14:35:06 +08:00
lloydzhou 5267ad46da add header for service worker upload api 2024-07-16 11:51:43 +08:00
lloydzhou 94bc880b7f fixed typescript error 2024-07-16 01:45:15 +08:00
lloydzhou bab3e0bc9b using CacheStorage to store image #5013 2024-07-16 01:19:40 +08:00
lloydzhou b3a324b6f5 fixed typescript error 2024-07-15 21:21:50 +08:00
lloydzhou a1117cd4ee save blob to indexeddb instead of base64 image string 2024-07-15 20:47:49 +08:00
lloydzhou 6ece818d69 remove no need code 2024-07-15 20:28:55 +08:00
lloydzhou 5df09d5e2a move code to utils/file 2024-07-15 20:26:03 +08:00
lloydzhou 33450ce429 move code to utils/file 2024-07-15 20:09:21 +08:00
lloydzhou e2f0206d88 update using indexdb read sd image data 2024-07-15 19:29:25 +08:00
licoy 3767b2c7f9 test 2024-07-15 15:29:56 +08:00
Dogtiti 1779f1f3da
Merge pull request #4984 from RubuJam/chore/pr
chore: add chinese template
2024-07-15 11:51:23 +08:00
lloydzhou b9d1dca65d update version v2.13.0 2024-07-13 21:30:31 +08:00
LiuElric 8e4d26163a
Merge pull request #4989 from ConnectAI-E/hotfix/cf-ai-gateway
update custom bytedance models, and update labels in setting page
2024-07-12 23:10:25 +08:00
lloydzhou 53c1176cbf update labels in setting page 2024-07-12 23:06:37 +08:00
lloydzhou 46d3e7884b hotfix: bytedance custom models 2024-07-12 22:53:39 +08:00
Rubu Jam a0290b0c1b
chore: Modify `title` and `labels` 2024-07-12 14:52:07 +00:00
Lloyd Zhou b4ae706914
Merge pull request #4988 from ConnectAI-E/hotfix/cf-ai-gateway
Hotfix: default config
2024-07-12 22:06:02 +08:00
lloydzhou 476bdac717 update 2024-07-12 21:52:38 +08:00
lloydzhou 831627268d update 2024-07-12 21:41:12 +08:00
lloydzhou 9b97dca601 hotfix: custom comfig for Gemini api. #4944 2024-07-12 21:27:30 +08:00
LiuElric 4ea8c0802a
Merge pull request #4986 from ConnectAI-E/hotfix/cf-ai-gateway
hotfix: using custom models, create custom provider
2024-07-12 20:31:22 +08:00
lloydzhou 9203870df5 hotfix: using custom models, create custom provider 2024-07-12 20:19:36 +08:00
Dogtiti e8088d6e38
Merge pull request #4979 from ConnectAI-E/hotfix/cf-ai-gateway
support cloudflare ai gateway
2024-07-12 19:12:45 +08:00
LiuElric 59d9bcdd27
Merge pull request #4946 from billxc/fix_mac_large_icon
feat: update macOS icon to be consistent with design in public/macos.png
2024-07-12 19:01:43 +08:00
LiuElric 9d1b13ba73
Merge pull request #4933 from PeterDaveHello/locale-tw
Improve tw Traditional Chinese locale
2024-07-12 19:01:05 +08:00
licoy dd1030139b fix: sd image preview modal size 2024-07-12 15:20:09 +08:00
Rubu Jam 30ca2117bb
chore: add chinese template 2024-07-12 07:00:53 +00:00
Rubu Jam 89024a8dc8
chore: add PR template 2024-07-12 06:28:21 +00:00
lloydzhou 728c38396a support cloudflare ai gateway 2024-07-12 12:00:25 +08:00
licoy d61cb98ac7 Merge remote-tracking branch 'origin/dev-sd' into dev-sd 2024-07-12 10:33:58 +08:00
licoy a7ceb61e27 pref: remove console 2024-07-12 10:33:43 +08:00
licoy 74b915a790 fix: sd3 model default select 2024-07-12 10:32:06 +08:00
lloydzhou 01ea690421 remove code 2024-07-11 18:35:44 +08:00
lloydzhou 17cc9284a0 add config in readme 2024-07-11 15:35:36 +08:00
lloydzhou 498d0f0b8b merge main 2024-07-11 15:29:47 +08:00
Dogtiti 89049e1a22
Merge pull request #4974 from ConnectAI-E/hotfix/bytedance
hotfix: doubao display name
2024-07-11 14:57:21 +08:00
lloydzhou 5e7254e8dc hotfix: doubao display name 2024-07-11 14:46:12 +08:00
Dogtiti f8c2732fdc
Merge pull request #4971 from ConnectAI-E/hotfix/alibaba
change build messages for qwen in client
2024-07-11 10:25:39 +08:00
lloydzhou fec36eb298 hotfix 2024-07-11 10:22:30 +08:00
lloydzhou 2299a4156d change build messages for qwen in client 2024-07-11 00:50:58 +08:00
lloydzhou 32b82b9cb3 change build messages for qwen in client 2024-07-11 00:48:58 +08:00
Lloyd Zhou ba6039fc8b
Merge pull request #4968 from ConnectAI-E/hotfix/google
hotfix Gemini finish twice. #4955 #4966
2024-07-10 20:19:29 +08:00
lloydzhou 6885812d21 hotfix Gemini finish twice. #4955 #4966 2024-07-10 18:59:44 +08:00
Lloyd Zhou 844025ec14
Merge pull request #4942 from ConnectAI-E/feature/alibaba
feat: qwen
2024-07-09 21:51:16 +08:00
Lloyd Zhou 94bc91c554
Merge pull request #4939 from ConnectAI-E/feature/ByteDance
Feature/byte dance
2024-07-09 21:48:22 +08:00
lloydzhou 044c16da4c update 2024-07-09 21:17:32 +08:00
lloydzhou cd4784c54a update 2024-07-09 21:14:38 +08:00
lloydzhou 814aaa4a69 update config for alibaba(qwen) 2024-07-09 20:15:20 +08:00
lloydzhou e3b3a4fefa add custom settings 2024-07-09 20:09:03 +08:00
lloydzhou 3fcbb3010d Merge branch 'feature/ByteDance' into feature/alibaba 2024-07-09 20:04:53 +08:00
lloydzhou 7573a19dc9 add custom settings 2024-07-09 20:01:58 +08:00
lloydzhou 3628d68d9a update 2024-07-09 19:56:52 +08:00
lloydzhou 23872086fa merge code 2024-07-09 19:37:47 +08:00
lloydzhou bb349a03da fix get headers for bytedance 2024-07-09 19:21:27 +08:00
lloydzhou 82be426f78 fix eslint error 2024-07-09 18:19:34 +08:00
lloydzhou 9d2a633f5e 更新文档支持配置豆包的模型 2024-07-09 18:15:43 +08:00
lloydzhou 1149d45589 remove check vision model 2024-07-09 18:06:59 +08:00
lloydzhou 9d7e19cebf display doubao model name when select model 2024-07-09 18:05:23 +08:00
lloydzhou b3023543d6 update 2024-07-09 16:55:33 +08:00
lloydzhou c229d2c3ce merge main 2024-07-09 16:53:15 +08:00
Lloyd Zhou 47ea383ddd
Merge pull request #4936 from ConnectAI-E/feat-baidu
feat: support baidu model
2024-07-09 16:45:46 +08:00
lloydzhou f2a35f1114 add missing file 2024-07-09 16:38:22 +08:00
lloydzhou 147fc9a35a fix ts type error 2024-07-09 15:10:23 +08:00
lloydzhou 93a03f8fe4 Merge remote-tracking branch 'origin/main' into feat-baidu 2024-07-09 15:06:10 +08:00
lloydzhou 230e3823a9 update readme 2024-07-09 15:02:44 +08:00
lloydzhou b14a0f24ae update locales 2024-07-09 14:57:19 +08:00
Lloyd Zhou 5295802720
Merge pull request #4953 from ConnectAI-E/hotfix/azure-deployment-notfound
hotfix: old AZURE_URL config error: "DeploymentNotFound". #4945 #4930
2024-07-09 14:52:26 +08:00
lloydzhou fadd7f6eb4 try getAccessToken in app, fixbug to fetch in none stream mode 2024-07-09 14:50:40 +08:00
lloydzhou 011b76e4e7 review code 2024-07-09 13:39:39 +08:00
lloydzhou f68cd2c5c0 review code 2024-07-09 12:27:44 +08:00
lloydzhou 6ac9789a1c hotfix 2024-07-09 12:16:37 +08:00
licoy 2b0153807c feat: Add Stability API server relay sending 2024-07-09 09:50:04 +08:00
lloydzhou 34ab37f31e update CUSTOM_MODELS config for Azure mode. 2024-07-09 00:47:35 +08:00
lloydzhou 71af2628eb hotfix: old AZURE_URL config error: "DeploymentNotFound". #4945 #4930 2024-07-09 00:32:18 +08:00
Xiaochen 15f028abfb update macOS icon to be consistent with design in public/macos.png 2024-07-08 14:15:51 +08:00
Dogtiti 9bdd37bb63 feat: qwen 2024-07-07 21:59:56 +08:00
Dogtiti d726c71141 wip: tencent 2024-07-07 14:43:28 +08:00
Dogtiti 1caa61f4c0 feat: swap name and displayName for bytedance in custom models 2024-07-06 22:59:20 +08:00
Dogtiti f3e3f08377 fix: apiClient 2024-07-06 21:25:00 +08:00
Dogtiti 2ec8b7a804 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feature/ByteDance 2024-07-06 21:14:07 +08:00
Dogtiti 9f7d137b05 Merge branch 'main' of https://github.com/ConnectAI-E/ChatGPT-Next-Web into feat-baidu 2024-07-06 21:11:50 +08:00
Lloyd Zhou 7218f13783
Merge pull request #4934 from ConnectAI-E/feature/client-headers
feat: optimize getHeaders
2024-07-06 20:08:16 +08:00
Lloyd Zhou fa31e7802c
Merge pull request #4935 from ConnectAI-E/feature/get-client-api
feat: add getClientApi method
2024-07-06 20:06:59 +08:00
Dogtiti 9b3b4494ba wip: doubao 2024-07-06 14:59:37 +08:00
Dogtiti 785d3748e1 feat: support baidu model 2024-07-06 13:05:09 +08:00
Dogtiti 5e0657ce55 feat: add getClientApi method 2024-07-06 11:27:53 +08:00
Dogtiti 700b06f9c5 feat: optimize getHeaders 2024-07-06 11:16:00 +08:00
Dogtiti b58bbf8eb4 feat: optimize getHeaders 2024-07-06 11:15:06 +08:00
Dogtiti 2d1f522aaf
Merge pull request #4930 from ConnectAI-E/feature-azure
support azure deployment name
2024-07-06 10:50:33 +08:00
Peter Dave Hello 0b2863dfab Improve tw Traditional Chinese locale 2024-07-06 02:36:29 +08:00
Dogtiti 70907ead8a
Merge pull request #4850 from ImBIOS/patch-2
chore(app/layout.tsx): fix deprecated viewport nextjs 14
2024-07-06 00:06:44 +08:00
lloydzhou 6dc4844c12 using default azure api-version value 2024-07-06 00:05:59 +08:00
Dogtiti 14bc1b6aac chore: optimize the code 2024-07-05 23:56:10 +08:00
lloydzhou 183ad2a34b add missing file 2024-07-05 20:57:55 +08:00
lloydzhou d9758be3ae fix ts 2024-07-05 20:20:21 +08:00
lloydzhou 6b1b530443 remove makeAzurePath 2024-07-05 20:15:56 +08:00
lloydzhou 1c20137b0e support azure deployment name 2024-07-05 19:59:45 +08:00
Dogtiti c4a6c933f8
Merge pull request #4923 from ConnectAI-E/refactor-model-table
Refactor model table
2024-07-04 21:04:43 +08:00
lloydzhou 31d9444264 hotfix 2024-07-04 19:38:26 +08:00
Lloyd Zhou 8cb204e22e
refactor: get language (#4922)
* refactor: get language
2024-07-04 17:18:42 +08:00
lloydzhou 97aa72ec5b hotfix ts 2024-07-04 08:36:25 +00:00
lloydzhou a68341eae6 include providerId in fullName 2024-07-04 16:11:37 +08:00
lloydzhou aa08183439 hotfix 2024-07-04 16:03:35 +08:00
lloydzhou 7a5596b909 hotfix 2024-07-04 15:48:48 +08:00
lloydzhou b9ffd50992 using <modelName>@<providerName> as fullName in modelTable 2024-07-04 15:44:36 +08:00
lloydzhou 14f2a8f370 merge model with modelName and providerName 2024-07-04 15:32:08 +08:00
lloydzhou e7b16bfbc0 add function to check model is available 2024-07-04 15:30:24 +08:00
licoy a16725ac17 feat: Improve SD list data and API integration 2024-07-03 15:37:34 +08:00
ji-jinlong 2803a91673
readme 添加 DEFAULT_MODEL 参数 (#4915)
* README_CN.md 添加 DEFAULT_MODEL 的说明

更改默认模型, 很久之前就有大佬支持了, 但更多人只会看readme, readme没有的就以为不支持(包括我).

https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/4545

* Update README.md

Change default model, it has been supported by experts long ago, but more people only read the readme. If it's not in the readme, they assume it's not supported (including me).

https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/4545

* Update README.md

ch to en

* en2cn

* 保持位置和readme.md一致
2024-07-03 15:18:24 +08:00
Lloyd Zhou cf2fce7666
Merge pull request #4917 from ConnectAI-E/feature-make-mask-outsite
feat: using fetch to get buildin masks
2024-07-03 14:50:35 +08:00
lloydzhou 1609abd166 fix ts 2024-07-03 14:18:22 +08:00
lloydzhou 88c74ae18d feat: using fetch to get buildin masks 2024-07-03 14:09:55 +08:00
Lloyd Zhou 78e2b41e0c
Merge pull request #4906 from ConnectAI-E/feature-gemini-streaming
gemini using real sse format response #3677 #3688
2024-07-03 10:58:27 +08:00
Lloyd Zhou 501f8b028b
Merge pull request #4903 from ConnectAI-E/fix-claude-get-headers
Fix claude get headers
2024-07-03 10:57:49 +08:00
licoy 54401162bd fix: model version field name 2024-07-02 15:31:30 +08:00
licoy 7fde9327a2 feat: Improve the data input and submission acquisition of SD parameter panel 2024-07-02 15:19:44 +08:00
licoy bbbf59c74a Improve the Stability parameter control panel 2024-07-02 10:24:19 +08:00
lloydzhou c4ad66f745 remove console.log 2024-07-01 13:27:06 +00:00
lloydzhou 69974d5651 gemini using real sse format response #3677 #3688 2024-07-01 13:24:01 +00:00
Lloyd Zhou ce3b6a04c2
Merge pull request #4782 from josephrocca/patch-1
Fix web url
2024-07-01 11:18:22 +00:00
lloydzhou 37e2517dac fix: 1. anthropic client using common getHeaders; 2. always using `Authorization` header send access code 2024-07-01 10:24:33 +00:00
lloydzhou d65ddead11 fix: anthropic client using common getHeaders 2024-07-01 09:41:01 +00:00
licoy 34034be0e3 hide new chat button on sd page 2024-06-27 16:13:51 +08:00
licoy d21481173e feat: add SD page switching 2024-06-27 16:06:15 +08:00
licoy fa6ebadc7b feat: add plugin entry selection 2024-06-27 15:35:16 +08:00
licoy a51fb24f36 fix ts error 2024-06-27 15:13:45 +08:00
fred-bf c359b30763
Merge pull request #4891 from ChatGPTNextWeb/fred-bf-patch-4
fix: revert proxy url changes
2024-06-26 17:42:29 +08:00
fred-bf 95e3b156c0
Update Dockerfile 2024-06-26 17:36:14 +08:00
Fred b972a0d081
feat: bump version 2024-06-24 14:45:45 +08:00
fred-bf 20749355da
Merge pull request #4841 from ImBIOS/patch-1
fix someone forgot to update license year to 2024
2024-06-24 14:35:11 +08:00
fred-bf dad122199a
Merge pull request from GHSA-gph5-rx77-3pjg
fix: validate the url to avoid SSRF
2024-06-24 14:33:31 +08:00
Fred 9fb8fbcc65
fix: validate the url to avoid SSRF 2024-06-24 14:31:50 +08:00
fred-bf 78e7ea72dc
Merge pull request #4869 from hengstchon/feat/models
feat: support model claude-3-5-sonnet-20240620
2024-06-24 14:20:35 +08:00
hengstchon 4640060891 feat: support model: claude-3-5-sonnet-20240620 2024-06-21 12:28:17 +02:00
Imamuzzaki Abu Salam 6efe4fb734
chore(app/layout.tsx): fix deprecated viewport nextjs 14 2024-06-16 10:17:58 +07:00
YeungYeah 74986803db feat: add google api safety setting 2024-06-15 12:09:58 +08:00
Imamuzzaki Abu Salam 9b0a705055
Update LICENSE 2024-06-14 09:19:38 +07:00
Imamuzzaki Abu Salam 163fc9e3a3
fix someone forgot to update license year to 2024 2024-06-14 08:45:06 +07:00
YeungYeah 24bf7950d8 chore: set the google safety setting to lowest 2024-06-12 21:59:28 +08:00
fred-bf b6735bffe4
Merge pull request #4826 from junxian-li-hpc/fix-webdav
Add new Teracloud domain
2024-06-07 17:03:36 +08:00
junxian li-ssslab win10 1d8fd480ca Add new Teracloud domain
- Added 'bora.teracloud.jp' to the list of supported domains.
2024-06-07 03:28:00 +08:00
DeanYao da2e2372aa
Merge pull request #4771 from yangxiang92/main
fix: make env PROXY_URL avaliable in Docker container.
2024-05-27 16:16:18 +08:00
josephrocca f3b972e573
Fix web url 2024-05-27 10:31:29 +08:00
xiang.yang bf3bc3c7e9 fix: make env PROXY_URL avaliable in Docker container. 2024-05-24 17:49:25 +08:00
fred-bf 38664487a0
Merge pull request #4689 from ReeseWang/main
Dockerfile: Listen to any addresses instead of localhost, fixes #4682
2024-05-22 14:19:42 +08:00
DeanYao de1111286c
Merge pull request #4743 from ChatGPTNextWeb/revert-4710-chatGPT
Revert "Chat gpt"
2024-05-20 19:03:11 +08:00
DeanYao d89a12aa05
Revert "Chat gpt" 2024-05-20 19:02:46 +08:00
DeanYao 754acd7c26
Merge pull request #4710 from Kivi1998/chatGPT
Chat gpt
2024-05-20 19:02:39 +08:00
DeanYao c3e2f3b714
Merge pull request #4732 from zhz8951/main
update translation
2024-05-20 17:48:45 +08:00
zhz8951 22ef3d3a46 update translation 2024-05-19 09:57:37 +08:00
Kivi1998 7f3516f44f
Merge branch 'main' into chatGPT 2024-05-16 15:14:43 +08:00
Hao Jia bfdb47a7ed ChatGPT Logo 2024-05-16 15:03:14 +08:00
DeanYao f55f04ab4f
Merge pull request #4671 from ChatGPTNextWeb/chore-fix
Chore fix
2024-05-16 14:51:06 +08:00
Hao Jia 01c9dbc1fd Merge remote-tracking branch 'origin/main' into gpt-4o
# Conflicts:
#	public/apple-touch-icon.png
2024-05-16 14:43:10 +08:00
Dean-YZG 0aa807df19 feat: remove empty memoryPrompt in ChatMessages 2024-05-16 14:41:18 +08:00
fred-bf 48d44ece58
Merge branch 'main' into chore-fix 2024-05-16 14:13:28 +08:00
fred-bf e58cb2b0db
chore: wrap the node command flag 2024-05-16 14:11:27 +08:00
fred-bf bffd9d9173
Merge pull request #4706 from leo4life2/patch-1
gpt-4o as vision model
2024-05-16 14:09:58 +08:00
Leo Li 8688842984
gpt-4o as vision model
https://platform.openai.com/docs/guides/vision
2024-05-15 17:53:27 -04:00
fred-bf cf29a8f2c8
chore: custom node listen address by `--host` flag 2024-05-15 18:13:40 +08:00
fred-bf 1e00c89988
Merge pull request #4703 from ChatGPTNextWeb/feat/gemini-flash
feat: add gemini flash into vision model list
2024-05-15 15:44:45 +08:00
fred-bf 0eccb547b5
Merge branch 'main' into feat/gemini-flash 2024-05-15 15:44:35 +08:00
Fred 4789a7f6a9
feat: add gemini flash into vision model list 2024-05-15 15:42:06 +08:00
fred-bf 0bf758afd4
Merge pull request #4702 from ChatGPTNextWeb/feat/gemini-flash
feat: support gemini flash
2024-05-15 15:30:23 +08:00
Fred 6612550c06
feat: support gemini flash 2024-05-15 15:29:38 +08:00
Ruoxi Wang d411159124
Dockerfile: Listen to any addresses instead of localhost, fixes #4682 2024-05-14 19:31:53 +08:00
fred-bf cf635a5e6f
Merge pull request #4684 from ChatGPTNextWeb/fred-bf-patch-4
feat: bump version
2024-05-14 17:36:06 +08:00
fred-bf 3a007e4f3d
feat: bump version 2024-05-14 17:35:58 +08:00
fred-bf 9faab960f6
Merge pull request #4674 from leo4life2/main
support gpt-4o
2024-05-14 14:36:23 +08:00
fred-bf 5df8b1d183
fix: revert gpt-4-turbo-preview detection 2024-05-14 14:32:34 +08:00
Leo Li ef5f910f19
support gpt-4o 2024-05-13 17:28:13 -04:00
Dean-YZG fffbee80e8 Merge remote-tracking branch 'origin/main' into chore-fix 2024-05-13 17:58:28 +08:00
DeanYao 6b30e167e1
Merge pull request #4647 from ChatGPTNextWeb/dependabot/npm_and_yarn/next-14.1.1
chore(deps): bump next from 13.4.9 to 14.1.1
2024-05-13 17:15:08 +08:00
DeanYao 8ec721259a
Merge pull request #4670 from DmitrySandalov/patch-1
Fix typo for "OpenAI Endpoint" in the en locale
2024-05-13 17:13:44 +08:00
Dean-YZG 9d7ce207b6 feat: support env var DEFAULT_INPUT_TEMPLATE to custom default template for preprocessing user inputs 2024-05-13 17:11:35 +08:00
Dean-YZG 2d1f0c9f57 feat: support env var DEFAULT_INPUT_TEMPLATE to custom default template for preprocessing user inputs 2024-05-13 17:11:11 +08:00
Dmitry Sandalov d3131d2f55
Fix typo for "OpenAI Endpoint" in the en locale 2024-05-13 10:39:49 +02:00
Dean-YZG c10447df79 feat: 1)upload image with type 'heic' 2)change the empty message to ';' for models 3) 2024-05-13 16:24:15 +08:00
DeanYao 212ae76d76
Merge pull request #4610 from rooben-me/fix-sync
Fix Sync Issue with Upstash
2024-05-13 11:28:29 +08:00
dependabot[bot] cd48f7eff4
chore(deps): bump next from 13.4.9 to 14.1.1
Bumps [next](https://github.com/vercel/next.js) from 13.4.9 to 14.1.1.
- [Release notes](https://github.com/vercel/next.js/releases)
- [Changelog](https://github.com/vercel/next.js/blob/canary/release.js)
- [Commits](https://github.com/vercel/next.js/compare/v13.4.9...v14.1.1)

---
updated-dependencies:
- dependency-name: next
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-05-10 00:27:02 +00:00
DeanYao 3513c6801e
Merge pull request #4626 from ChatGPTNextWeb/chore-fix
feat: googleApiKey & anthropicApiKey support setting multi-key
2024-05-07 15:06:02 +08:00
Dean-YZG 864529cbf6 feat: googleApiKey & anthropicApiKey support setting multi-key 2024-05-06 21:14:53 +08:00
DeanYao 58c0d3e12d
Merge pull request #4625 from ChatGPTNextWeb/chore-fix
feat: fix 1)the property named 'role' of the first message must be 'u…
2024-05-06 20:48:29 +08:00
Dean-YZG a1493bfb4e feat: bugfix 2024-05-06 20:46:53 +08:00
butterfly b3e856df1d feat: fix 1)the property named 'role' of the first message must be 'user' 2)if default summarize model 'gpt-3.5-turbo' is blocked, use currentModel instead 3)if apiurl&apikey set by location, useCustomConfig would be opened 2024-05-06 19:26:39 +08:00
ruban 8ef2617eec
Removed spaces 2024-05-02 23:24:41 -07:00
ruban 1da7d81122
Fix cloud data sync issue with Upstash (#4563) 2024-05-02 23:22:32 -07:00
ruban a103582346
fix 2024-05-02 23:10:10 -07:00
ruban 7b61d05e88
new fix 2024-05-02 23:08:17 -07:00
ruban 6fc7c50f19
this 2024-05-02 22:55:41 -07:00
ruban 9d728ec3c5
this is ti 2024-05-02 22:50:35 -07:00
ruban 9cd3358e4e
this is the fix 2024-05-02 22:40:52 -07:00
ruban 4cd94370e8 fix i think 2024-05-03 05:25:11 +00:00
fred-bf 52312dbd23
Merge pull request #4595 from ChatGPTNextWeb/feat/bump-version
feat: bump version code
2024-04-30 13:28:30 +08:00
Fred b2e8a1eaa2
feat: bump version code 2024-04-30 13:27:07 +08:00
DeanYao 506c17a093
Merge pull request #4564 from MrrDrr/gpt4v_remove_max_tokens
remove max_tokens from the official version of gpt4-turbo
2024-04-25 13:01:21 +08:00
DeanYao 69642fba52
Merge pull request #4557 from RoyRao2333/dev/no-fucos-outline
chore: No outline when element is in `:focus-visible` state
2024-04-25 12:58:19 +08:00
DeanYao 7d647c981f
Merge pull request #4535 from RubuJam/main
Refer to OpenAI documentation to delete some models.
2024-04-25 11:44:01 +08:00
DeanYao 9aec3b714e
Merge pull request #4545 from jalr4ever/main-default-model-env
Support a way to define default model by adding DEFAULT_MODEL env.
2024-04-25 10:58:14 +08:00
l.tingting dd4648ed9a remove max_tokens from the official version of gpt4-turbo 2024-04-24 22:59:14 +08:00
Roy 1cd0beb231 chore: No outline when element is in `:focus-visible` state 2024-04-23 11:48:54 +08:00
Wayland Zhan c96e4b7966 feat: Support a way to define default model by adding DEFAULT_MODEL env. 2024-04-19 06:57:15 +00:00
黑云白土 b7aab3c102
Update google.ts 2024-04-17 17:16:31 +08:00
黑云白土 fcb1a657e3
Update constant.ts 2024-04-17 16:24:11 +08:00
DeanYao 9b2cb1e1c3
Merge pull request #4525 from ChatGPTNextWeb/chore-fix
Chore fix
2024-04-16 14:59:22 +08:00
butterfly fb8b8d28da feat: (1) fix issues/4335 and issues/4518 2024-04-16 14:50:48 +08:00
DeanYao ad80153bbb
Merge pull request #4520 from Algorithm5838/refactor-models
Refactor DEFAULT_MODELS for better maintainability
2024-04-16 09:33:00 +08:00
Algorithm5838 9564b261d5 Update constant.ts 2024-04-15 13:14:14 +03:00
DeanYao 1e2a662fa6
Merge pull request #4412 from RubuJam/main
Gemini will generate the request address based on the selected model name and supports Gemini 1.5 Pro (gemini-1.5-pro-latest).
2024-04-15 11:44:53 +08:00
DeanYao 51f7daaeaf
Merge pull request #4514 from SukkaW/fix-ls-performance
perf: avoid read localStorage on every render
2024-04-15 10:11:03 +08:00
DeanYao f742a7ec4e
Merge pull request #4510 from MrrDrr/add_timezone_in_system_prompts
add timezone in system prompts
2024-04-15 10:09:53 +08:00
DeanYao e2c0d2a07b
Merge pull request #4509 from MrrDrr/add_knowledge_cutoff
add knowledge cutoff date for gpt-4-turbo-2024-04-09
2024-04-15 10:02:41 +08:00
DeanYao d112dc41b2
Merge pull request #4500 from PeterDaveHello/locale-tw-cht
Improve tw Traditional Chinese locale
2024-04-15 09:47:36 +08:00
SukkaW 2322851ac4 perf: avoid read localStorage on every render 2024-04-14 17:38:54 +08:00
l.tingting aa084ea09a add timezone in system prompts 2024-04-12 23:07:29 +08:00
l.tingting 6520f9b7eb add knowledge cutoff date for gpt-4-turbo-2024-04-09 2024-04-12 22:44:26 +08:00
butterfly fd8d0a1746 feat: fix the logtics of client joining webdav url 2024-04-12 14:20:15 +08:00
DeanYao af3ebacee6
Merge pull request #4507 from ChatGPTNextWeb/chore-fix
feat: fix codes of joining webdav url in client & webdav proxy
2024-04-12 14:07:24 +08:00
butterfly 55d7014301 feat: fix the logtics of client joining webdav url 2024-04-12 14:02:05 +08:00
butterfly b72d7fbeda feat: fix webdav 逻辑2 2024-04-12 13:46:37 +08:00
butterfly ee15c14049 feat: fix webdav 逻辑 2024-04-12 13:40:37 +08:00
Peter Dave Hello 1756bdd033 Improve tw Traditional Chinese locale 2024-04-12 00:18:15 +08:00
黑云白土 0cffaf8dc5
Merge branch 'main' into main 2024-04-11 10:30:05 +08:00
DeanYao 55a93e7b47
Merge pull request #4487 from leo4life2/main
Support `gpt-4-turbo` and `gpt-4-turbo-2024-04-09`
2024-04-11 09:26:08 +08:00
黑云白土 5dc5bfb797
Merge branch 'main' into main 2024-04-11 01:24:34 +08:00
Leo Li f101ee3c4f
support new vision models 2024-04-10 05:33:54 -04:00
Leo Li 6319f41b2c
add new turbo 2024-04-10 05:18:39 -04:00
Leo Li 6c718ada1b
Merge branch 'main' of github.com:ChatGPTNextWeb/ChatGPT-Next-Web 2024-04-10 05:14:44 -04:00
DeanYao 67acc38a1f
Merge pull request #4480 from ChatGPTNextWeb/chore-fix
feat: Solve the problem of using openai interface protocol for user-d…
2024-04-10 09:26:21 +08:00
DeanYao dd1d8509f0
Merge pull request #4476 from dlb-data/dlb-data-patch-1
Update layout.tsx
2024-04-10 09:13:22 +08:00
butterfly 79f342439a feat: Solve the problem of using openai interface protocol for user-defined claude model & add some famous webdav endpoints 2024-04-09 20:49:51 +08:00
DeanYao 13db64f0ec
Merge pull request #4479 from ChatGPTNextWeb/chore-fix
feat: white webdav server domain
2024-04-09 18:34:28 +08:00
butterfly 908ce3bbd9 feat: Optimize document 2024-04-09 18:25:51 +08:00
butterfly df3313971d feat: Optimize code 2024-04-09 18:24:22 +08:00
butterfly b175132854 feat: Optimize var names 2024-04-09 18:23:52 +08:00
butterfly 4cb0655192 feat: Optimize document 2024-04-09 18:17:00 +08:00
butterfly 8b191bd2f7 feat: white webdav server domain 2024-04-09 18:05:56 +08:00
DeanYao f3106e3bbb
Merge pull request #4477 from ChatGPTNextWeb/chore-fix
feat: 补充文档
2024-04-09 16:50:47 +08:00
butterfly 7fcfbc3729 feat: 补充文档 2024-04-09 16:49:51 +08:00
dlb-data 598468c2b7
Update layout.tsx 2024-04-09 16:34:21 +08:00
dlb-data 84681d3878
Update layout.tsx 2024-04-09 16:24:03 +08:00
DeanYao c7b14cba4d
Merge pull request #4470 from ChatGPTNextWeb/chore-fix
feat: fix system prompt
2024-04-09 10:45:55 +08:00
butterfly d508127452 feat: fix system prompt 2024-04-09 10:45:09 +08:00
DeanYao 984c79e2d2
Merge pull request #4469 from ChatGPTNextWeb/chore-fix
feat: remove debug code
2024-04-09 09:13:07 +08:00
butterfly 6cb296f952 feat: remove debug code 2024-04-09 09:12:18 +08:00
DeanYao db533fc166
Merge pull request #4466 from ChatGPTNextWeb/chore-fix
feat: modify some propmt in DEFAULT_INPUT_TEMPLATE about expressing l…
2024-04-08 19:33:27 +08:00
butterfly 02b0e79ba3 feat: modify some propmt in DEFAULT_INPUT_TEMPLATE about expressing latex 2024-04-08 19:27:22 +08:00
DeanYao 1b83dd0a8a
Merge pull request #4462 from ChatGPTNextWeb/chore-fix
feat: fix no max_tokens in payload when calling openai vision model
2024-04-08 18:31:52 +08:00
butterfly 9b982b408d feat: fix no max_tokens in payload when calling openai vision model 2024-04-08 18:29:08 +08:00
DeanYao 9b03ab830d
Merge pull request #4461 from ChatGPTNextWeb/chore-fix
feat: remove duplicate Input Template
2024-04-08 18:08:48 +08:00
butterfly 264da6798c feat: remove duplicate Input Template 2024-04-08 18:06:17 +08:00
DeanYao f68b8afa8d
Merge pull request #4457 from ChatGPTNextWeb/feat-multi-models
Feat multi models
2024-04-08 17:10:29 +08:00
butterfly 63f9063255 feat: call claude api not in credential 'include' mode 2024-04-08 15:33:27 +08:00
butterfly 6dad353e1c feat: call claude api not in credential 'include' mode 2024-04-08 15:33:02 +08:00
butterfly 5446d8d4a2 feat: fix illegal exports in app/api/anthropic/[...path]/route.ts 2024-04-08 13:59:55 +08:00
butterfly ef7617d545 feat: configs about app client 2024-04-08 13:41:02 +08:00
butterfly 0fbb560e90 feat: delete returned models in modals function of ClaudeApi instance 2024-04-07 20:05:19 +08:00
butterfly 86b5c55855 feat: roles must alternate between user and assistant in claude, so add a fake assistant message between two user messages 2024-04-07 18:02:31 +08:00
butterfly 768decde93 feat: parse response message 2024-04-07 15:20:27 +08:00
butterfly 3cb4315193 feat: clean codes 2024-04-07 11:50:25 +08:00
butterfly 69b079c86e feat: dev done 2024-04-07 11:32:57 +08:00
DeanYao 9f3fc5eb9f
Merge pull request #4417 from xiaotianxt/main
Update apple-touch-icon.png
2024-04-04 08:32:39 +08:00
butterfly 15e595837b feat: settings command dev done 2024-04-02 14:21:49 +08:00
xiaotianxt 17e57bb28e
feat: update apple-touch-icon.png 2024-03-30 11:38:20 +08:00
黑云白土 4d0c77b973
更新 constant.ts 2024-03-28 21:42:45 +08:00
黑云白土 f8b180ac44
Update google.ts 2024-03-28 15:52:38 +08:00
黑云白土 cd30368da9
Update constant.ts 2024-03-28 15:51:06 +08:00
黑云白土 27ed57a648
Update utils.ts 2024-03-28 15:49:49 +08:00
DeanYao e38b527ac2
Merge pull request #3205 from H0llyW00dzZ/summarizelogic
Refactor Summarize Logic
2024-03-28 15:19:32 +08:00
DeanYao 113d9612db
Merge pull request #3280 from surkaa/patch-1
Update .env.template 更正单词
2024-03-28 13:40:17 +08:00
DeanYao 6b3daec23f
Merge pull request #3314 from H0llyW00dzZ/text-moderation-azure
Feat ChatGPT LLM Api [Console Log] [Text Moderation] [Azure]
2024-03-28 13:38:56 +08:00
DeanYao e056a1d46d
Merge pull request #3405 from Yuliang-Lee/fix/MessageSelectorWarning
fix: MessageSelectorWarning
2024-03-28 11:38:45 +08:00
DeanYao 57026f6262
Merge pull request #3424 from H0llyW00dzZ/serverrside
Refactor Api Common [Server Side] [Console Log]
2024-03-28 11:29:48 +08:00
DeanYao 8ef77f50c3
Merge branch 'main' into serverrside 2024-03-28 11:20:52 +08:00
DeanYao 93e21515e5
Merge pull request #4408 from hmhuming/main
fix docker
2024-03-28 10:28:11 +08:00
hmhuming 24caa3b97b Merge branch 'main' of https://github.com/hmhuming/ChatGPT-Next-Web 2024-03-28 09:18:51 +08:00
DeanYao c93b36fe79
Merge pull request #3508 from reece00/Mask-language
The language filtering option of the mask is stored
2024-03-27 19:58:30 +08:00
DeanYao 0de9242a26
Merge pull request #3529 from erich2s/chat-item-selected-border
fix(chat-item): selected ChatItem showing border in other pages
2024-03-27 19:00:16 +08:00
hmhuming 53fb52c6c0 fix docker 2024-03-27 17:58:55 +08:00
DeanYao afaa529ba6
Merge pull request #3870 from Dup4/fix-webdav-check
fix: webdav check httpcode list
2024-03-27 14:04:15 +08:00
DeanYao 43824bd621
Merge pull request #4193 from MrrDrr/env_bug_fix
Update README.md
2024-03-26 17:53:57 +08:00
DeanYao 3c97a4f5a1
Merge pull request #4091 from H0llyW00dzZ/docker-ignore
Update Docker Ignore
2024-03-26 17:44:52 +08:00
DeanYao 711bf190d4
Merge pull request #4264 from ChatGPTNextWeb/dependabot/npm_and_yarn/tauri-apps/cli-1.5.11
chore(deps-dev): bump @tauri-apps/cli from 1.5.7 to 1.5.11
2024-03-26 17:32:01 +08:00
DeanYao 1049006cf9
Merge pull request #4366 from Essmatiko123/dependabot/npm_and_yarn/eslint-plugin-prettier-5.1.3
chore(deps-dev): bump eslint-plugin-prettier from 4.2.1 to 5.1.3
2024-03-26 13:57:50 +08:00
dependabot[bot] 76603d108d
chore(deps-dev): bump @tauri-apps/cli from 1.5.7 to 1.5.11
Bumps [@tauri-apps/cli](https://github.com/tauri-apps/tauri) from 1.5.7 to 1.5.11.
- [Release notes](https://github.com/tauri-apps/tauri/releases)
- [Commits](https://github.com/tauri-apps/tauri/compare/@tauri-apps/cli-v1.5.7...@tauri-apps/cli-v1.5.11)

---
updated-dependencies:
- dependency-name: "@tauri-apps/cli"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-26 05:42:55 +00:00
DeanYao 5bc3930230
Merge pull request #4389 from ChatGPTNextWeb/dependabot/npm_and_yarn/types/react-18.2.70
chore(deps-dev): bump @types/react from 18.2.14 to 18.2.70
2024-03-26 13:41:13 +08:00
DeanYao e5edd851b3
Merge pull request #4390 from ChatGPTNextWeb/dependabot/npm_and_yarn/emoji-picker-react-4.9.2
chore(deps): bump emoji-picker-react from 4.5.15 to 4.9.2
2024-03-26 13:35:03 +08:00
dependabot[bot] dcad400758
chore(deps-dev): bump @types/react from 18.2.14 to 18.2.70
Bumps [@types/react](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react) from 18.2.14 to 18.2.70.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/react)

---
updated-dependencies:
- dependency-name: "@types/react"
  dependency-type: direct:development
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-26 05:26:51 +00:00
DeanYao a1aaea9c55
Merge pull request #4391 from ChatGPTNextWeb/dependabot/npm_and_yarn/types/node-20.11.30
chore(deps-dev): bump @types/node from 20.9.0 to 20.11.30
2024-03-26 13:24:24 +08:00
butterfly 4d6b981a54 bugfix: Delete the escapeDollarNumber function, which causes errors in rendering a latex string 2024-03-26 11:43:55 +08:00
DeanYao a4e4286e04
Merge pull request #4186 from MrrDrr/formula_rendering
support \(...\) and \[...\] style math formula
2024-03-25 19:55:57 +08:00
dependabot[bot] 6dd7a6a171
chore(deps-dev): bump @types/node from 20.9.0 to 20.11.30
Bumps [@types/node](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/node) from 20.9.0 to 20.11.30.
- [Release notes](https://github.com/DefinitelyTyped/DefinitelyTyped/releases)
- [Commits](https://github.com/DefinitelyTyped/DefinitelyTyped/commits/HEAD/types/node)

---
updated-dependencies:
- dependency-name: "@types/node"
  dependency-type: direct:development
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-25 10:47:55 +00:00
dependabot[bot] 8e554a87b0
chore(deps): bump emoji-picker-react from 4.5.15 to 4.9.2
Bumps [emoji-picker-react](https://github.com/ealush/emoji-picker-react) from 4.5.15 to 4.9.2.
- [Release notes](https://github.com/ealush/emoji-picker-react/releases)
- [Commits](https://github.com/ealush/emoji-picker-react/commits)

---
updated-dependencies:
- dependency-name: emoji-picker-react
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-25 10:46:24 +00:00
fred-bf f1b4c083a4
Merge pull request #4379 from EasonQwQ/main
Fix: Handle empty server response in API call
2024-03-24 14:18:14 +08:00
fred-bf 90af4e3b77
Merge pull request #4381 from ChatGPTNextWeb/fred-bf-patch-3
patch: disable webdav redirect
2024-03-24 14:17:11 +08:00
fred-bf e8d76a513d
patch: disable webdav redirect 2024-03-24 14:15:04 +08:00
kidv 29e03b88c7 Fix: Handle empty server response in API call 2024-03-24 04:07:25 +08:00
fred-bf ebbd870150
Merge pull request #4353 from H0llyW00dzZ/cherry-pick-webdav
[Cherry Pick] Fix Webdav Syncing Issues
2024-03-21 18:56:26 +08:00
Kivi1998 5bf402710f
Merge branch 'main' into main 2024-03-21 11:56:09 +08:00
H0llyW00dzZ c0c54e5709
Fix Webdav Syncing Issues
- [+] feat(route.ts): add endpoint validation and improve error handling
- [+] refactor(route.ts): use targetPath for request validation and error messages
- [+] fix(route.ts): correct targetUrl formation
2024-03-20 01:40:41 +07:00
fred-bf 3ba984d09e
Merge pull request #4306 from H0llyW00dzZ/simplify-cherry-pick
[Cherry Pick] Improve [Utils] Check Vision Model
2024-03-19 17:45:57 +08:00
fred-bf f274683d46
Merge pull request #4322 from imraax/dev
Fix "Enter" bug
2024-03-19 17:44:41 +08:00
Hao Jia 2053db4cfc ChatGPT Logo 2024-03-19 15:34:44 +08:00
fred-bf e20ce8e335
Merge pull request #4339 from ChatGPTNextWeb/fred-bf-patch-2
feat: update vercel deploy env
2024-03-18 18:25:24 +08:00
fred-bf 9fd750511c
feat: update vercel deploy env 2024-03-18 18:24:48 +08:00
Raax 028957fcdc Fix "Enter" bug
Fix Chinese input method "Enter" on Safari
2024-03-16 21:55:16 +08:00
H0llyW00dzZ a4c54cae60
Improve [Utils] Check Vision Model
- [+] refactor(utils.ts): improve isVisionModel function to use array.some instead of model.includes
2024-03-15 09:38:42 +07:00
Hao Jia 754303e7c7 v0.0.0 2024-03-14 16:36:32 +08:00
fred-bf cc0eae7153
Merge pull request #4288 from fred-bf/fix/migrate-proxy-url
fix: auto migrate proxy config
2024-03-14 03:05:08 +08:00
Fred 066ca9e552
fix: auto migrate proxy config 2024-03-14 03:03:46 +08:00
fred-bf 7c04a90d77
Merge pull request #4287 from fred-bf/main
feat: bump version
2024-03-14 02:30:58 +08:00
fred-bf a8a65ac769
Merge branch 'ChatGPTNextWeb:main' into main 2024-03-14 02:30:22 +08:00
Fred aec3c5d6cc
feat: bump version 2024-03-14 02:29:31 +08:00
fred-bf a22141c2eb
Merge pull request #4285 from fred-bf/fix/cors-ssrf
[Bugfix] Fix CORS SSRF security issue
2024-03-14 02:27:55 +08:00
Fred 99aa064319
fix: fix webdav sync issue 2024-03-14 01:58:25 +08:00
Fred 6aaf83f3c2
fix: fix upstash sync issue 2024-03-14 01:56:36 +08:00
Fred 133ce39a13
chore: update cors default path 2024-03-14 01:33:41 +08:00
Fred 8645214654
fix: change matching pattern 2024-03-14 01:26:13 +08:00
Fred eebc334e02
fix: remove corsFetch 2024-03-14 00:57:54 +08:00
Fred 038fa3b301
fix: add webdav request filter 2024-03-14 00:33:26 +08:00
Fred 9a8497299d
fix: adjust upstash api 2024-03-13 23:58:28 +08:00
fred-bf 61ce3868b5
Merge pull request #4279 from SukkaW/package-json-corepack
chore: specify yarn 1 in package.json
2024-03-13 20:09:57 +08:00
SukkaW 844c2a26bc chore: specify yarn 1 in package.json 2024-03-13 13:30:16 +08:00
fred-bf a15c4d9c20
Merge pull request #4234 from fengzai6/main
Fix EmojiPicker mobile width adaptation and update avatar clicking behavior
2024-03-11 13:59:09 +08:00
fred-bf ff9f0e60ac
Merge pull request #3972 from greenjerry/fix-export-garbled
fix: 修复导出时字符乱码问题
2024-03-07 17:07:16 +08:00
fred-bf 2bf6111bf5
Merge branch 'main' into fix-export-garbled 2024-03-07 17:07:08 +08:00
fengzai6 ad10a11903 Add z-index to avatar 2024-03-07 15:51:58 +08:00
fengzai6 c22153a4eb Revert "fix: No history message attached when for gemini-pro-vision"
This reverts commit c197962851.
2024-03-07 15:46:13 +08:00
fengzai6 5348d57057 Fix EmojiPicker mobile width adaptation and update avatar clicking behavior 2024-03-07 15:36:19 +08:00
fengzai6 052524dabd Merge remote-tracking branch 'upstream/main' 2024-03-07 15:32:09 +08:00
Leo Li e33d05cfe5
merge 2024-03-05 16:48:10 -05:00
fred-bf 5529ece220
Merge pull request #4218 from ChatGPTNextWeb/fred-bf-patch-1
chore: update GTM_ID definition
2024-03-05 17:37:22 +08:00
fred-bf e71094d4a8
chore: update GTM_ID definition, close #4217 2024-03-05 17:36:52 +08:00
fred-bf 98aa023d70
Merge pull request #4195 from aliceric27/main
slightly polishes the tw text.
2024-03-04 19:03:23 +08:00
aliceric27 e1066434d0 fix some text 2024-03-03 00:23:00 +08:00
aliceric27 86ae4b2a75 slightly polishes the tw text. 2024-03-02 23:58:23 +08:00
l.tingting ed8099bf1e Update README.md 2024-03-02 15:26:19 +08:00
l.tingting 524c9beee4 support \(...\) and \[...\] style math formula 2024-03-02 11:08:34 +08:00
fred-bf 99fb9dcf11
Merge pull request #4164 from KSnow616/main
feat: Pasting images into the textbox
2024-02-29 22:14:02 +08:00
fred-bf 1294817103
Merge pull request #4089 from H0llyW00dzZ/cherry-pick
[Cherry Pick] Fix [Utils] Regex trimTopic
2024-02-29 16:31:30 +08:00
Snow Kawashiro 9775660da7
Update chat.tsx 2024-02-28 20:45:42 +08:00
Snow Kawashiro e7051353eb
vision_model_only 2024-02-28 20:38:00 +08:00
Snow Kawashiro bd19e97cf8
add_image_pasting 2024-02-28 20:05:13 +08:00
fred-bf 8b821ac0c9
Merge pull request #4162 from fred-bf/fix/identify-vision-model
fix: fix the method to detect vision model
2024-02-28 11:35:22 +08:00
Fred 43e5dc2292
fix: fix the method to detect vision model 2024-02-28 11:33:43 +08:00
fred-bf 08fa22749a
fix: add max_tokens when using vision model (#4157) 2024-02-27 17:28:01 +08:00
fengzai6 c197962851 fix: No history message attached when for gemini-pro-vision 2024-02-27 15:02:58 +08:00
fred-bf 44a51273be
Merge pull request #4149 from fred-bf/feat/auto-detach-scrolling
feat: auto detach scrolling
2024-02-27 11:56:37 +08:00
Fred e3b3ae97bc
chore: clear scroll info 2024-02-27 11:49:44 +08:00
Fred 410a22dc63
feat: auto detach scrolling 2024-02-27 11:43:40 +08:00
Algorithm5838 069766d581
Correct cutoff dates (#4118) 2024-02-27 10:28:54 +08:00
DonaldBear f22e36e52f
feat(tw.ts): added new translations (#4142)
* feat(tw.ts): added new translations

I have translated previously untranslated text in response to the latest update.

* feat(tw.ts): added new translations

I have translated previously untranslated text in response to the latest update.
2024-02-27 00:16:56 +08:00
fred-bf bc1794fb4a
feat: bump version (#4133) 2024-02-26 18:15:00 +08:00
Fred aacd26c7db
feat: bump version 2024-02-26 18:14:10 +08:00
fred-bf ff166f7b4c
chore: adjust for ollama support (#4129) 2024-02-26 17:18:46 +08:00
H0llyW00dzZ bf1b5c3951
Update Docker Ignore
- [+] chore(dockerignore): update .dockerignore file with more comprehensive ignore rules
2024-02-21 08:46:21 +07:00
H0llyW00dzZ 22baebaf8c
[Cherry Pick] Fix [Utils] Regex trimTopic
- [+] fix(utils.ts): update regular expressions in trimTopic function to handle asterisks
2024-02-21 04:19:12 +07:00
H0llyW00dzZ e756506c18
[Cherry Pick] Improve Github Issue Template (#4041)
* Improve Github Issue Template

- [+] feat(issue templates): update issue templates from markdown to form schema
- [+] feat(issue templates): translate issue templates to Chinese
- [+] remove(issue templates): delete old markdown issue templates

* chore: remove Chinese template issue temporarily

---------

Co-authored-by: Fred <fred@nextchat.dev>
2024-02-20 18:11:02 +08:00
Qiying Wang fd67f980a5
Fix temperature range (#4083) 2024-02-20 18:05:17 +08:00
TheRam_ e2da3406d2
Add vision support (#4076) 2024-02-20 18:04:32 +08:00
Ikko Eltociear Ashimine 05b6d989b6
chore: fix typo in next.config.mjs (#4072)
verison -> version
2024-02-20 17:59:59 +08:00
H0llyW00dzZ 1d6ee64e1d
[Cherry Pick] Fix [UI/UX] [Front End] Settings Page (#4032)
* Fix [UI/UX] [Locales] Correct Spelling

- [+] fix(locales): correct spelling and improve wording in cn.ts and en.ts locale files

* Fix [UI/UX] [Front End] Settings Page

- [+] fix(settings.tsx): correct typo in ApiVerion to ApiVersion
- [+] refactor(settings.tsx): switch Azure.ApiKey to Google.ApiKey in ListItem title and subTitle

* Fix [UI/UX] [Locales] [SK] Correct Typo

- [+] fix(sk.ts): correct typo in ApiVersion key in Slovak locale file
2024-02-12 20:36:52 +08:00
greenjerry bf711f2ad7 修复导出json和markdown时中文及其他utf8字符乱码问题 2024-02-02 13:58:06 +08:00
Leo Li 3554872d9a
Add gpt-4-0125-preview 2024-01-25 15:09:48 -05:00
Dup4 86f42d56f2
fix: webdav check httpcode list
Signed-off-by: Dup4 <lyuzhi.pan@gmail.com>
2024-01-18 09:11:13 +08:00
dependabot[bot] f05bf0a6f6
chore(deps-dev): bump eslint-plugin-prettier from 4.2.1 to 5.1.3
Bumps [eslint-plugin-prettier](https://github.com/prettier/eslint-plugin-prettier) from 4.2.1 to 5.1.3.
- [Release notes](https://github.com/prettier/eslint-plugin-prettier/releases)
- [Changelog](https://github.com/prettier/eslint-plugin-prettier/blob/master/CHANGELOG.md)
- [Commits](https://github.com/prettier/eslint-plugin-prettier/compare/v4.2.1...v5.1.3)

---
updated-dependencies:
- dependency-name: eslint-plugin-prettier
  dependency-type: direct:development
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-01-15 10:20:50 +00:00
Eric Huang 943a2707d2 fix(chat-item): selected chat-item showing border in other pages 2023-12-15 09:37:37 +08:00
reece00 1442337e3c The language filtering option of the mask is stored 2023-12-12 02:22:22 +08:00
H0llyW00dzZ 8dc8682078
Fix Api Common [Server Side]
- [+] fix(common.ts): improve handling of OpenAI-Organization header
 - Check if serverConfig.openaiOrgId is defined and not an empty string
 - Log the value of openaiOrganizationHeader if present, otherwise log that the header is not present
 - Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV)
2023-12-04 13:33:23 +07:00
H0llyW00dzZ 36e9c6ac4d
Refactor Api Common [Server Side] [Console Log]
- [+] refactor(common.ts): remove unnecessary console.log for [Org ID] in requestOpenai function
- [+] refactor(common.ts): conditionally delete OpenAI-Organization header from response if [Org ID] is not set up in ENV
2023-12-01 19:49:12 +07:00
frankylli 10ea9bf1e3 fix: MessageSelectorWarning 2023-11-29 16:25:15 +08:00
H0llyW00dzZ fe0f078353
Feat ChatGPT LLM Api [Console Log] [Text Moderation] [Azure]
[+] fix(openai.ts): fix parsing error in ChatGPTApi's message handler
[+] feat(openai.ts): add logging for flagged categories in text moderation
2023-11-19 19:49:52 +07:00
SurKaa 39f3afd52c
Update .env.template 更正单词 2023-11-16 09:22:56 +08:00
H0llyW00dzZ 544bab0fe2
Refactor Summarize Logic
[+] chore(chat.ts): remove unnecessary comment and refactor variable name
[+] feat(chat.ts): add stream: false to config object
2023-11-09 20:56:45 +07:00
Yidadaa cdf0311d27 feat: add claude and bard 2023-11-07 23:22:11 +08:00
Yidadaa 5610f423d0 feat: add multi-model support 2023-10-30 02:07:11 +08:00
252 changed files with 33643 additions and 4200 deletions

View File

@ -1,8 +1,97 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Node.js dependencies
/node_modules
/jspm_packages
# TypeScript v1 declaration files
typings
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variable files
.env
.env.test
# local env files # local env files
.env*.local .env*.local
# docker-compose env files # Next.js build output
.env .next
out
# Nuxt.js build output
.nuxt
dist
# Gatsby files
.cache/
# Vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# Temporary folders
tmp
temp
# IDE and editor directories
.idea
.vscode
*.swp
*.swo
*~
# OS generated files
.DS_Store
Thumbs.db
# secret key
*.key *.key
*.key.pub *.key.pub

View File

@ -1,21 +1,28 @@
# Your openai api key. (required) # Your openai api key. (required)
OPENAI_API_KEY=sk-xxxx OPENAI_API_KEY=sk-xxxx
# Access passsword, separated by comma. (optional) # DeepSeek Api Key. (Optional)
DEEPSEEK_API_KEY=
# Access password, separated by comma. (optional)
CODE=your-password CODE=your-password
# You can start service behind a proxy # You can start service behind a proxy. (optional)
PROXY_URL=http://localhost:7890 PROXY_URL=http://localhost:7890
# Enable MCP functionality (optional)
# Default: Empty (disabled)
# Set to "true" to enable MCP functionality
ENABLE_MCP=
# (optional) # (optional)
# Default: Empty # Default: Empty
# Googel Gemini Pro API key, set if you want to use Google Gemini Pro API. # Google Gemini Pro API key, set if you want to use Google Gemini Pro API.
GOOGLE_API_KEY= GOOGLE_API_KEY=
# (optional) # (optional)
# Default: https://generativelanguage.googleapis.com/ # Default: https://generativelanguage.googleapis.com/
# Googel Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url. # Google Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url.
GOOGLE_URL= GOOGLE_URL=
# Override openai api request base url. (optional) # Override openai api request base url. (optional)
@ -47,3 +54,30 @@ ENABLE_BALANCE_QUERY=
# If you want to disable parse settings from url, set this value to 1. # If you want to disable parse settings from url, set this value to 1.
DISABLE_FAST_LINK= DISABLE_FAST_LINK=
# (optional)
# Default: Empty
# To control custom models, use + to add a custom model, use - to hide a model, use name=displayName to customize model name, separated by comma.
CUSTOM_MODELS=
# (optional)
# Default: Empty
# Change default model
DEFAULT_MODEL=
# anthropic claude Api Key.(optional)
ANTHROPIC_API_KEY=
### anthropic claude Api version. (optional)
ANTHROPIC_API_VERSION=
### anthropic claude Api url (optional)
ANTHROPIC_URL=
### (optional)
WHITE_WEBDAV_ENDPOINTS=
### siliconflow Api key (optional)
SILICONFLOW_API_KEY=
### siliconflow Api url (optional)
SILICONFLOW_URL=

View File

@ -1 +1,3 @@
public/serviceWorker.js public/serviceWorker.js
app/mcp/mcp_config.json
app/mcp/mcp_config.default.json

View File

@ -1,4 +1,7 @@
{ {
"extends": "next/core-web-vitals", "extends": "next/core-web-vitals",
"plugins": ["prettier"] "plugins": ["prettier", "unused-imports"],
"rules": {
"unused-imports/no-unused-imports": "warn"
}
} }

80
.github/ISSUE_TEMPLATE/1_bug_report.yml vendored Normal file
View File

@ -0,0 +1,80 @@
name: '🐛 Bug Report'
description: 'Report a bug'
title: '[Bug] '
labels: ['bug']
body:
- type: dropdown
attributes:
label: '📦 Deployment Method'
multiple: true
options:
- 'Official installation package'
- 'Vercel'
- 'Zeabur'
- 'Sealos'
- 'Netlify'
- 'Docker'
- 'Other'
validations:
required: true
- type: input
attributes:
label: '📌 Version'
validations:
required: true
- type: dropdown
attributes:
label: '💻 Operating System'
multiple: true
options:
- 'Windows'
- 'macOS'
- 'Ubuntu'
- 'Other Linux'
- 'iOS'
- 'iPad OS'
- 'Android'
- 'Other'
validations:
required: true
- type: input
attributes:
label: '📌 System Version'
validations:
required: true
- type: dropdown
attributes:
label: '🌐 Browser'
multiple: true
options:
- 'Chrome'
- 'Edge'
- 'Safari'
- 'Firefox'
- 'Other'
validations:
required: true
- type: input
attributes:
label: '📌 Browser Version'
validations:
required: true
- type: textarea
attributes:
label: '🐛 Bug Description'
description: A clear and concise description of the bug, if the above option is `Other`, please also explain in detail.
validations:
required: true
- type: textarea
attributes:
label: '📷 Recurrence Steps'
description: A clear and concise description of how to reproduce the issue.
- type: textarea
attributes:
label: '🚦 Expected Behavior'
description: A clear and concise description of what you expected to happen.
- type: textarea
attributes:
label: '📝 Additional Information'
description: If your problem needs further explanation, or if the issue you're seeing cannot be reproduced in a gist, please add more information here.

View File

@ -0,0 +1,80 @@
name: '🐛 反馈缺陷'
description: '反馈一个问题/缺陷'
title: '[Bug] '
labels: ['bug']
body:
- type: dropdown
attributes:
label: '📦 部署方式'
multiple: true
options:
- '官方安装包'
- 'Vercel'
- 'Zeabur'
- 'Sealos'
- 'Netlify'
- 'Docker'
- 'Other'
validations:
required: true
- type: input
attributes:
label: '📌 软件版本'
validations:
required: true
- type: dropdown
attributes:
label: '💻 系统环境'
multiple: true
options:
- 'Windows'
- 'macOS'
- 'Ubuntu'
- 'Other Linux'
- 'iOS'
- 'iPad OS'
- 'Android'
- 'Other'
validations:
required: true
- type: input
attributes:
label: '📌 系统版本'
validations:
required: true
- type: dropdown
attributes:
label: '🌐 浏览器'
multiple: true
options:
- 'Chrome'
- 'Edge'
- 'Safari'
- 'Firefox'
- 'Other'
validations:
required: true
- type: input
attributes:
label: '📌 浏览器版本'
validations:
required: true
- type: textarea
attributes:
label: '🐛 问题描述'
description: 请提供一个清晰且简洁的问题描述,若上述选项为`Other`,也请详细说明。
validations:
required: true
- type: textarea
attributes:
label: '📷 复现步骤'
description: 请提供一个清晰且简洁的描述,说明如何复现问题。
- type: textarea
attributes:
label: '🚦 期望结果'
description: 请提供一个清晰且简洁的描述,说明您期望发生什么。
- type: textarea
attributes:
label: '📝 补充信息'
description: 如果您的问题需要进一步说明,或者您遇到的问题无法在一个简单的示例中复现,请在这里添加更多信息。

View File

@ -0,0 +1,21 @@
name: '🌠 Feature Request'
description: 'Suggest an idea'
title: '[Feature Request] '
labels: ['enhancement']
body:
- type: textarea
attributes:
label: '🥰 Feature Description'
description: Please add a clear and concise description of the problem you are seeking to solve with this feature request.
validations:
required: true
- type: textarea
attributes:
label: '🧐 Proposed Solution'
description: Describe the solution you'd like in a clear and concise manner.
validations:
required: true
- type: textarea
attributes:
label: '📝 Additional Information'
description: Add any other context about the problem here.

View File

@ -0,0 +1,21 @@
name: '🌠 功能需求'
description: '提出需求或建议'
title: '[Feature Request] '
labels: ['enhancement']
body:
- type: textarea
attributes:
label: '🥰 需求描述'
description: 请添加一个清晰且简洁的问题描述,阐述您希望通过这个功能需求解决的问题。
validations:
required: true
- type: textarea
attributes:
label: '🧐 解决方案'
description: 请清晰且简洁地描述您想要的解决方案。
validations:
required: true
- type: textarea
attributes:
label: '📝 补充信息'
description: 在这里添加关于问题的任何其他背景信息。

View File

@ -1,43 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: "[Bug] "
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Deployment**
- [ ] Docker
- [ ] Vercel
- [ ] Server
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional Logs**
Add any logs about the problem here.

View File

@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[Feature] "
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

View File

@ -1,24 +0,0 @@
---
name: 功能建议
about: 请告诉我们你的灵光一闪
title: "[Feature] "
labels: ''
assignees: ''
---
> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。
> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724)
**这个功能与现有的问题有关吗?**
如果有关,请在此列出链接或者描述问题。
**你想要什么功能或者有什么建议?**
尽管告诉我们。
**有没有可以参考的同类竞品?**
可以给出参考产品的链接或者截图。
**其他信息**
可以说说你的其他考虑。

View File

@ -1,36 +0,0 @@
---
name: 反馈问题
about: 请告诉我们你遇到的问题
title: "[Bug] "
labels: ''
assignees: ''
---
> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。
> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724)
**反馈须知**
⚠️ 注意:不遵循此模板的任何帖子都会被立即关闭,如果没有提供下方的信息,我们无法定位你的问题。
请在下方中括号内输入 x 来表示你已经知晓相关内容。
- [ ] 我确认已经在 [常见问题](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/faq-cn.md) 中搜索了此次反馈的问题,没有找到解答;
- [ ] 我确认已经在 [Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) 列表(包括已经 Close 的)中搜索了此次反馈的问题,没有找到解答。
- [ ] 我确认已经在 [Vercel 使用教程](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/vercel-cn.md) 中搜索了此次反馈的问题,没有找到解答。
**描述问题**
请在此描述你遇到了什么问题。
**如何复现**
请告诉我们你是通过什么操作触发的该问题。
**截图**
请在此提供控制台截图、屏幕截图或者服务端的 log 截图。
**一些必要的信息**
- 系统:[比如 windows 10/ macos 12/ linux / android 11 / ios 16]
- 浏览器: [比如 chrome, safari]
- 版本: [填写设置页面的版本号]
- 部署方式:[比如 vercel、docker 或者服务器部署]

28
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file
View File

@ -0,0 +1,28 @@
#### 💻 变更类型 | Change Type
<!-- For change type, change [ ] to [x]. -->
- [ ] feat <!-- 引入新功能 | Introduce new features -->
- [ ] fix <!-- 修复 Bug | Fix a bug -->
- [ ] refactor <!-- 重构代码(既不修复 Bug 也不添加新功能) | Refactor code that neither fixes a bug nor adds a feature -->
- [ ] perf <!-- 提升性能的代码变更 | A code change that improves performance -->
- [ ] style <!-- 添加或更新不影响代码含义的样式文件 | Add or update style files that do not affect the meaning of the code -->
- [ ] test <!-- 添加缺失的测试或纠正现有的测试 | Adding missing tests or correcting existing tests -->
- [ ] docs <!-- 仅文档更新 | Documentation only changes -->
- [ ] ci <!-- 修改持续集成配置文件和脚本 | Changes to our CI configuration files and scripts -->
- [ ] chore <!-- 其他不修改 src 或 test 文件的变更 | Other changes that don't modify src or test files -->
- [ ] build <!-- 进行架构变更 | Make architectural changes -->
#### 🔀 变更说明 | Description of Change
<!--
感谢您的 Pull Request ,请提供此 Pull Request 的变更说明
Thank you for your Pull Request. Please provide a description above.
-->
#### 📝 补充信息 | Additional Information
<!--
请添加与此 Pull Request 相关的补充信息
Add any other context about the Pull Request here.
-->

View File

@ -3,9 +3,7 @@ name: VercelPreviewDeployment
on: on:
pull_request_target: pull_request_target:
types: types:
- opened - review_requested
- synchronize
- reopened
env: env:
VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }} VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }}
@ -49,7 +47,7 @@ jobs:
run: npm install --global vercel@latest run: npm install --global vercel@latest
- name: Cache dependencies - name: Cache dependencies
uses: actions/cache@v2 uses: actions/cache@v4
id: cache-npm id: cache-npm
with: with:
path: ~/.npm path: ~/.npm

39
.github/workflows/test.yml vendored Normal file
View File

@ -0,0 +1,39 @@
name: Run Tests
on:
push:
branches:
- main
tags:
- "!*"
pull_request:
types:
- review_requested
jobs:
test:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Node.js
uses: actions/setup-node@v3
with:
node-version: 18
cache: "yarn"
- name: Cache node_modules
uses: actions/cache@v4
with:
path: node_modules
key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }}
restore-keys: |
${{ runner.os }}-node_modules-
- name: Install dependencies
run: yarn install
- name: Run Jest tests
run: yarn test:ci

7
.gitignore vendored
View File

@ -43,4 +43,9 @@ dev
.env .env
*.key *.key
*.key.pub *.key.pub
masks.json
# mcp config
app/mcp/mcp_config.json

View File

@ -34,16 +34,20 @@ ENV PROXY_URL=""
ENV OPENAI_API_KEY="" ENV OPENAI_API_KEY=""
ENV GOOGLE_API_KEY="" ENV GOOGLE_API_KEY=""
ENV CODE="" ENV CODE=""
ENV ENABLE_MCP=""
COPY --from=builder /app/public ./public COPY --from=builder /app/public ./public
COPY --from=builder /app/.next/standalone ./ COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/.next/server ./.next/server COPY --from=builder /app/.next/server ./.next/server
RUN mkdir -p /app/app/mcp && chmod 777 /app/app/mcp
COPY --from=builder /app/app/mcp/mcp_config.default.json /app/app/mcp/mcp_config.json
EXPOSE 3000 EXPOSE 3000
CMD if [ -n "$PROXY_URL" ]; then \ CMD if [ -n "$PROXY_URL" ]; then \
export HOSTNAME="127.0.0.1"; \ export HOSTNAME="0.0.0.0"; \
protocol=$(echo $PROXY_URL | cut -d: -f1); \ protocol=$(echo $PROXY_URL | cut -d: -f1); \
host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \
port=$(echo $PROXY_URL | cut -d: -f3); \ port=$(echo $PROXY_URL | cut -d: -f3); \

View File

@ -1,6 +1,6 @@
MIT License MIT License
Copyright (c) 2023 Zhang Yifei Copyright (c) 2023-2025 NextChat
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

293
README.md
View File

@ -1,40 +1,78 @@
<div align="center"> <div align="center">
<img src="./docs/images/head-cover.png" alt="icon"/>
<h1 align="center">NextChat (ChatGPT Next Web)</h1> <a href='https://nextchat.club'>
<img src="https://github.com/user-attachments/assets/83bdcc07-ae5e-4954-a53a-ac151ba6ccf3" width="1000" alt="icon"/>
</a>
<h1 align="center">NextChat</h1>
English / [简体中文](./README_CN.md) English / [简体中文](./README_CN.md)
One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 & Gemini Pro support. <a href="https://trendshift.io/repositories/5973" target="_blank"><img src="https://trendshift.io/api/badge/repositories/5973" alt="ChatGPTNextWeb%2FChatGPT-Next-Web | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。
✨ Light and Fast AI Assistant,with Claude, DeepSeek, GPT4 & Gemini Pro support.
[![Saas][Saas-image]][saas-url]
[![Web][Web-image]][web-url] [![Web][Web-image]][web-url]
[![Windows][Windows-image]][download-url] [![Windows][Windows-image]][download-url]
[![MacOS][MacOS-image]][download-url] [![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url] [![Linux][Linux-image]][download-url]
[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/NextChatDev) [NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
[web-url]: https://chatgpt.nextweb.fun [saas-url]: https://nextchat.club?utm_source=readme
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
[web-url]: https://app.nextchat.dev/
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows [Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://vercel.com/button" alt="Deploy on Vercel" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/ChatGPTNextWeb/NextChat)
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA) [<img src="https://github.com/user-attachments/assets/903482d4-3e87-4134-9af1-f2588fa90659" height="50" width="" >](https://monica.im/?utm=nxcrp)
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
![cover](./docs/images/cover.png)
</div> </div>
## 🥳 Cheer for DeepSeek, China's AI star!
> Purpose-Built UI for DeepSeek Reasoner Model
<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
## 🫣 NextChat Support MCP !
> Before build, please set env ENABLE_MCP=true
<img src="https://github.com/user-attachments/assets/d8851f40-4e36-4335-b1a4-ec1e11488c7e"/>
## Enterprise Edition
Meeting Your Company's Privatization and Customization Deployment Requirements:
- **Brand Customization**: Tailored VI/UI to seamlessly align with your corporate brand image.
- **Resource Integration**: Unified configuration and management of dozens of AI resources by company administrators, ready for use by team members.
- **Permission Control**: Clearly defined member permissions, resource permissions, and knowledge base permissions, all controlled via a corporate-grade Admin Panel.
- **Knowledge Integration**: Combining your internal knowledge base with AI capabilities, making it more relevant to your company's specific business needs compared to general AI.
- **Security Auditing**: Automatically intercept sensitive inquiries and trace all historical conversation records, ensuring AI adherence to corporate information security standards.
- **Private Deployment**: Enterprise-level private deployment supporting various mainstream private cloud solutions, ensuring data security and privacy protection.
- **Continuous Updates**: Ongoing updates and upgrades in cutting-edge capabilities like multimodal AI, ensuring consistent innovation and advancement.
For enterprise inquiries, please contact: **business@nextchat.dev**
## Screenshots
![Settings](./docs/images/settings.png)
![More](./docs/images/more.png)
## Features ## Features
- **Deploy for free with one-click** on Vercel in under 1 minute - **Deploy for free with one-click** on Vercel in under 1 minute
@ -49,6 +87,12 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
- Automatically compresses chat history to support long conversations while also saving your tokens - Automatically compresses chat history to support long conversations while also saving your tokens
- I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia - I18n: English, 简体中文, 繁体中文, 日本語, Français, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia
<div align="center">
![主界面](./docs/images/cover.png)
</div>
## Roadmap ## Roadmap
- [x] System Prompt: pin a user defined prompt as system prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138) - [x] System Prompt: pin a user defined prompt as system prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)
@ -57,52 +101,25 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
- [x] Share as image, share to ShareGPT [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741) - [x] Share as image, share to ShareGPT [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741)
- [x] Desktop App with tauri - [x] Desktop App with tauri
- [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc. - [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc.
- [ ] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) - [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092)
- [x] Plugins: support network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
- [x] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353)
- [x] Supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- [ ] local knowledge base
## What's New ## What's New
- 🚀 v2.15.8 Now supports Realtime Chat [#5672](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5672)
- 🚀 v2.15.4 The Application supports using Tauri fetch LLM API, MORE SECURITY! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379)
- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins)
- 🚀 v2.14.0 Now supports Artifacts & SD
- 🚀 v2.10.1 support Google Gemini Pro model. - 🚀 v2.10.1 support Google Gemini Pro model.
- 🚀 v2.9.11 you can use azure endpoint now. - 🚀 v2.9.11 you can use azure endpoint now.
- 🚀 v2.8 now we have a client that runs across all platforms! - 🚀 v2.8 now we have a client that runs across all platforms!
- 🚀 v2.7 let's share conversations as image, or share to ShareGPT! - 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/). - 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
## 主要功能
- 在 1 分钟内使用 Vercel **免费一键部署**
- 提供体积极小(~5MB)的跨平台客户端(Linux/Windows/MacOS),[下载地址](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
- 完整的 Markdown 支持LaTex 公式、Mermaid 流程图、代码高亮等等
- 精心设计的 UI响应式设计支持深色模式支持 PWA
- 极快的首屏加载速度(~100kb),支持流式响应
- 隐私安全,所有数据保存在用户浏览器本地
- 预制角色功能(面具),方便地创建、分享和调试你的个性化对话
- 海量的内置 prompt 列表,来自[中文](https://github.com/PlexPt/awesome-chatgpt-prompts-zh)和[英文](https://github.com/f/awesome-chatgpt-prompts)
- 自动压缩上下文聊天记录,在节省 Token 的同时支持超长对话
- 多国语言支持English, 简体中文, 繁体中文, 日本語, Español, Italiano, Türkçe, Deutsch, Tiếng Việt, Русский, Čeština, 한국어, Indonesia
- 拥有自己的域名?好上加好,绑定后即可在任何地方**无障碍**快速访问
## 开发计划
- [x] 为每个对话设置系统 Prompt [#138](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)
- [x] 允许用户自行编辑内置 Prompt 列表
- [x] 预制角色:使用预制角色快速定制新对话 [#993](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/993)
- [x] 分享为图片,分享到 ShareGPT 链接 [#1741](https://github.com/Yidadaa/ChatGPT-Next-Web/pull/1741)
- [x] 使用 tauri 打包桌面应用
- [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm)
- [ ] 插件机制,支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165)
## 最新动态
- 🚀 v2.0 已经发布,现在你可以使用面具功能快速创建预制对话了! 了解更多: [ChatGPT 提示词高阶技能:零次、一次和少样本提示](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/138)。
- 💡 想要更方便地随时随地使用本项目可以试下这款桌面插件https://github.com/mushan0x0/AI0x0.com
- 🚀 v2.7 现在可以将会话分享为图片了,也可以分享到 ShareGPT 的在线链接。
- 🚀 v2.8 发布了横跨 Linux/Windows/MacOS 的体积极小的客户端。
- 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。
## Get Started ## Get Started
> [简体中文 > 如何开始使用](./README_CN.md#开始使用)
1. Get [OpenAI API Key](https://platform.openai.com/account/api-keys); 1. Get [OpenAI API Key](https://platform.openai.com/account/api-keys);
2. Click 2. Click
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web), remember that `CODE` is your page password; [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web), remember that `CODE` is your page password;
@ -110,14 +127,10 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
## FAQ ## FAQ
[简体中文 > 常见问题](./docs/faq-cn.md)
[English > FAQ](./docs/faq-en.md) [English > FAQ](./docs/faq-en.md)
## Keep Updated ## Keep Updated
> [简体中文 > 如何保持代码更新](./README_CN.md#保持更新)
If you have deployed your own project with just one click following the steps above, you may encounter the issue of "Updates Available" constantly showing up. This is because Vercel will create a new project for you by default instead of forking this project, resulting in the inability to detect updates correctly. If you have deployed your own project with just one click following the steps above, you may encounter the issue of "Updates Available" constantly showing up. This is because Vercel will create a new project for you by default instead of forking this project, resulting in the inability to detect updates correctly.
We recommend that you follow the steps below to re-deploy: We recommend that you follow the steps below to re-deploy:
@ -128,7 +141,7 @@ We recommend that you follow the steps below to re-deploy:
### Enable Automatic Updates ### Enable Automatic Updates
> If you encounter a failure of Upstream Sync execution, please manually sync fork once. > If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code).
After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour: After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour:
@ -144,8 +157,6 @@ You can star or watch this project or follow author to get release notifications
## Access Password ## Access Password
> [简体中文 > 如何增加访问密码](./README_CN.md#配置页面访问密码)
This project provides limited access control. Please add an environment variable named `CODE` on the vercel environment variables page. The value should be passwords separated by comma like this: This project provides limited access control. Please add an environment variable named `CODE` on the vercel environment variables page. The value should be passwords separated by comma like this:
``` ```
@ -156,8 +167,6 @@ After adding or modifying this environment variable, please redeploy the project
## Environment Variables ## Environment Variables
> [简体中文 > 如何配置 api key、访问密码、接口代理](./README_CN.md#环境变量)
### `CODE` (optional) ### `CODE` (optional)
Access password, separated by comma. Access password, separated by comma.
@ -180,7 +189,7 @@ Specify OpenAI organization ID.
### `AZURE_URL` (optional) ### `AZURE_URL` (optional)
> Example: https://{azure-resource-url}/openai/deployments/{deploy-name} > Example: https://{azure-resource-url}/openai
Azure deploy url. Azure deploy url.
@ -200,6 +209,74 @@ Google Gemini Pro Api Key.
Google Gemini Pro Api Url. Google Gemini Pro Api Url.
### `ANTHROPIC_API_KEY` (optional)
anthropic claude Api Key.
### `ANTHROPIC_API_VERSION` (optional)
anthropic claude Api version.
### `ANTHROPIC_URL` (optional)
anthropic claude Api Url.
### `BAIDU_API_KEY` (optional)
Baidu Api Key.
### `BAIDU_SECRET_KEY` (optional)
Baidu Secret Key.
### `BAIDU_URL` (optional)
Baidu Api Url.
### `BYTEDANCE_API_KEY` (optional)
ByteDance Api Key.
### `BYTEDANCE_URL` (optional)
ByteDance Api Url.
### `ALIBABA_API_KEY` (optional)
Alibaba Cloud Api Key.
### `ALIBABA_URL` (optional)
Alibaba Cloud Api Url.
### `IFLYTEK_URL` (Optional)
iflytek Api Url.
### `IFLYTEK_API_KEY` (Optional)
iflytek Api Key.
### `IFLYTEK_API_SECRET` (Optional)
iflytek Api Secret.
### `CHATGLM_API_KEY` (optional)
ChatGLM Api Key.
### `CHATGLM_URL` (optional)
ChatGLM Api Url.
### `DEEPSEEK_API_KEY` (optional)
DeepSeek Api Key.
### `DEEPSEEK_URL` (optional)
DeepSeek Api Url.
### `HIDE_USER_API_KEY` (optional) ### `HIDE_USER_API_KEY` (optional)
> Default: Empty > Default: Empty
@ -216,7 +293,7 @@ If you do not want users to use GPT-4, set this value to 1.
> Default: Empty > Default: Empty
If you do want users to query balance, set this value to 1, or you should set it to 0. If you do want users to query balance, set this value to 1.
### `DISABLE_FAST_LINK` (optional) ### `DISABLE_FAST_LINK` (optional)
@ -233,13 +310,62 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model
User `-all` to disable all default models, `+all` to enable all default models. User `-all` to disable all default models, `+all` to enable all default models.
For Azure: use `modelName@Azure=deploymentName` to customize model name and deployment name.
> Example: `+gpt-3.5-turbo@Azure=gpt35` will show option `gpt35(Azure)` in model list.
> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list.
For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name.
> Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list.
### `DEFAULT_MODEL` optional
Change default model
### `VISION_MODELS` (optional)
> Default: Empty
> Example: `gpt-4-vision,claude-3-opus,my-custom-model` means add vision capabilities to these models in addition to the default pattern matches (which detect models containing keywords like "vision", "claude-3", "gemini-1.5", etc).
Add additional models to have vision capabilities, beyond the default pattern matching. Multiple models should be separated by commas.
### `WHITE_WEBDAV_ENDPOINTS` (optional)
Use this option if you want to allow access to additional WebDAV service addresses. Format requirements:
- Each address must be a complete endpoint
> `https://xxxx/yyy`
- Multiple addresses are connected by ', '
### `DEFAULT_INPUT_TEMPLATE` (optional)
Customize the default template used to initialize the User Input Preprocessing configuration item in Settings.
### `STABILITY_API_KEY` (optional)
Stability API key.
### `STABILITY_URL` (optional)
Customize Stability API url.
### `ENABLE_MCP` (optional)
Enable the MCP (Model Context Protocol) feature.
### `SILICONFLOW_API_KEY` (optional)
SiliconFlow API Key.
### `SILICONFLOW_URL` (optional)
SiliconFlow API URL.
## Requirements ## Requirements
NodeJS >= 18, Docker >= 20 NodeJS >= 18, Docker >= 20
## Development ## Development
> [简体中文 > 如何进行二次开发](./README_CN.md#开发)
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
@ -264,7 +390,6 @@ yarn dev
## Deployment ## Deployment
> [简体中文 > 如何部署到私人服务器](./README_CN.md#部署)
### Docker (Recommended) ### Docker (Recommended)
@ -293,6 +418,16 @@ If your proxy needs password, use:
-e PROXY_URL="http://127.0.0.1:7890 user pass" -e PROXY_URL="http://127.0.0.1:7890 user pass"
``` ```
If enable MCP, use
```
docker run -d -p 3000:3000 \
-e OPENAI_API_KEY=sk-xxxx \
-e CODE=your-password \
-e ENABLE_MCP=true \
yidadaa/chatgpt-next-web
```
### Shell ### Shell
```shell ```shell
@ -313,11 +448,7 @@ bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/s
- [How to use Vercel (No English)](./docs/vercel-cn.md) - [How to use Vercel (No English)](./docs/vercel-cn.md)
- [User Manual (Only Chinese, WIP)](./docs/user-manual-cn.md) - [User Manual (Only Chinese, WIP)](./docs/user-manual-cn.md)
## Screenshots
![Settings](./docs/images/settings.png)
![More](./docs/images/more.png)
## Translation ## Translation
@ -329,37 +460,7 @@ If you want to add a new translation, read this [document](./docs/translation.md
## Special Thanks ## Special Thanks
### Sponsor
> 仅列出捐赠金额 >= 100RMB 的用户。
[@mushan0x0](https://github.com/mushan0x0)
[@ClarenceDan](https://github.com/ClarenceDan)
[@zhangjia](https://github.com/zhangjia)
[@hoochanlon](https://github.com/hoochanlon)
[@relativequantum](https://github.com/relativequantum)
[@desenmeng](https://github.com/desenmeng)
[@webees](https://github.com/webees)
[@chazzhou](https://github.com/chazzhou)
[@hauy](https://github.com/hauy)
[@Corwin006](https://github.com/Corwin006)
[@yankunsong](https://github.com/yankunsong)
[@ypwhs](https://github.com/ypwhs)
[@fxxxchao](https://github.com/fxxxchao)
[@hotic](https://github.com/hotic)
[@WingCH](https://github.com/WingCH)
[@jtung4](https://github.com/jtung4)
[@micozhu](https://github.com/micozhu)
[@jhansion](https://github.com/jhansion)
[@Sha1rholder](https://github.com/Sha1rholder)
[@AnsonHyq](https://github.com/AnsonHyq)
[@synwith](https://github.com/synwith)
[@piksonGit](https://github.com/piksonGit)
[@ouyangzhiping](https://github.com/ouyangzhiping)
[@wenjiavv](https://github.com/wenjiavv)
[@LeXwDeX](https://github.com/LeXwDeX)
[@Licoy](https://github.com/Licoy)
[@shangmin2009](https://github.com/shangmin2009)
### Contributors ### Contributors

View File

@ -1,22 +1,35 @@
<div align="center"> <div align="center">
<img src="./docs/images/icon.svg" alt="预览"/>
<a href='#企业版'>
<img src="./docs/images/ent.svg" alt="icon"/>
</a>
<h1 align="center">NextChat</h1> <h1 align="center">NextChat</h1>
一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 一键免费部署你的私人 ChatGPT 网页应用,支持 Claude, GPT4 & Gemini Pro 模型。
[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) [NextChatAI](https://nextchat.club?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) [<img src="https://vercel.com/button" alt="Deploy on Zeabur" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Deploy on Zeabur" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Open in Gitpod" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA)
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
![主界面](./docs/images/cover.png)
</div> </div>
## 企业版
满足您公司私有化部署和定制需求
- **品牌定制**:企业量身定制 VI/UI与企业品牌形象无缝契合
- **资源集成**:由企业管理人员统一配置和管理数十种 AI 资源,团队成员开箱即用
- **权限管理**:成员权限、资源权限、知识库权限层级分明,企业级 Admin Panel 统一控制
- **知识接入**:企业内部知识库与 AI 能力相结合,比通用 AI 更贴近企业自身业务需求
- **安全审计**:自动拦截敏感提问,支持追溯全部历史对话记录,让 AI 也能遵循企业信息安全规范
- **私有部署**:企业级私有部署,支持各类主流私有云部署,确保数据安全和隐私保护
- **持续更新**:提供多模态、智能体等前沿能力持续更新升级服务,常用常新、持续先进
企业版咨询: **business@nextchat.dev**
<img width="300" src="https://github.com/user-attachments/assets/bb29a11d-ff75-48a8-b1f8-d2d7238cf987">
## 开始使用 ## 开始使用
1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys); 1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys);
@ -25,6 +38,12 @@
3. 部署完毕后,即可开始使用; 3. 部署完毕后,即可开始使用;
4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain)Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。 4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain)Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。
<div align="center">
![主界面](./docs/images/cover.png)
</div>
## 保持更新 ## 保持更新
如果你按照上述步骤一键部署了自己的项目,可能会发现总是提示“存在更新”的问题,这是由于 Vercel 会默认为你创建一个新项目而不是 fork 本项目,这会导致无法正确地检测更新。 如果你按照上述步骤一键部署了自己的项目,可能会发现总是提示“存在更新”的问题,这是由于 Vercel 会默认为你创建一个新项目而不是 fork 本项目,这会导致无法正确地检测更新。
@ -36,7 +55,7 @@
### 打开自动更新 ### 打开自动更新
> 如果你遇到了 Upstream Sync 执行错误,请手动 Sync Fork 一次! > 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)
当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows并启用 Upstream Sync Action启用之后即可开启每小时定时自动更新 当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows并启用 Upstream Sync Action启用之后即可开启每小时定时自动更新
@ -70,7 +89,7 @@ code1,code2,code3
### `OPENAI_API_KEY` (必填项) ### `OPENAI_API_KEY` (必填项)
OpanAI 密钥,你在 openai 账户页面申请的 api key使用英文逗号隔开多个 key这样可以随机轮询这些 key。 OpenAI 密钥,你在 openai 账户页面申请的 api key使用英文逗号隔开多个 key这样可以随机轮询这些 key。
### `CODE` (可选) ### `CODE` (可选)
@ -94,7 +113,7 @@ OpenAI 接口代理 URL如果你手动配置了 openai 接口代理,请填
### `AZURE_URL` (可选) ### `AZURE_URL` (可选)
> 形如https://{azure-resource-url}/openai/deployments/{deploy-name} > 形如https://{azure-resource-url}/openai
Azure 部署地址。 Azure 部署地址。
@ -106,14 +125,83 @@ Azure 密钥。
Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。 Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。
### `GOOGLE_API_KEY` (optional) ### `GOOGLE_API_KEY` (可选)
Google Gemini Pro 密钥. Google Gemini Pro 密钥.
### `GOOGLE_URL` (optional) ### `GOOGLE_URL` (可选)
Google Gemini Pro Api Url. Google Gemini Pro Api Url.
### `ANTHROPIC_API_KEY` (可选)
anthropic claude Api Key.
### `ANTHROPIC_API_VERSION` (可选)
anthropic claude Api version.
### `ANTHROPIC_URL` (可选)
anthropic claude Api Url.
### `BAIDU_API_KEY` (可选)
Baidu Api Key.
### `BAIDU_SECRET_KEY` (可选)
Baidu Secret Key.
### `BAIDU_URL` (可选)
Baidu Api Url.
### `BYTEDANCE_API_KEY` (可选)
ByteDance Api Key.
### `BYTEDANCE_URL` (可选)
ByteDance Api Url.
### `ALIBABA_API_KEY` (可选)
阿里云千问Api Key.
### `ALIBABA_URL` (可选)
阿里云千问Api Url.
### `IFLYTEK_URL` (可选)
讯飞星火Api Url.
### `IFLYTEK_API_KEY` (可选)
讯飞星火Api Key.
### `IFLYTEK_API_SECRET` (可选)
讯飞星火Api Secret.
### `CHATGLM_API_KEY` (可选)
ChatGLM Api Key.
### `CHATGLM_URL` (可选)
ChatGLM Api Url.
### `DEEPSEEK_API_KEY` (可选)
DeepSeek Api Key.
### `DEEPSEEK_URL` (可选)
DeepSeek Api Url.
### `HIDE_USER_API_KEY` (可选) ### `HIDE_USER_API_KEY` (可选)
如果你不想让用户自行填入 API Key将此环境变量设置为 1 即可。 如果你不想让用户自行填入 API Key将此环境变量设置为 1 即可。
@ -130,6 +218,13 @@ Google Gemini Pro Api Url.
如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。 如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。
### `WHITE_WEBDAV_ENDPOINTS` (可选)
如果你想增加允许访问的webdav服务地址可以使用该选项格式要求
- 每一个地址必须是一个完整的 endpoint
> `https://xxxx/xxx`
- 多个地址以`,`相连
### `CUSTOM_MODELS` (可选) ### `CUSTOM_MODELS` (可选)
> 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat``glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo` > 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat``glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`
@ -137,6 +232,49 @@ Google Gemini Pro Api Url.
用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。
在Azure的模式下支持使用`modelName@Azure=deploymentName`的方式配置模型名称和部署名称(deploy-name)
> 示例:`+gpt-3.5-turbo@Azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。
> 如果你只能使用Azure模式那么设置 `-all,+gpt-3.5-turbo@Azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)`
在ByteDance的模式下支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name)
> 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项
### `DEFAULT_MODEL` (可选)
更改默认模型
### `VISION_MODELS` (可选)
> 默认值:空
> 示例:`gpt-4-vision,claude-3-opus,my-custom-model` 表示为这些模型添加视觉能力,作为对默认模式匹配的补充(默认会检测包含"vision"、"claude-3"、"gemini-1.5"等关键词的模型)。
在默认模式匹配之外,添加更多具有视觉能力的模型。多个模型用逗号分隔。
### `DEFAULT_INPUT_TEMPLATE` (可选)
自定义默认的 template用于初始化『设置』中的『用户输入预处理』配置项
### `STABILITY_API_KEY` (optional)
Stability API密钥
### `STABILITY_URL` (optional)
自定义的Stability API请求地址
### `ENABLE_MCP` (optional)
启用 MCPModel Context Protocol功能
### `SILICONFLOW_API_KEY` (optional)
SiliconFlow API Key.
### `SILICONFLOW_URL` (optional)
SiliconFlow API URL.
## 开发 ## 开发
点击下方按钮,开始二次开发: 点击下方按钮,开始二次开发:
@ -160,6 +298,9 @@ BASE_URL=https://b.nextweb.fun/api/proxy
## 部署 ## 部署
### 宝塔面板部署
> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
### 容器部署 (推荐) ### 容器部署 (推荐)
> Docker 版本需要在 20 及其以上,否则会提示找不到镜像。 > Docker 版本需要在 20 及其以上,否则会提示找不到镜像。
@ -186,6 +327,16 @@ docker run -d -p 3000:3000 \
yidadaa/chatgpt-next-web yidadaa/chatgpt-next-web
``` ```
如需启用 MCP 功能,可以使用:
```shell
docker run -d -p 3000:3000 \
-e OPENAI_API_KEY=sk-xxxx \
-e CODE=页面访问密码 \
-e ENABLE_MCP=true \
yidadaa/chatgpt-next-web
```
如果你的本地代理需要账号密码,可以使用: 如果你的本地代理需要账号密码,可以使用:
```shell ```shell

317
README_JA.md Normal file
View File

@ -0,0 +1,317 @@
<div align="center">
<img src="./docs/images/ent.svg" alt="プレビュー"/>
<h1 align="center">NextChat</h1>
ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。
[NextChatAI](https://nextchat.club?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N)
[<img src="https://vercel.com/button" alt="Zeaburでデプロイ" height="30">](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [<img src="https://zeabur.com/button.svg" alt="Zeaburでデプロイ" height="30">](https://zeabur.com/templates/ZBUEFA) [<img src="https://gitpod.io/button/open-in-gitpod.svg" alt="Gitpodで開く" height="30">](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
</div>
## 企業版
あなたの会社のプライベートデプロイとカスタマイズのニーズに応える
- **ブランドカスタマイズ**:企業向けに特別に設計された VI/UI、企業ブランドイメージとシームレスにマッチ
- **リソース統合**企業管理者が数十種類のAIリソースを統一管理、チームメンバーはすぐに使用可能
- **権限管理**メンバーの権限、リソースの権限、ナレッジベースの権限を明確にし、企業レベルのAdmin Panelで統一管理
- **知識の統合**企業内部のナレッジベースとAI機能を結びつけ、汎用AIよりも企業自身の業務ニーズに近づける
- **セキュリティ監査**機密質問を自動的にブロックし、すべての履歴対話を追跡可能にし、AIも企業の情報セキュリティ基準に従わせる
- **プライベートデプロイ**:企業レベルのプライベートデプロイ、主要なプライベートクラウドデプロイをサポートし、データのセキュリティとプライバシーを保護
- **継続的な更新**:マルチモーダル、エージェントなどの最先端機能を継続的に更新し、常に最新であり続ける
企業版のお問い合わせ: **business@nextchat.dev**
## 始めに
1. [OpenAI API Key](https://platform.openai.com/account/api-keys)を準備する;
2. 右側のボタンをクリックしてデプロイを開始:
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web) 、GitHubアカウントで直接ログインし、環境変数ページにAPI Keyと[ページアクセスパスワード](#設定ページアクセスパスワード) CODEを入力してください;
3. デプロイが完了したら、すぐに使用を開始できます;
4. (オプション)[カスタムドメインをバインド](https://vercel.com/docs/concepts/projects/domains/add-a-domain)Vercelが割り当てたドメインDNSは一部の地域で汚染されているため、カスタムドメインをバインドすると直接接続できます。
<div align="center">
![メインインターフェース](./docs/images/cover.png)
</div>
## 更新を維持する
もし上記の手順に従ってワンクリックでプロジェクトをデプロイした場合、「更新があります」というメッセージが常に表示されることがあります。これは、Vercel がデフォルトで新しいプロジェクトを作成するためで、本プロジェクトを fork していないことが原因です。そのため、正しく更新を検出できません。
以下の手順に従って再デプロイすることをお勧めします:
- 元のリポジトリを削除する
- ページ右上の fork ボタンを使って、本プロジェクトを fork する
- Vercel で再度選択してデプロイする、[詳細な手順はこちらを参照してください](./docs/vercel-ja.md)。
### 自動更新を開く
> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください!
プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります:
![自動更新](./docs/images/enable-actions.jpg)
![自動更新を有効にする](./docs/images/enable-actions-sync.jpg)
### 手動でコードを更新する
手動で即座に更新したい場合は、[GitHub のドキュメント](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/syncing-a-fork)を参照して、fork したプロジェクトを上流のコードと同期する方法を確認してください。
このプロジェクトをスターまたはウォッチしたり、作者をフォローすることで、新機能の更新通知をすぐに受け取ることができます。
## ページアクセスパスワードを設定する
> パスワードを設定すると、ユーザーは設定ページでアクセスコードを手動で入力しない限り、通常のチャットができず、未承認の状態であることを示すメッセージが表示されます。
> **警告**パスワードの桁数は十分に長く設定してください。7桁以上が望ましいです。さもないと、[ブルートフォース攻撃を受ける可能性があります](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/518)。
このプロジェクトは限られた権限管理機能を提供しています。Vercel プロジェクトのコントロールパネルで、環境変数ページに `CODE` という名前の環境変数を追加し、値をカンマで区切ったカスタムパスワードに設定してください:
```
code1,code2,code3
```
この環境変数を追加または変更した後、**プロジェクトを再デプロイ**して変更を有効にしてください。
## 環境変数
> 本プロジェクトのほとんどの設定は環境変数で行います。チュートリアル:[Vercel の環境変数を変更する方法](./docs/vercel-ja.md)。
### `OPENAI_API_KEY` (必須)
OpenAI の API キー。OpenAI アカウントページで申請したキーをカンマで区切って複数設定できます。これにより、ランダムにキーが選択されます。
### `CODE` (オプション)
アクセスパスワード。カンマで区切って複数設定可能。
**警告**:この項目を設定しないと、誰でもデプロイしたウェブサイトを利用でき、トークンが急速に消耗する可能性があるため、設定をお勧めします。
### `BASE_URL` (オプション)
> デフォルト: `https://api.openai.com`
> 例: `http://your-openai-proxy.com`
OpenAI API のプロキシ URL。手動で OpenAI API のプロキシを設定している場合はこのオプションを設定してください。
> SSL 証明書の問題がある場合は、`BASE_URL` のプロトコルを http に設定してください。
### `OPENAI_ORG_ID` (オプション)
OpenAI の組織 ID を指定します。
### `AZURE_URL` (オプション)
> 形式: https://{azure-resource-url}/openai/deployments/{deploy-name}
> `CUSTOM_MODELS``displayName` 形式で {deploy-name} を設定した場合、`AZURE_URL` から {deploy-name} を省略できます。
Azure のデプロイ URL。
### `AZURE_API_KEY` (オプション)
Azure の API キー。
### `AZURE_API_VERSION` (オプション)
Azure API バージョン。[Azure ドキュメント](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)で確認できます。
### `GOOGLE_API_KEY` (オプション)
Google Gemini Pro API キー。
### `GOOGLE_URL` (オプション)
Google Gemini Pro API の URL。
### `ANTHROPIC_API_KEY` (オプション)
Anthropic Claude API キー。
### `ANTHROPIC_API_VERSION` (オプション)
Anthropic Claude API バージョン。
### `ANTHROPIC_URL` (オプション)
Anthropic Claude API の URL。
### `BAIDU_API_KEY` (オプション)
Baidu API キー。
### `BAIDU_SECRET_KEY` (オプション)
Baidu シークレットキー。
### `BAIDU_URL` (オプション)
Baidu API の URL。
### `BYTEDANCE_API_KEY` (オプション)
ByteDance API キー。
### `BYTEDANCE_URL` (オプション)
ByteDance API の URL。
### `ALIBABA_API_KEY` (オプション)
アリババ千问API キー。
### `ALIBABA_URL` (オプション)
アリババ千问API の URL。
### `HIDE_USER_API_KEY` (オプション)
ユーザーが API キーを入力できないようにしたい場合は、この環境変数を 1 に設定します。
### `DISABLE_GPT4` (オプション)
ユーザーが GPT-4 を使用できないようにしたい場合は、この環境変数を 1 に設定します。
### `ENABLE_BALANCE_QUERY` (オプション)
バランスクエリ機能を有効にしたい場合は、この環境変数を 1 に設定します。
### `DISABLE_FAST_LINK` (オプション)
リンクからのプリセット設定解析を無効にしたい場合は、この環境変数を 1 に設定します。
### `WHITE_WEBDAV_ENDPOINTS` (オプション)
アクセス許可を与える WebDAV サービスのアドレスを追加したい場合、このオプションを使用します。フォーマット要件:
- 各アドレスは完全なエンドポイントでなければなりません。
> `https://xxxx/xxx`
- 複数のアドレスは `,` で接続します。
### `CUSTOM_MODELS` (オプション)
> 例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` は `qwen-7b-chat``glm-6b` をモデルリストに追加し、`gpt-3.5-turbo` を削除し、`gpt-4-1106-preview` のモデル名を `gpt-4-turbo` として表示します。
> すべてのモデルを無効にし、特定のモデルを有効にしたい場合は、`-all,+gpt-3.5-turbo` を使用します。これは `gpt-3.5-turbo` のみを有効にすることを意味します。
モデルリストを管理します。`+` でモデルを追加し、`-` でモデルを非表示にし、`モデル名=表示名` でモデルの表示名をカスタマイズし、カンマで区切ります。
Azure モードでは、`modelName@Azure=deploymentName` 形式でモデル名とデプロイ名deploy-nameを設定できます。
> 例:`+gpt-3.5-turbo@Azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。
ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデル名とデプロイ名deploy-nameを設定できます。
> 例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` この設定でモデルリストに `Doubao-lite-4k(ByteDance)` のオプションが表示されます。
### `DEFAULT_MODEL` (オプション)
デフォルトのモデルを変更します。
### `VISION_MODELS` (オプション)
> デフォルト:空
> 例:`gpt-4-vision,claude-3-opus,my-custom-model` は、これらのモデルにビジョン機能を追加します。これはデフォルトのパターンマッチング("vision"、"claude-3"、"gemini-1.5"などのキーワードを含むモデルを検出)に加えて適用されます。
デフォルトのパターンマッチングに加えて、追加のモデルにビジョン機能を付与します。複数のモデルはカンマで区切ります。
### `DEFAULT_INPUT_TEMPLATE` (オプション)
『設定』の『ユーザー入力前処理』の初期設定に使用するテンプレートをカスタマイズします。
## 開発
下のボタンをクリックして二次開発を開始してください:
[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
コードを書く前に、プロジェクトのルートディレクトリに `.env.local` ファイルを新規作成し、環境変数を記入します:
```
OPENAI_API_KEY=<your api key here>
```
### ローカル開発
1. Node.js 18 と Yarn をインストールします。具体的な方法は ChatGPT にお尋ねください。
2. `yarn install && yarn dev` を実行します。⚠️ 注意:このコマンドはローカル開発用であり、デプロイには使用しないでください。
3. ローカルでデプロイしたい場合は、`yarn install && yarn build && yarn start` コマンドを使用してください。プロセスを守るために pm2 を使用することもできます。詳細は ChatGPT にお尋ねください。
## デプロイ
### コンテナデプロイ(推奨)
> Docker バージョンは 20 以上が必要です。それ以下だとイメージが見つからないというエラーが出ます。
> ⚠️ 注意Docker バージョンは最新バージョンより 12 日遅れることが多いため、デプロイ後に「更新があります」の通知が出続けることがありますが、正常です。
```shell
docker pull yidadaa/chatgpt-next-web
docker run -d -p 3000:3000 \
-e OPENAI_API_KEY=sk-xxxx \
-e CODE=ページアクセスパスワード \
yidadaa/chatgpt-next-web
```
プロキシを指定することもできます:
```shell
docker run -d -p 3000:3000 \
-e OPENAI_API_KEY=sk-xxxx \
-e CODE=ページアクセスパスワード \
--net=host \
-e PROXY_URL=http://127.0.0.1:7890 \
yidadaa/chatgpt-next-web
```
ローカルプロキシがアカウントとパスワードを必要とする場合は、以下を使用できます:
```shell
-e PROXY_URL="http://127.0.0.1:7890 user password"
```
他の環境変数を指定する必要がある場合は、上記のコマンドに `-e 環境変数=環境変数値` を追加して指定してください。
### ローカルデプロイ
コンソールで以下のコマンドを実行します:
```shell
bash <(curl -s https://raw.githubusercontent.com/Yidadaa/ChatGPT-Next-Web/main/scripts/setup.sh)
```
⚠️ 注意インストール中に問題が発生した場合は、Docker を使用してデプロイしてください。
## 謝辞
### 寄付者
> 英語版をご覧ください。
### 貢献者
[プロジェクトの貢献者リストはこちら](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)
### 関連プロジェクト
- [one-api](https://github.com/songquanpeng/one-api): 一つのプラットフォームで大規模モデルのクォータ管理を提供し、市場に出回っているすべての主要な大規模言語モデルをサポートします。
## オープンソースライセンス
[MIT](https://opensource.org/license/mit/)

View File

@ -0,0 +1,82 @@
import { ApiPath } from "@/app/constant";
import { NextRequest } from "next/server";
import { handle as openaiHandler } from "../../openai";
import { handle as azureHandler } from "../../azure";
import { handle as googleHandler } from "../../google";
import { handle as anthropicHandler } from "../../anthropic";
import { handle as baiduHandler } from "../../baidu";
import { handle as bytedanceHandler } from "../../bytedance";
import { handle as alibabaHandler } from "../../alibaba";
import { handle as moonshotHandler } from "../../moonshot";
import { handle as stabilityHandler } from "../../stability";
import { handle as iflytekHandler } from "../../iflytek";
import { handle as deepseekHandler } from "../../deepseek";
import { handle as siliconflowHandler } from "../../siliconflow";
import { handle as xaiHandler } from "../../xai";
import { handle as chatglmHandler } from "../../glm";
import { handle as proxyHandler } from "../../proxy";
/**
 * Catch-all API route: dispatches /api/<provider>/* to the matching
 * provider handler, falling back to the generic proxy for unknown paths.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { provider: string; path: string[] } },
) {
  const apiPath = `/api/${params.provider}`;
  console.log(`[${params.provider} Route] params `, params);

  // Dispatch table: each known provider prefix maps to its dedicated handler.
  // ApiPath.Tencent is intentionally absent — it is served by "/api/tencent".
  const handlers: Record<
    string,
    (
      req: NextRequest,
      ctx: { params: { provider: string; path: string[] } },
    ) => Promise<Response>
  > = {
    [ApiPath.Azure]: azureHandler,
    [ApiPath.Google]: googleHandler,
    [ApiPath.Anthropic]: anthropicHandler,
    [ApiPath.Baidu]: baiduHandler,
    [ApiPath.ByteDance]: bytedanceHandler,
    [ApiPath.Alibaba]: alibabaHandler,
    [ApiPath.Moonshot]: moonshotHandler,
    [ApiPath.Stability]: stabilityHandler,
    [ApiPath.Iflytek]: iflytekHandler,
    [ApiPath.DeepSeek]: deepseekHandler,
    [ApiPath.XAI]: xaiHandler,
    [ApiPath.ChatGLM]: chatglmHandler,
    [ApiPath.SiliconFlow]: siliconflowHandler,
    [ApiPath.OpenAI]: openaiHandler,
  };

  // Unknown providers fall through to the generic proxy handler.
  const providerHandler = handlers[apiPath] ?? proxyHandler;
  return providerHandler(req, { params });
}
// Register the dispatcher for both HTTP verbs served by this catch-all route.
export const GET = handle;
export const POST = handle;
// Run on the Edge runtime (Next.js route segment config).
export const runtime = "edge";
// Vercel edge regions this route prefers to run in — NOTE(review): assumed to
// match the deployment's latency needs; confirm against the hosting setup.
export const preferredRegion = [
  "arn1",
  "bom1",
  "cdg1",
  "cle1",
  "cpt1",
  "dub1",
  "fra1",
  "gru1",
  "hnd1",
  "iad1",
  "icn1",
  "kix1",
  "lhr1",
  "pdx1",
  "sfo1",
  "sin1",
  "syd1",
];

129
app/api/alibaba.ts Normal file
View File

@ -0,0 +1,129 @@
import { getServerSideConfig } from "@/app/config/server";
import {
ALIBABA_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
// Server-side configuration, read once at module load.
const serverConfig = getServerSideConfig();

/**
 * Entry point for /api/alibaba/*: answers CORS preflight, authenticates the
 * caller, then proxies the request to the Alibaba (DashScope) upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Alibaba Route] params ", params);

  // CORS preflight: acknowledge without authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Validate the caller's access code / API key before proxying.
  const authResult = auth(req, ModelProvider.Qwen);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Alibaba] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
// Forwards the incoming request to the Alibaba (DashScope) upstream and
// streams the response back, applying the server-side model allowlist.
async function request(req: NextRequest) {
  const controller = new AbortController();
  // Strip the local /api/alibaba prefix; the remainder is the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Alibaba, "");
  let baseUrl = serverConfig.alibabaUrl || ALIBABA_BASE_URL;
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }
  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }
  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);
  // Abort the upstream call after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );
  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
      // Forward the caller's SSE preference; defaults to "disable".
      "X-DashScope-SSE": req.headers.get("X-DashScope-SSE") ?? "disable",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };
  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body consumes the stream, so re-attach the text copy.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;
      const jsonBody = JSON.parse(clonedBody) as { model?: string };
      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Alibaba as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON — forward it unchanged.
      console.error(`[Alibaba] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}

170
app/api/anthropic.ts Normal file
View File

@ -0,0 +1,170 @@
import { getServerSideConfig } from "@/app/config/server";
import {
ANTHROPIC_BASE_URL,
Anthropic,
ApiPath,
ServiceProvider,
ModelProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
// Only the Anthropic chat endpoints may be reached through this proxy.
const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);

/**
 * Entry point for /api/anthropic/*: answers CORS preflight, enforces the
 * path allowlist, authenticates the caller, then proxies upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Anthropic Route] params ", params);

  // CORS preflight: acknowledge without authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Reject any sub-path that is not an allow-listed chat endpoint.
  const subpath = params.path.join("/");
  if (!ALLOWD_PATH.has(subpath)) {
    console.log("[Anthropic Route] forbidden path ", subpath);
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + subpath,
      },
      { status: 403 },
    );
  }

  // Validate the caller's access code / API key before proxying.
  const authResult = auth(req, ModelProvider.Claude);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Anthropic] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
// Server-side configuration, read once at module load.
const serverConfig = getServerSideConfig();
// Proxies the request to the Anthropic upstream (optionally rewritten to a
// Cloudflare AI Gateway URL), forwarding credentials and applying the
// server-side model allowlist.
async function request(req: NextRequest) {
  const controller = new AbortController();
  // Prefer the caller's x-api-key, then a Bearer Authorization header,
  // and finally the server's own Anthropic key.
  let authHeaderName = "x-api-key";
  let authValue =
    req.headers.get(authHeaderName) ||
    req.headers.get("Authorization")?.replaceAll("Bearer ", "").trim() ||
    serverConfig.anthropicApiKey ||
    "";
  // Strip the local /api/anthropic prefix; the remainder is the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Anthropic, "");
  let baseUrl =
    serverConfig.anthropicUrl || serverConfig.baseUrl || ANTHROPIC_BASE_URL;
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }
  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }
  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);
  // Abort the upstream call after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );
  // try rebuild url, when using cloudflare ai gateway in server
  const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}${path}`);
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      "Cache-Control": "no-store",
      "anthropic-dangerous-direct-browser-access": "true",
      [authHeaderName]: authValue,
      // The caller's version header wins; otherwise fall back to the
      // configured version, then the project default.
      "anthropic-version":
        req.headers.get("anthropic-version") ||
        serverConfig.anthropicApiVersion ||
        Anthropic.Vision,
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };
  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body consumes the stream, so re-attach the text copy.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;
      const jsonBody = JSON.parse(clonedBody) as { model?: string };
      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Anthropic as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON — forward it unchanged.
      console.error(`[Anthropic] filter`, e);
    }
  }
  // console.log("[Anthropic request]", fetchOptions.headers, req.method);
  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // console.log(
    //   "[Anthropic response]",
    //   res.status,
    //   " ",
    //   res.headers,
    //   res.url,
    // );
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}

View File

@ -0,0 +1,73 @@
import md5 from "spark-md5";
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "@/app/config/server";
/**
 * Share-link storage backed by Cloudflare Workers KV.
 * POST stores the request body keyed by its MD5 hash (content-addressed, so
 * identical payloads share one entry); GET streams a stored value back by id.
 *
 * Fixes vs. previous revision: `parseInt` now gets an explicit radix, the
 * misleading try/catch around it (Number.parseInt never throws) is replaced
 * with an explicit finiteness check, and the inner fetch results no longer
 * shadow the `res` parameter.
 */
async function handle(req: NextRequest, res: NextResponse) {
  const serverConfig = getServerSideConfig();

  // Cloudflare KV REST endpoint for the configured account + namespace.
  const storeUrl = () =>
    `https://api.cloudflare.com/client/v4/accounts/${serverConfig.cloudflareAccountId}/storage/kv/namespaces/${serverConfig.cloudflareKVNamespaceId}`;
  const storeHeaders = () => ({
    Authorization: `Bearer ${serverConfig.cloudflareKVApiKey}`,
  });

  if (req.method === "POST") {
    const clonedBody = await req.text();
    const hashedCode = md5.hash(clonedBody).trim();
    const body: {
      key: string;
      value: string;
      expiration_ttl?: number;
    } = {
      key: hashedCode,
      value: clonedBody,
    };
    // Cloudflare KV rejects expiration_ttl below 60 seconds; store without
    // expiry when the configured TTL is absent, non-numeric, or too small.
    const ttl = Number.parseInt(serverConfig.cloudflareKVTTL as string, 10);
    if (Number.isFinite(ttl) && ttl > 60) {
      body["expiration_ttl"] = ttl;
    }
    const storeRes = await fetch(`${storeUrl()}/bulk`, {
      headers: {
        ...storeHeaders(),
        "Content-Type": "application/json",
      },
      method: "PUT",
      body: JSON.stringify([body]),
    });
    const result = await storeRes.json();
    console.log("save data", result);
    if (result?.success) {
      return NextResponse.json(
        { code: 0, id: hashedCode, result },
        { status: storeRes.status },
      );
    }
    return NextResponse.json(
      { error: true, msg: "Save data error" },
      { status: 400 },
    );
  }
  if (req.method === "GET") {
    const id = req?.nextUrl?.searchParams?.get("id");
    const kvRes = await fetch(`${storeUrl()}/values/${id}`, {
      headers: storeHeaders(),
      method: "GET",
    });
    // Stream the stored value straight through, preserving status/headers.
    return new Response(kvRes.body, {
      status: kvRes.status,
      statusText: kvRes.statusText,
      headers: kvRes.headers,
    });
  }
  return NextResponse.json(
    { error: true, msg: "Invalid request" },
    { status: 400 },
  );
}
export const POST = handle;
export const GET = handle;
export const runtime = "edge";

View File

@ -57,12 +57,62 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) {
if (!apiKey) { if (!apiKey) {
const serverConfig = getServerSideConfig(); const serverConfig = getServerSideConfig();
const systemApiKey = // const systemApiKey =
modelProvider === ModelProvider.GeminiPro // modelProvider === ModelProvider.GeminiPro
? serverConfig.googleApiKey // ? serverConfig.googleApiKey
: serverConfig.isAzure // : serverConfig.isAzure
? serverConfig.azureApiKey // ? serverConfig.azureApiKey
: serverConfig.apiKey; // : serverConfig.apiKey;
let systemApiKey: string | undefined;
switch (modelProvider) {
case ModelProvider.Stability:
systemApiKey = serverConfig.stabilityApiKey;
break;
case ModelProvider.GeminiPro:
systemApiKey = serverConfig.googleApiKey;
break;
case ModelProvider.Claude:
systemApiKey = serverConfig.anthropicApiKey;
break;
case ModelProvider.Doubao:
systemApiKey = serverConfig.bytedanceApiKey;
break;
case ModelProvider.Ernie:
systemApiKey = serverConfig.baiduApiKey;
break;
case ModelProvider.Qwen:
systemApiKey = serverConfig.alibabaApiKey;
break;
case ModelProvider.Moonshot:
systemApiKey = serverConfig.moonshotApiKey;
break;
case ModelProvider.Iflytek:
systemApiKey =
serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret;
break;
case ModelProvider.DeepSeek:
systemApiKey = serverConfig.deepseekApiKey;
break;
case ModelProvider.XAI:
systemApiKey = serverConfig.xaiApiKey;
break;
case ModelProvider.ChatGLM:
systemApiKey = serverConfig.chatglmApiKey;
break;
case ModelProvider.SiliconFlow:
systemApiKey = serverConfig.siliconFlowApiKey;
break;
case ModelProvider.GPT:
default:
if (req.nextUrl.pathname.includes("azure/deployments")) {
systemApiKey = serverConfig.azureApiKey;
} else {
systemApiKey = serverConfig.apiKey;
}
}
if (systemApiKey) { if (systemApiKey) {
console.log("[Auth] use system api key"); console.log("[Auth] use system api key");
req.headers.set("Authorization", `Bearer ${systemApiKey}`); req.headers.set("Authorization", `Bearer ${systemApiKey}`);

32
app/api/azure.ts Normal file
View File

@ -0,0 +1,32 @@
import { ModelProvider } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "./auth";
import { requestOpenai } from "./common";
/**
 * Entry point for /api/azure/*: answers CORS preflight, authenticates the
 * caller, then delegates to the shared OpenAI proxy implementation.
 *
 * Fix vs. previous revision: removed the unused `subpath` local.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Azure Route] params ", params);

  // CORS preflight: acknowledge without authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Azure reuses the OpenAI credential flow (ModelProvider.GPT).
  const authResult = auth(req, ModelProvider.GPT);
  if (authResult.error) {
    return NextResponse.json(authResult, {
      status: 401,
    });
  }

  try {
    // Azure requests share the OpenAI proxy implementation.
    return await requestOpenai(req);
  } catch (e) {
    console.error("[Azure] ", e);
    return NextResponse.json(prettyObject(e));
  }
}

144
app/api/baidu.ts Normal file
View File

@ -0,0 +1,144 @@
import { getServerSideConfig } from "@/app/config/server";
import {
BAIDU_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
import { getAccessToken } from "@/app/utils/baidu";
// Server-side configuration, read once at module load.
const serverConfig = getServerSideConfig();

/**
 * Entry point for /api/baidu/*: answers CORS preflight, authenticates the
 * caller, checks the server key pair, then proxies to the Baidu upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Baidu Route] params ", params);

  // CORS preflight: acknowledge without authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Validate the caller's access code / API key before proxying.
  const authResult = auth(req, ModelProvider.Ernie);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  // Baidu needs both an API key and a secret key to mint access tokens.
  if (!serverConfig.baiduApiKey || !serverConfig.baiduSecretKey) {
    return NextResponse.json(
      {
        error: true,
        message: `missing BAIDU_API_KEY or BAIDU_SECRET_KEY in server env vars`,
      },
      { status: 401 },
    );
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Baidu] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
// Proxies the request to the Baidu (ERNIE) upstream. Baidu authenticates via
// an access_token query parameter minted from the server's key pair.
async function request(req: NextRequest) {
  const controller = new AbortController();
  // Strip the local /api/baidu prefix; the remainder is the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Baidu, "");
  let baseUrl = serverConfig.baiduUrl || BAIDU_BASE_URL;
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }
  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }
  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);
  // Abort the upstream call after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );
  // Exchange the configured key/secret for a short-lived access token.
  const { access_token } = await getAccessToken(
    serverConfig.baiduApiKey as string,
    serverConfig.baiduSecretKey as string,
  );
  const fetchUrl = `${baseUrl}${path}?access_token=${access_token}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };
  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body consumes the stream, so re-attach the text copy.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;
      const jsonBody = JSON.parse(clonedBody) as { model?: string };
      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Baidu as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON — forward it unchanged.
      console.error(`[Baidu] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}

129
app/api/bytedance.ts Normal file
View File

@ -0,0 +1,129 @@
import { getServerSideConfig } from "@/app/config/server";
import {
BYTEDANCE_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
// Server-side configuration, read once at module load.
const serverConfig = getServerSideConfig();

/**
 * Entry point for /api/bytedance/*: answers CORS preflight, authenticates
 * the caller, then proxies the request to the ByteDance (Doubao) upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[ByteDance Route] params ", params);

  // CORS preflight: acknowledge without authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Validate the caller's access code / API key before proxying.
  const authResult = auth(req, ModelProvider.Doubao);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[ByteDance] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
// Forwards the incoming request to the ByteDance upstream and streams the
// response back, applying the server-side model allowlist.
async function request(req: NextRequest) {
  const controller = new AbortController();
  // Strip the local /api/bytedance prefix; the remainder is the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ByteDance, "");
  let baseUrl = serverConfig.bytedanceUrl || BYTEDANCE_BASE_URL;
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }
  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }
  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);
  // Abort the upstream call after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );
  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };
  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body consumes the stream, so re-attach the text copy.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;
      const jsonBody = JSON.parse(clonedBody) as { model?: string };
      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.ByteDance as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON — forward it unchanged.
      console.error(`[ByteDance] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}

View File

@ -1,17 +1,19 @@
import { NextRequest, NextResponse } from "next/server"; import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "../config/server"; import { getServerSideConfig } from "../config/server";
import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant"; import { OPENAI_BASE_URL, ServiceProvider } from "../constant";
import { collectModelTable } from "../utils/model"; import { cloudflareAIGatewayUrl } from "../utils/cloudflare";
import { makeAzurePath } from "../azure"; import { getModelProvider, isModelNotavailableInServer } from "../utils/model";
const serverConfig = getServerSideConfig(); const serverConfig = getServerSideConfig();
export async function requestOpenai(req: NextRequest) { export async function requestOpenai(req: NextRequest) {
const controller = new AbortController(); const controller = new AbortController();
const isAzure = req.nextUrl.pathname.includes("azure/deployments");
var authValue, var authValue,
authHeaderName = ""; authHeaderName = "";
if (serverConfig.isAzure) { if (isAzure) {
authValue = authValue =
req.headers req.headers
.get("Authorization") .get("Authorization")
@ -25,13 +27,10 @@ export async function requestOpenai(req: NextRequest) {
authHeaderName = "Authorization"; authHeaderName = "Authorization";
} }
let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( let path = `${req.nextUrl.pathname}`.replaceAll("/api/openai/", "");
"/api/openai/",
"",
);
let baseUrl = let baseUrl =
serverConfig.azureUrl || serverConfig.baseUrl || OPENAI_BASE_URL; (isAzure ? serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL;
if (!baseUrl.startsWith("http")) { if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`; baseUrl = `https://${baseUrl}`;
@ -43,10 +42,6 @@ export async function requestOpenai(req: NextRequest) {
console.log("[Proxy] ", path); console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl); console.log("[Base Url]", baseUrl);
// this fix [Org ID] undefined in server side if not using custom point
if (serverConfig.openaiOrgId !== undefined) {
console.log("[Org ID]", serverConfig.openaiOrgId);
}
const timeoutId = setTimeout( const timeoutId = setTimeout(
() => { () => {
@ -55,17 +50,46 @@ export async function requestOpenai(req: NextRequest) {
10 * 60 * 1000, 10 * 60 * 1000,
); );
if (serverConfig.isAzure) { if (isAzure) {
if (!serverConfig.azureApiVersion) { const azureApiVersion =
return NextResponse.json({ req?.nextUrl?.searchParams?.get("api-version") ||
error: true, serverConfig.azureApiVersion;
message: `missing AZURE_API_VERSION in server env vars`, baseUrl = baseUrl.split("/deployments").shift() as string;
}); path = `${req.nextUrl.pathname.replaceAll(
"/api/azure/",
"",
)}?api-version=${azureApiVersion}`;
// Forward compatibility:
// if display_name(deployment_name) not set, and '{deploy-id}' in AZURE_URL
// then using default '{deploy-id}'
if (serverConfig.customModels && serverConfig.azureUrl) {
const modelName = path.split("/")[1];
let realDeployName = "";
serverConfig.customModels
.split(",")
.filter((v) => !!v && !v.startsWith("-") && v.includes(modelName))
.forEach((m) => {
const [fullName, displayName] = m.split("=");
const [_, providerName] = getModelProvider(fullName);
if (providerName === "azure" && !displayName) {
const [_, deployId] = (serverConfig?.azureUrl ?? "").split(
"deployments/",
);
if (deployId) {
realDeployName = deployId;
}
}
});
if (realDeployName) {
console.log("[Replace with DeployId", realDeployName);
path = path.replaceAll(modelName, realDeployName);
}
} }
path = makeAzurePath(path, serverConfig.azureApiVersion);
} }
const fetchUrl = `${baseUrl}/${path}`; const fetchUrl = cloudflareAIGatewayUrl(`${baseUrl}/${path}`);
console.log("fetchUrl", fetchUrl);
const fetchOptions: RequestInit = { const fetchOptions: RequestInit = {
headers: { headers: {
"Content-Type": "application/json", "Content-Type": "application/json",
@ -87,17 +111,23 @@ export async function requestOpenai(req: NextRequest) {
// #1815 try to refuse gpt4 request // #1815 try to refuse gpt4 request
if (serverConfig.customModels && req.body) { if (serverConfig.customModels && req.body) {
try { try {
const modelTable = collectModelTable(
DEFAULT_MODELS,
serverConfig.customModels,
);
const clonedBody = await req.text(); const clonedBody = await req.text();
fetchOptions.body = clonedBody; fetchOptions.body = clonedBody;
const jsonBody = JSON.parse(clonedBody) as { model?: string }; const jsonBody = JSON.parse(clonedBody) as { model?: string };
// not undefined and is false // not undefined and is false
if (modelTable[jsonBody?.model ?? ""].available === false) { if (
isModelNotavailableInServer(
serverConfig.customModels,
jsonBody?.model as string,
[
ServiceProvider.OpenAI,
ServiceProvider.Azure,
jsonBody?.model as string, // support provider-unspecified model
],
)
) {
return NextResponse.json( return NextResponse.json(
{ {
error: true, error: true,
@ -116,12 +146,29 @@ export async function requestOpenai(req: NextRequest) {
try { try {
const res = await fetch(fetchUrl, fetchOptions); const res = await fetch(fetchUrl, fetchOptions);
// Extract the OpenAI-Organization header from the response
const openaiOrganizationHeader = res.headers.get("OpenAI-Organization");
// Check if serverConfig.openaiOrgId is defined and not an empty string
if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") {
// If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present
console.log("[Org ID]", openaiOrganizationHeader);
} else {
console.log("[Org ID] is not set up.");
}
// to prevent browser prompt for credentials // to prevent browser prompt for credentials
const newHeaders = new Headers(res.headers); const newHeaders = new Headers(res.headers);
newHeaders.delete("www-authenticate"); newHeaders.delete("www-authenticate");
// to disable nginx buffering // to disable nginx buffering
newHeaders.set("X-Accel-Buffering", "no"); newHeaders.set("X-Accel-Buffering", "no");
// Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV)
// Also, this is to prevent the header from being sent to the client
if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") {
newHeaders.delete("OpenAI-Organization");
}
// The latest version of the OpenAI API forced the content-encoding to be "br" in json response // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
// So if the streaming is disabled, we need to remove the content-encoding header // So if the streaming is disabled, we need to remove the content-encoding header
// Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header

View File

@ -13,6 +13,8 @@ const DANGER_CONFIG = {
hideBalanceQuery: serverConfig.hideBalanceQuery, hideBalanceQuery: serverConfig.hideBalanceQuery,
disableFastLink: serverConfig.disableFastLink, disableFastLink: serverConfig.disableFastLink,
customModels: serverConfig.customModels, customModels: serverConfig.customModels,
defaultModel: serverConfig.defaultModel,
visionModels: serverConfig.visionModels,
}; };
declare global { declare global {

View File

@ -1,43 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
// Generic pass-through proxy: /api/<protocol>/<host>/<path...> is forwarded
// to `<protocol>://<host>/<path...>` with the caller's authorization header.
// NOTE(review): the target host is entirely caller-chosen — confirm access
// control happens before this handler, otherwise it acts as an open relay.
async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  // CORS preflight: acknowledge without forwarding.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }
  const [protocol, ...subpath] = params.path;
  const targetUrl = `${protocol}://${subpath.join("/")}`;
  // The upstream verb comes from a custom "method" header, not req.method
  // (both GET and POST land here and carry the real verb in that header).
  const method = req.headers.get("method") ?? undefined;
  // fetch rejects GET/HEAD requests that carry a body.
  const shouldNotHaveBody = ["get", "head"].includes(
    method?.toLowerCase() ?? "",
  );
  const fetchOptions: RequestInit = {
    headers: {
      authorization: req.headers.get("authorization") ?? "",
    },
    body: shouldNotHaveBody ? null : req.body,
    method,
    // @ts-ignore
    duplex: "half",
  };
  const fetchResult = await fetch(targetUrl, fetchOptions);
  console.log("[Any Proxy]", targetUrl, {
    status: fetchResult.status,
    statusText: fetchResult.statusText,
  });
  // Return the upstream response as-is (status, headers, and body stream).
  return fetchResult;
}
export const POST = handle;
export const GET = handle;
export const OPTIONS = handle;
export const runtime = "edge";

128
app/api/deepseek.ts Normal file
View File

@ -0,0 +1,128 @@
import { getServerSideConfig } from "@/app/config/server";
import {
DEEPSEEK_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
// Server-side configuration, read once at module load.
const serverConfig = getServerSideConfig();

/**
 * Entry point for /api/deepseek/*: answers CORS preflight, authenticates
 * the caller, then proxies the request to the DeepSeek upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[DeepSeek Route] params ", params);

  // CORS preflight: acknowledge without authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Validate the caller's access code / API key before proxying.
  const authResult = auth(req, ModelProvider.DeepSeek);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[DeepSeek] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
// Forwards the incoming request to the DeepSeek upstream and streams the
// response back, applying the server-side model allowlist.
async function request(req: NextRequest) {
  const controller = new AbortController();
  // Strip the local /api/deepseek prefix; the remainder is the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.DeepSeek, "");
  let baseUrl = serverConfig.deepseekUrl || DEEPSEEK_BASE_URL;
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }
  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }
  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);
  // Abort the upstream call after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );
  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };
  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body consumes the stream, so re-attach the text copy.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;
      const jsonBody = JSON.parse(clonedBody) as { model?: string };
      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.DeepSeek as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON — forward it unchanged.
      console.error(`[DeepSeek] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}

129
app/api/glm.ts Normal file
View File

@ -0,0 +1,129 @@
import { getServerSideConfig } from "@/app/config/server";
import {
CHATGLM_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
// Server-side configuration, read once at module load.
const serverConfig = getServerSideConfig();

/**
 * Entry point for /api/glm/*: answers CORS preflight, authenticates the
 * caller, then proxies the request to the ChatGLM upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[GLM Route] params ", params);

  // CORS preflight: acknowledge without authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  // Validate the caller's access code / API key before proxying.
  const authResult = auth(req, ModelProvider.ChatGLM);
  if (authResult.error) {
    return NextResponse.json(authResult, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[GLM] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
// Forwards the incoming request to the ChatGLM upstream and streams the
// response back, applying the server-side model allowlist.
async function request(req: NextRequest) {
  const controller = new AbortController();
  // Strip the local /api/glm prefix; the remainder is the upstream path.
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.ChatGLM, "");
  let baseUrl = serverConfig.chatglmUrl || CHATGLM_BASE_URL;
  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }
  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }
  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);
  // Abort the upstream call after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );
  const fetchUrl = `${baseUrl}${path}`;
  console.log("[Fetch Url] ", fetchUrl);
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };
  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // Reading the body consumes the stream, so re-attach the text copy.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;
      const jsonBody = JSON.parse(clonedBody) as { model?: string };
      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.ChatGLM as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Body was not valid JSON — forward it unchanged.
      console.error(`[GLM] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");
    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}

View File

@ -1,11 +1,14 @@
import { NextRequest, NextResponse } from "next/server"; import { NextRequest, NextResponse } from "next/server";
import { auth } from "../../auth"; import { auth } from "./auth";
import { getServerSideConfig } from "@/app/config/server"; import { getServerSideConfig } from "@/app/config/server";
import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant"; import { ApiPath, GEMINI_BASE_URL, ModelProvider } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
async function handle( const serverConfig = getServerSideConfig();
export async function handle(
req: NextRequest, req: NextRequest,
{ params }: { params: { path: string[] } }, { params }: { params: { provider: string; path: string[] } },
) { ) {
console.log("[Google Route] params ", params); console.log("[Google Route] params ", params);
@ -13,32 +16,6 @@ async function handle(
return NextResponse.json({ body: "OK" }, { status: 200 }); return NextResponse.json({ body: "OK" }, { status: 200 });
} }
const controller = new AbortController();
const serverConfig = getServerSideConfig();
let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL;
if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, -1);
}
let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", "");
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
const timeoutId = setTimeout(
() => {
controller.abort();
},
10 * 60 * 1000,
);
const authResult = auth(req, ModelProvider.GeminiPro); const authResult = auth(req, ModelProvider.GeminiPro);
if (authResult.error) { if (authResult.error) {
return NextResponse.json(authResult, { return NextResponse.json(authResult, {
@ -46,12 +23,13 @@ async function handle(
}); });
} }
const bearToken = req.headers.get("Authorization") ?? ""; const bearToken =
req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || "";
const token = bearToken.trim().replaceAll("Bearer ", "").trim(); const token = bearToken.trim().replaceAll("Bearer ", "").trim();
const key = token ? token : serverConfig.googleApiKey; const apiKey = token ? token : serverConfig.googleApiKey;
if (!key) { if (!apiKey) {
return NextResponse.json( return NextResponse.json(
{ {
error: true, error: true,
@ -62,12 +40,70 @@ async function handle(
}, },
); );
} }
try {
const response = await request(req, apiKey);
return response;
} catch (e) {
console.error("[Google] ", e);
return NextResponse.json(prettyObject(e));
}
}
const fetchUrl = `${baseUrl}/${path}?key=${key}`; export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"bom1",
"cle1",
"cpt1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];
async function request(req: NextRequest, apiKey: string) {
const controller = new AbortController();
let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL;
let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Google, "");
if (!baseUrl.startsWith("http")) {
baseUrl = `https://${baseUrl}`;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, -1);
}
console.log("[Proxy] ", path);
console.log("[Base Url]", baseUrl);
const timeoutId = setTimeout(
() => {
controller.abort();
},
10 * 60 * 1000,
);
const fetchUrl = `${baseUrl}${path}${
req?.nextUrl?.searchParams?.get("alt") === "sse" ? "?alt=sse" : ""
}`;
console.log("[Fetch Url] ", fetchUrl);
const fetchOptions: RequestInit = { const fetchOptions: RequestInit = {
headers: { headers: {
"Content-Type": "application/json", "Content-Type": "application/json",
"Cache-Control": "no-store", "Cache-Control": "no-store",
"x-goog-api-key":
req.headers.get("x-goog-api-key") ||
(req.headers.get("Authorization") ?? "").replace("Bearer ", ""),
}, },
method: req.method, method: req.method,
body: req.body, body: req.body,
@ -95,22 +131,3 @@ async function handle(
clearTimeout(timeoutId); clearTimeout(timeoutId);
} }
} }
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"bom1",
"cle1",
"cpt1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];

129
app/api/iflytek.ts Normal file
View File

@ -0,0 +1,129 @@
import { getServerSideConfig } from "@/app/config/server";
import {
IFLYTEK_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
// iflytek
const serverConfig = getServerSideConfig();
/**
 * Route entry for the iFlytek Spark proxy: answers CORS preflight, checks
 * authentication, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Iflytek Route] params ", params);

  // CORS preflight needs no authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authCheck = auth(req, ModelProvider.Iflytek);
  if (authCheck.error) {
    return NextResponse.json(authCheck, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Iflytek] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
/**
 * Proxies the incoming request to the iFlytek upstream.
 *
 * Strips the local API prefix, resolves the configured base URL, optionally
 * rejects disallowed models, and streams the upstream response back with
 * headers adjusted for browsers and nginx.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // iflytek use base url or just remove the path
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Iflytek, "");

  let baseUrl = serverConfig.iflytekUrl || IFLYTEK_BASE_URL;

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream fetch after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half", // required when streaming a request body through fetch
    signal: controller.signal,
  };

  // try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // The body stream can only be read once; re-use the text for the
      // outgoing request after inspecting the requested model.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Iflytek as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: if the body is not JSON, forward it unchanged.
      console.error(`[Iflytek] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    // Always release the timer, even when fetch throws or a 403 is returned.
    clearTimeout(timeoutId);
  }
}

128
app/api/moonshot.ts Normal file
View File

@ -0,0 +1,128 @@
import { getServerSideConfig } from "@/app/config/server";
import {
MOONSHOT_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
/**
 * Route entry for the Moonshot proxy: answers CORS preflight, checks
 * authentication, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Moonshot Route] params ", params);

  // CORS preflight needs no authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authCheck = auth(req, ModelProvider.Moonshot);
  if (authCheck.error) {
    return NextResponse.json(authCheck, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Moonshot] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
/**
 * Proxies the incoming request to the Moonshot upstream.
 *
 * Strips the local API prefix, resolves the configured base URL, optionally
 * rejects disallowed models, and streams the upstream response back with
 * headers adjusted for browsers and nginx.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local /api prefix so only the upstream path remains.
  // (Original comment said "alibaba" — a copy/paste from another route.)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.Moonshot, "");

  let baseUrl = serverConfig.moonshotUrl || MOONSHOT_BASE_URL;

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream fetch after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half", // required when streaming a request body through fetch
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // The body stream can only be read once; re-use the text for the
      // outgoing request after inspecting the requested model.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.Moonshot as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: if the body is not JSON, forward it unchanged.
      console.error(`[Moonshot] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    // Always release the timer, even when fetch throws or a 403 is returned.
    clearTimeout(timeoutId);
  }
}

View File

@ -3,24 +3,30 @@ import { getServerSideConfig } from "@/app/config/server";
import { ModelProvider, OpenaiPath } from "@/app/constant"; import { ModelProvider, OpenaiPath } from "@/app/constant";
import { prettyObject } from "@/app/utils/format"; import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server"; import { NextRequest, NextResponse } from "next/server";
import { auth } from "../../auth"; import { auth } from "./auth";
import { requestOpenai } from "../../common"; import { requestOpenai } from "./common";
const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); const ALLOWED_PATH = new Set(Object.values(OpenaiPath));
function getModels(remoteModelRes: OpenAIListModelResponse) { function getModels(remoteModelRes: OpenAIListModelResponse) {
const config = getServerSideConfig(); const config = getServerSideConfig();
if (config.disableGPT4) { if (config.disableGPT4) {
remoteModelRes.data = remoteModelRes.data.filter( remoteModelRes.data = remoteModelRes.data.filter(
(m) => !m.id.startsWith("gpt-4"), (m) =>
!(
m.id.startsWith("gpt-4") ||
m.id.startsWith("chatgpt-4o") ||
m.id.startsWith("o1") ||
m.id.startsWith("o3")
) || m.id.startsWith("gpt-4o-mini"),
); );
} }
return remoteModelRes; return remoteModelRes;
} }
async function handle( export async function handle(
req: NextRequest, req: NextRequest,
{ params }: { params: { path: string[] } }, { params }: { params: { path: string[] } },
) { ) {
@ -32,7 +38,7 @@ async function handle(
const subpath = params.path.join("/"); const subpath = params.path.join("/");
if (!ALLOWD_PATH.has(subpath)) { if (!ALLOWED_PATH.has(subpath)) {
console.log("[OpenAI Route] forbidden path ", subpath); console.log("[OpenAI Route] forbidden path ", subpath);
return NextResponse.json( return NextResponse.json(
{ {
@ -70,27 +76,3 @@ async function handle(
return NextResponse.json(prettyObject(e)); return NextResponse.json(prettyObject(e));
} }
} }
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];

89
app/api/proxy.ts Normal file
View File

@ -0,0 +1,89 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "@/app/config/server";
/**
 * Generic pass-through proxy: forwards the request to the URL supplied in
 * the `x-base-url` header, after stripping browser/infrastructure headers.
 * Requests targeting api.openai.com are re-authenticated with the
 * server-side API key.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Proxy Route] params ", params);

  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const serverConfig = getServerSideConfig();

  // remove path params from searchParams
  req.nextUrl.searchParams.delete("path");
  req.nextUrl.searchParams.delete("provider");

  const subpath = params.path.join("/");
  const fetchUrl = `${req.headers.get(
    "x-base-url",
  )}/${subpath}?${req.nextUrl.searchParams.toString()}`;
  const skipHeaders = ["connection", "host", "origin", "referer", "cookie"];
  // NOTE(review): indexOf(...) > -1 drops any header merely *containing*
  // "x-" or "sec-" (e.g. "max-forwards"); startsWith was presumably the
  // intent — confirm before changing behavior.
  const headers = new Headers(
    Array.from(req.headers.entries()).filter((item) => {
      if (
        item[0].indexOf("x-") > -1 ||
        item[0].indexOf("sec-") > -1 ||
        skipHeaders.includes(item[0])
      ) {
        return false;
      }
      return true;
    }),
  );
  // if dalle3 use openai api key
  const baseUrl = req.headers.get("x-base-url");
  if (baseUrl?.includes("api.openai.com")) {
    if (!serverConfig.apiKey) {
      return NextResponse.json(
        { error: "OpenAI API key not configured" },
        { status: 500 },
      );
    }
    headers.set("Authorization", `Bearer ${serverConfig.apiKey}`);
  }

  const controller = new AbortController();
  const fetchOptions: RequestInit = {
    headers,
    method: req.method,
    body: req.body,
    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  // Abort the upstream fetch after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  try {
    const res = await fetch(fetchUrl, fetchOptions);
    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
    // So if the streaming is disabled, we need to remove the content-encoding header
    // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header
    // The browser will try to decode the response with brotli and fail
    newHeaders.delete("content-encoding");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    // Always release the timer, even when fetch throws.
    clearTimeout(timeoutId);
  }
}

128
app/api/siliconflow.ts Normal file
View File

@ -0,0 +1,128 @@
import { getServerSideConfig } from "@/app/config/server";
import {
SILICONFLOW_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
/**
 * Route entry for the SiliconFlow proxy: answers CORS preflight, checks
 * authentication, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[SiliconFlow Route] params ", params);

  // CORS preflight needs no authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authCheck = auth(req, ModelProvider.SiliconFlow);
  if (authCheck.error) {
    return NextResponse.json(authCheck, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[SiliconFlow] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
/**
 * Proxies the incoming request to the SiliconFlow upstream.
 *
 * Strips the local API prefix, resolves the configured base URL, optionally
 * rejects disallowed models, and streams the upstream response back with
 * headers adjusted for browsers and nginx.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local /api prefix so only the upstream path remains.
  // (Original comment said "alibaba" — a copy/paste from another route.)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.SiliconFlow, "");

  let baseUrl = serverConfig.siliconFlowUrl || SILICONFLOW_BASE_URL;

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream fetch after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half", // required when streaming a request body through fetch
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // The body stream can only be read once; re-use the text for the
      // outgoing request after inspecting the requested model.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.SiliconFlow as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: if the body is not JSON, forward it unchanged.
      console.error(`[SiliconFlow] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    // Always release the timer, even when fetch throws or a 403 is returned.
    clearTimeout(timeoutId);
  }
}

99
app/api/stability.ts Normal file
View File

@ -0,0 +1,99 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSideConfig } from "@/app/config/server";
import { ModelProvider, STABILITY_BASE_URL } from "@/app/constant";
import { auth } from "@/app/api/auth";
/**
 * Proxy for the Stability AI image API: validates authentication and the
 * API key, then streams the request to the configured Stability endpoint
 * and the response back to the client.
 *
 * Fix: the 10-minute abort timer was previously created BEFORE the auth
 * and API-key checks, so every early return (401) leaked a pending timer.
 * The timer is now started only after validation, immediately before the
 * upstream fetch, and is always cleared in the finally block.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Stability] params ", params);

  // CORS preflight needs no authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const controller = new AbortController();

  const serverConfig = getServerSideConfig();

  let baseUrl = serverConfig.stabilityUrl || STABILITY_BASE_URL;

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  let path = `${req.nextUrl.pathname}`.replaceAll("/api/stability/", "");

  console.log("[Stability Proxy] ", path);
  console.log("[Stability Base Url]", baseUrl);

  const authResult = auth(req, ModelProvider.Stability);

  if (authResult.error) {
    return NextResponse.json(authResult, {
      status: 401,
    });
  }

  // Prefer a caller-supplied bearer token; fall back to the server-side key.
  const bearToken = req.headers.get("Authorization") ?? "";
  const token = bearToken.trim().replaceAll("Bearer ", "").trim();

  const key = token ? token : serverConfig.stabilityApiKey;

  if (!key) {
    return NextResponse.json(
      {
        error: true,
        message: `missing STABILITY_API_KEY in server env vars`,
      },
      {
        status: 401,
      },
    );
  }

  const fetchUrl = `${baseUrl}/${path}`;
  console.log("[Stability Url] ", fetchUrl);

  // Start the abort timer only once all validation has passed (see above).
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": req.headers.get("Content-Type") || "multipart/form-data",
      Accept: req.headers.get("Accept") || "application/json",
      Authorization: `Bearer ${key}`,
    },
    method: req.method,
    body: req.body,
    // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    clearTimeout(timeoutId);
  }
}

117
app/api/tencent/route.ts Normal file
View File

@ -0,0 +1,117 @@
import { getServerSideConfig } from "@/app/config/server";
import { TENCENT_BASE_URL, ModelProvider } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { getHeader } from "@/app/utils/tencent";
const serverConfig = getServerSideConfig();
/**
 * Route entry for the Tencent Hunyuan proxy: answers CORS preflight,
 * checks authentication, then forwards the request upstream.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[Tencent Route] params ", params);

  // CORS preflight needs no authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authCheck = auth(req, ModelProvider.Hunyuan);
  if (authCheck.error) {
    return NextResponse.json(authCheck, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[Tencent] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
// Next.js route wiring: GET and POST are both served by handle.
export const GET = handle;
export const POST = handle;

// Run on the Edge runtime, pinned to the listed Vercel regions.
export const runtime = "edge";
export const preferredRegion = [
  "arn1",
  "bom1",
  "cdg1",
  "cle1",
  "cpt1",
  "dub1",
  "fra1",
  "gru1",
  "hnd1",
  "iad1",
  "icn1",
  "kix1",
  "lhr1",
  "pdx1",
  "sfo1",
  "sin1",
  "syd1",
];
/**
 * Forwards a request to the Tencent Hunyuan API, signing the request body
 * with the server-side secret id/key via getHeader.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  let baseUrl = serverConfig.tencentUrl || TENCENT_BASE_URL;

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Base Url]", baseUrl);

  // Abort the upstream fetch after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  // The upstream is called at the base URL itself — no sub-path is appended.
  const fetchUrl = baseUrl;

  // The body must be fully read as text so its signature can be computed.
  const body = await req.text();
  const headers = await getHeader(
    body,
    serverConfig.tencentSecretId as string,
    serverConfig.tencentSecretKey as string,
  );
  const fetchOptions: RequestInit = {
    headers,
    method: req.method,
    body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half",
    signal: controller.signal,
  };

  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    // Always release the timer, even when fetch throws.
    clearTimeout(timeoutId);
  }
}

View File

@ -0,0 +1,73 @@
import { NextRequest, NextResponse } from "next/server";
/**
 * Proxies Redis "get"/"set" calls to an Upstash endpoint supplied by the
 * client via the `endpoint` query parameter. Only hosts under
 * *.upstash.io and the get/set actions are allowed.
 *
 * Fix: `new URL(endpoint)` throws a TypeError on a malformed endpoint,
 * which previously escaped as an unhandled error (500) instead of the
 * intended 403; the URL is now parsed defensively.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { action: string; key: string[] } },
) {
  const requestUrl = new URL(req.url);
  const endpoint = requestUrl.searchParams.get("endpoint");

  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }
  const [...key] = params.key;

  // only allow to request to *.upstash.io
  let endpointHostname = "";
  if (endpoint) {
    try {
      endpointHostname = new URL(endpoint).hostname;
    } catch (e) {
      // malformed endpoint — leave hostname empty so the check below rejects
    }
  }
  if (!endpoint || !endpointHostname.endsWith(".upstash.io")) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + params.key.join("/"),
      },
      {
        status: 403,
      },
    );
  }

  // only allow upstash get and set method
  if (params.action !== "get" && params.action !== "set") {
    console.log("[Upstash Route] forbidden action ", params.action);
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + params.action,
      },
      {
        status: 403,
      },
    );
  }

  const targetUrl = `${endpoint}/${params.action}/${params.key.join("/")}`;

  const method = req.method;
  // GET/HEAD requests must not carry a body.
  const shouldNotHaveBody = ["get", "head"].includes(
    method?.toLowerCase() ?? "",
  );

  const fetchOptions: RequestInit = {
    headers: {
      authorization: req.headers.get("authorization") ?? "",
    },
    body: shouldNotHaveBody ? null : req.body,
    method,
    // @ts-ignore
    duplex: "half",
  };

  console.log("[Upstash Proxy]", targetUrl, fetchOptions);
  const fetchResult = await fetch(targetUrl, fetchOptions);

  console.log("[Any Proxy]", targetUrl, {
    status: fetchResult.status,
    statusText: fetchResult.statusText,
  });

  return fetchResult;
}
// Next.js route wiring: all supported verbs share the same handler.
export const POST = handle;
export const GET = handle;
export const OPTIONS = handle;

// Run on the Edge runtime.
export const runtime = "edge";

View File

@ -0,0 +1,167 @@
import { NextRequest, NextResponse } from "next/server";
import { STORAGE_KEY, internalAllowedWebDavEndpoints } from "../../../constant";
import { getServerSideConfig } from "@/app/config/server";
// Server configuration is resolved once at module load.
const config = getServerSideConfig();

// Union of the built-in WebDAV hosts and any operator-configured extras,
// with blank entries dropped.
const mergedAllowedWebDavEndpoints = [
  ...internalAllowedWebDavEndpoints,
  ...config.allowedWebDavEndpoints,
].filter((domain) => Boolean(domain.trim()));

// Returns a parsed URL, or null when the string is not a valid URL.
const normalizeUrl = (url: string) => {
  try {
    return new URL(url);
  } catch (err) {
    return null;
  }
};
/**
 * WebDAV backup proxy. Forwards MKCOL/GET/PUT requests for the app's
 * backup file to a client-supplied WebDAV endpoint, after validating the
 * endpoint against the merged allow-list to prevent SSRF. The real WebDAV
 * verb is carried in the `proxy_method` query parameter.
 */
async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }
  const folder = STORAGE_KEY;
  const fileName = `${folder}/backup.json`;

  const requestUrl = new URL(req.url);
  let endpoint = requestUrl.searchParams.get("endpoint");
  let proxy_method = requestUrl.searchParams.get("proxy_method") || req.method;

  // Validate the endpoint to prevent potential SSRF attacks
  if (
    !endpoint ||
    !mergedAllowedWebDavEndpoints.some((allowedEndpoint) => {
      const normalizedAllowedEndpoint = normalizeUrl(allowedEndpoint);
      const normalizedEndpoint = normalizeUrl(endpoint as string);

      // Accept only when the host matches an allowed endpoint exactly and
      // the path sits under the allowed endpoint's path prefix.
      return (
        normalizedEndpoint &&
        normalizedEndpoint.hostname === normalizedAllowedEndpoint?.hostname &&
        normalizedEndpoint.pathname.startsWith(
          normalizedAllowedEndpoint.pathname,
        )
      );
    })
  ) {
    return NextResponse.json(
      {
        error: true,
        msg: "Invalid endpoint",
      },
      {
        status: 400,
      },
    );
  }

  // Ensure a trailing slash so the path concatenation below is well-formed.
  if (!endpoint?.endsWith("/")) {
    endpoint += "/";
  }

  const endpointPath = params.path.join("/");
  const targetPath = `${endpoint}${endpointPath}`;

  // only allow MKCOL, GET, PUT
  if (
    proxy_method !== "MKCOL" &&
    proxy_method !== "GET" &&
    proxy_method !== "PUT"
  ) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + targetPath,
      },
      {
        status: 403,
      },
    );
  }

  // for MKCOL request, only allow request ${folder}
  if (proxy_method === "MKCOL" && !targetPath.endsWith(folder)) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + targetPath,
      },
      {
        status: 403,
      },
    );
  }

  // for GET request, only allow request ending with fileName
  if (proxy_method === "GET" && !targetPath.endsWith(fileName)) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + targetPath,
      },
      {
        status: 403,
      },
    );
  }

  // for PUT request, only allow request ending with fileName
  if (proxy_method === "PUT" && !targetPath.endsWith(fileName)) {
    return NextResponse.json(
      {
        error: true,
        msg: "you are not allowed to request " + targetPath,
      },
      {
        status: 403,
      },
    );
  }

  const targetUrl = targetPath;

  const method = proxy_method || req.method;
  // GET/HEAD requests must not carry a body.
  const shouldNotHaveBody = ["get", "head"].includes(
    method?.toLowerCase() ?? "",
  );

  const fetchOptions: RequestInit = {
    headers: {
      authorization: req.headers.get("authorization") ?? "",
    },
    body: shouldNotHaveBody ? null : req.body,
    redirect: "manual",
    method,
    // @ts-ignore
    duplex: "half",
  };

  let fetchResult;

  try {
    fetchResult = await fetch(targetUrl, fetchOptions);
  } finally {
    // Log the outcome even when fetch throws (fetchResult stays undefined).
    console.log(
      "[Any Proxy]",
      targetUrl,
      {
        method: method,
      },
      {
        status: fetchResult?.status,
        statusText: fetchResult?.statusText,
      },
    );
  }

  return fetchResult;
}
// Next.js route wiring: all supported verbs share the same handler.
export const PUT = handle;
export const GET = handle;
export const OPTIONS = handle;

// Run on the Edge runtime.
export const runtime = "edge";

128
app/api/xai.ts Normal file
View File

@ -0,0 +1,128 @@
import { getServerSideConfig } from "@/app/config/server";
import {
XAI_BASE_URL,
ApiPath,
ModelProvider,
ServiceProvider,
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "@/app/api/auth";
import { isModelNotavailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
/**
 * Route entry for the XAI proxy: answers CORS preflight, checks
 * authentication, then forwards the request upstream.
 */
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log("[XAI Route] params ", params);

  // CORS preflight needs no authentication.
  if (req.method === "OPTIONS") {
    return NextResponse.json({ body: "OK" }, { status: 200 });
  }

  const authCheck = auth(req, ModelProvider.XAI);
  if (authCheck.error) {
    return NextResponse.json(authCheck, { status: 401 });
  }

  try {
    return await request(req);
  } catch (e) {
    console.error("[XAI] ", e);
    return NextResponse.json(prettyObject(e));
  }
}
/**
 * Proxies the incoming request to the XAI upstream.
 *
 * Strips the local API prefix, resolves the configured base URL, optionally
 * rejects disallowed models, and streams the upstream response back with
 * headers adjusted for browsers and nginx.
 */
async function request(req: NextRequest) {
  const controller = new AbortController();

  // Strip the local /api prefix so only the upstream path remains.
  // (Original comment said "alibaba" — a copy/paste from another route.)
  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, "");

  let baseUrl = serverConfig.xaiUrl || XAI_BASE_URL;

  if (!baseUrl.startsWith("http")) {
    baseUrl = `https://${baseUrl}`;
  }

  if (baseUrl.endsWith("/")) {
    baseUrl = baseUrl.slice(0, -1);
  }

  console.log("[Proxy] ", path);
  console.log("[Base Url]", baseUrl);

  // Abort the upstream fetch after 10 minutes.
  const timeoutId = setTimeout(
    () => {
      controller.abort();
    },
    10 * 60 * 1000,
  );

  const fetchUrl = `${baseUrl}${path}`;
  const fetchOptions: RequestInit = {
    headers: {
      "Content-Type": "application/json",
      Authorization: req.headers.get("Authorization") ?? "",
    },
    method: req.method,
    body: req.body,
    redirect: "manual",
    // @ts-ignore
    duplex: "half", // required when streaming a request body through fetch
    signal: controller.signal,
  };

  // #1815 try to refuse some request to some models
  if (serverConfig.customModels && req.body) {
    try {
      // The body stream can only be read once; re-use the text for the
      // outgoing request after inspecting the requested model.
      const clonedBody = await req.text();
      fetchOptions.body = clonedBody;

      const jsonBody = JSON.parse(clonedBody) as { model?: string };

      // not undefined and is false
      if (
        isModelNotavailableInServer(
          serverConfig.customModels,
          jsonBody?.model as string,
          ServiceProvider.XAI as string,
        )
      ) {
        return NextResponse.json(
          {
            error: true,
            message: `you are not allowed to use ${jsonBody?.model} model`,
          },
          {
            status: 403,
          },
        );
      }
    } catch (e) {
      // Best-effort filter: if the body is not JSON, forward it unchanged.
      console.error(`[XAI] filter`, e);
    }
  }
  try {
    const res = await fetch(fetchUrl, fetchOptions);

    // to prevent browser prompt for credentials
    const newHeaders = new Headers(res.headers);
    newHeaders.delete("www-authenticate");
    // to disable nginx buffering
    newHeaders.set("X-Accel-Buffering", "no");

    return new Response(res.body, {
      status: res.status,
      statusText: res.statusText,
      headers: newHeaders,
    });
  } finally {
    // Always release the timer, even when fetch throws or a 403 is returned.
    clearTimeout(timeoutId);
  }
}

View File

@ -1,9 +0,0 @@
/**
 * Builds an Azure OpenAI request path from a generic OpenAI one: drops
 * every "v1/" segment and appends the api-version query parameter,
 * respecting any query string already present.
 */
export function makeAzurePath(path: string, apiVersion: string) {
  // Azure endpoints do not use the /v1 prefix.
  const stripped = path.replaceAll("v1/", "");
  // Append api-version with "&" when a query string already exists.
  const separator = stripped.includes("?") ? "&" : "?";
  return `${stripped}${separator}api-version=${apiVersion}`;
}

View File

@ -1,31 +1,75 @@
import { getClientConfig } from "../config/client"; import { getClientConfig } from "../config/client";
import { import {
ACCESS_CODE_PREFIX, ACCESS_CODE_PREFIX,
Azure,
ModelProvider, ModelProvider,
ServiceProvider, ServiceProvider,
} from "../constant"; } from "../constant";
import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; import {
import { ChatGPTApi } from "./platforms/openai"; ChatMessageTool,
ChatMessage,
ModelType,
useAccessStore,
useChatStore,
} from "../store";
import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai";
import { GeminiProApi } from "./platforms/google"; import { GeminiProApi } from "./platforms/google";
import { ClaudeApi } from "./platforms/anthropic";
import { ErnieApi } from "./platforms/baidu";
import { DoubaoApi } from "./platforms/bytedance";
import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { MoonshotApi } from "./platforms/moonshot";
import { SparkApi } from "./platforms/iflytek";
import { DeepSeekApi } from "./platforms/deepseek";
import { XAIApi } from "./platforms/xai";
import { ChatGLMApi } from "./platforms/glm";
import { SiliconflowApi } from "./platforms/siliconflow";
export const ROLES = ["system", "user", "assistant"] as const; export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number]; export type MessageRole = (typeof ROLES)[number];
export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
export const TTSModels = ["tts-1", "tts-1-hd"] as const;
export type ChatModel = ModelType; export type ChatModel = ModelType;
export interface MultimodalContent {
type: "text" | "image_url";
text?: string;
image_url?: {
url: string;
};
}
export interface MultimodalContentForAlibaba {
text?: string;
image?: string;
}
export interface RequestMessage { export interface RequestMessage {
role: MessageRole; role: MessageRole;
content: string; content: string | MultimodalContent[];
} }
export interface LLMConfig { export interface LLMConfig {
model: string; model: string;
providerName?: string;
temperature?: number; temperature?: number;
top_p?: number; top_p?: number;
stream?: boolean; stream?: boolean;
presence_penalty?: number; presence_penalty?: number;
frequency_penalty?: number; frequency_penalty?: number;
size?: DalleRequestPayload["size"];
quality?: DalleRequestPayload["quality"];
style?: DalleRequestPayload["style"];
}
export interface SpeechOptions {
model: string;
input: string;
voice: string;
response_format?: string;
speed?: number;
onController?: (controller: AbortController) => void;
} }
export interface ChatOptions { export interface ChatOptions {
@ -33,9 +77,11 @@ export interface ChatOptions {
config: LLMConfig; config: LLMConfig;
onUpdate?: (message: string, chunk: string) => void; onUpdate?: (message: string, chunk: string) => void;
onFinish: (message: string) => void; onFinish: (message: string, responseRes: Response) => void;
onError?: (err: Error) => void; onError?: (err: Error) => void;
onController?: (controller: AbortController) => void; onController?: (controller: AbortController) => void;
onBeforeTool?: (tool: ChatMessageTool) => void;
onAfterTool?: (tool: ChatMessageTool) => void;
} }
export interface LLMUsage { export interface LLMUsage {
@ -45,18 +91,22 @@ export interface LLMUsage {
export interface LLMModel { export interface LLMModel {
name: string; name: string;
displayName?: string;
available: boolean; available: boolean;
provider: LLMModelProvider; provider: LLMModelProvider;
sorted: number;
} }
export interface LLMModelProvider { export interface LLMModelProvider {
id: string; id: string;
providerName: string; providerName: string;
providerType: string; providerType: string;
sorted: number;
} }
export abstract class LLMApi { export abstract class LLMApi {
abstract chat(options: ChatOptions): Promise<void>; abstract chat(options: ChatOptions): Promise<void>;
abstract speech(options: SpeechOptions): Promise<ArrayBuffer>;
abstract usage(): Promise<LLMUsage>; abstract usage(): Promise<LLMUsage>;
abstract models(): Promise<LLMModel[]>; abstract models(): Promise<LLMModel[]>;
} }
@ -86,11 +136,46 @@ export class ClientApi {
public llm: LLMApi; public llm: LLMApi;
constructor(provider: ModelProvider = ModelProvider.GPT) { constructor(provider: ModelProvider = ModelProvider.GPT) {
if (provider === ModelProvider.GeminiPro) { switch (provider) {
this.llm = new GeminiProApi(); case ModelProvider.GeminiPro:
return; this.llm = new GeminiProApi();
break;
case ModelProvider.Claude:
this.llm = new ClaudeApi();
break;
case ModelProvider.Ernie:
this.llm = new ErnieApi();
break;
case ModelProvider.Doubao:
this.llm = new DoubaoApi();
break;
case ModelProvider.Qwen:
this.llm = new QwenApi();
break;
case ModelProvider.Hunyuan:
this.llm = new HunyuanApi();
break;
case ModelProvider.Moonshot:
this.llm = new MoonshotApi();
break;
case ModelProvider.Iflytek:
this.llm = new SparkApi();
break;
case ModelProvider.DeepSeek:
this.llm = new DeepSeekApi();
break;
case ModelProvider.XAI:
this.llm = new XAIApi();
break;
case ModelProvider.ChatGLM:
this.llm = new ChatGLMApi();
break;
case ModelProvider.SiliconFlow:
this.llm = new SiliconflowApi();
break;
default:
this.llm = new ChatGPTApi();
} }
this.llm = new ChatGPTApi();
} }
config() {} config() {}
@ -139,40 +224,165 @@ export class ClientApi {
} }
} }
export function getHeaders() { export function getBearerToken(
const accessStore = useAccessStore.getState(); apiKey: string,
const headers: Record<string, string> = { noBearer: boolean = false,
"Content-Type": "application/json", ): string {
"x-requested-with": "XMLHttpRequest", return validString(apiKey)
Accept: "application/json", ? `${noBearer ? "" : "Bearer "}${apiKey.trim()}`
}; : "";
const modelConfig = useChatStore.getState().currentSession().mask.modelConfig; }
const isGoogle = modelConfig.model.startsWith("gemini");
const isAzure = accessStore.provider === ServiceProvider.Azure;
const authHeader = isAzure ? "api-key" : "Authorization";
const apiKey = isGoogle
? accessStore.googleApiKey
: isAzure
? accessStore.azureApiKey
: accessStore.openaiApiKey;
const clientConfig = getClientConfig();
const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
const validString = (x: string) => x && x.length > 0;
// when using google api in app, not set auth header export function validString(x: string): boolean {
if (!(isGoogle && clientConfig?.isApp)) { return x?.length > 0;
// use user's api key first }
if (validString(apiKey)) {
headers[authHeader] = makeBearer(apiKey); export function getHeaders(ignoreHeaders: boolean = false) {
} else if ( const accessStore = useAccessStore.getState();
accessStore.enabledAccessControl() && const chatStore = useChatStore.getState();
validString(accessStore.accessCode) let headers: Record<string, string> = {};
) { if (!ignoreHeaders) {
headers[authHeader] = makeBearer( headers = {
ACCESS_CODE_PREFIX + accessStore.accessCode, "Content-Type": "application/json",
); Accept: "application/json",
} };
}
const clientConfig = getClientConfig();
function getConfig() {
const modelConfig = chatStore.currentSession().mask.modelConfig;
const isGoogle = modelConfig.providerName === ServiceProvider.Google;
const isAzure = modelConfig.providerName === ServiceProvider.Azure;
const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
const isBaidu = modelConfig.providerName == ServiceProvider.Baidu;
const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance;
const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek;
const isDeepSeek = modelConfig.providerName === ServiceProvider.DeepSeek;
const isXAI = modelConfig.providerName === ServiceProvider.XAI;
const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
const isSiliconFlow =
modelConfig.providerName === ServiceProvider.SiliconFlow;
const isEnabledAccessControl = accessStore.enabledAccessControl();
const apiKey = isGoogle
? accessStore.googleApiKey
: isAzure
? accessStore.azureApiKey
: isAnthropic
? accessStore.anthropicApiKey
: isByteDance
? accessStore.bytedanceApiKey
: isAlibaba
? accessStore.alibabaApiKey
: isMoonshot
? accessStore.moonshotApiKey
: isXAI
? accessStore.xaiApiKey
: isDeepSeek
? accessStore.deepseekApiKey
: isChatGLM
? accessStore.chatglmApiKey
: isSiliconFlow
? accessStore.siliconflowApiKey
: isIflytek
? accessStore.iflytekApiKey && accessStore.iflytekApiSecret
? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret
: ""
: accessStore.openaiApiKey;
return {
isGoogle,
isAzure,
isAnthropic,
isBaidu,
isByteDance,
isAlibaba,
isMoonshot,
isIflytek,
isDeepSeek,
isXAI,
isChatGLM,
isSiliconFlow,
apiKey,
isEnabledAccessControl,
};
}
function getAuthHeader(): string {
return isAzure
? "api-key"
: isAnthropic
? "x-api-key"
: isGoogle
? "x-goog-api-key"
: "Authorization";
}
const {
isGoogle,
isAzure,
isAnthropic,
isBaidu,
isByteDance,
isAlibaba,
isMoonshot,
isIflytek,
isDeepSeek,
isXAI,
isChatGLM,
isSiliconFlow,
apiKey,
isEnabledAccessControl,
} = getConfig();
// when using baidu api in app, not set auth header
if (isBaidu && clientConfig?.isApp) return headers;
const authHeader = getAuthHeader();
const bearerToken = getBearerToken(
apiKey,
isAzure || isAnthropic || isGoogle,
);
if (bearerToken) {
headers[authHeader] = bearerToken;
} else if (isEnabledAccessControl && validString(accessStore.accessCode)) {
headers["Authorization"] = getBearerToken(
ACCESS_CODE_PREFIX + accessStore.accessCode,
);
} }
return headers; return headers;
} }
export function getClientApi(provider: ServiceProvider): ClientApi {
switch (provider) {
case ServiceProvider.Google:
return new ClientApi(ModelProvider.GeminiPro);
case ServiceProvider.Anthropic:
return new ClientApi(ModelProvider.Claude);
case ServiceProvider.Baidu:
return new ClientApi(ModelProvider.Ernie);
case ServiceProvider.ByteDance:
return new ClientApi(ModelProvider.Doubao);
case ServiceProvider.Alibaba:
return new ClientApi(ModelProvider.Qwen);
case ServiceProvider.Tencent:
return new ClientApi(ModelProvider.Hunyuan);
case ServiceProvider.Moonshot:
return new ClientApi(ModelProvider.Moonshot);
case ServiceProvider.Iflytek:
return new ClientApi(ModelProvider.Iflytek);
case ServiceProvider.DeepSeek:
return new ClientApi(ModelProvider.DeepSeek);
case ServiceProvider.XAI:
return new ClientApi(ModelProvider.XAI);
case ServiceProvider.ChatGLM:
return new ClientApi(ModelProvider.ChatGLM);
case ServiceProvider.SiliconFlow:
return new ClientApi(ModelProvider.SiliconFlow);
default:
return new ClientApi(ModelProvider.GPT);
}
}

View File

@ -0,0 +1,277 @@
"use client";
import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import {
preProcessImageContentForAlibabaDashScope,
streamWithThink,
} from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
MultimodalContent,
MultimodalContentForAlibaba,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
isVisionModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
// Shape of an OpenAI-compatible /models listing response.
// Only the fields this client reads are declared.
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

// DashScope nests the conversation under `input.messages` rather than a
// top-level `messages` array as the OpenAI API does.
interface RequestInput {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
}

// Sampling / decoding parameters for a DashScope request.
interface RequestParam {
  result_format: string;
  // When true the server streams partial output via SSE.
  incremental_output?: boolean;
  temperature: number;
  repetition_penalty?: number;
  top_p: number;
  max_tokens?: number;
}

// Top-level request body sent to the Alibaba DashScope chat endpoint.
interface RequestPayload {
  model: string;
  input: RequestInput;
  parameters: RequestParam;
}
/**
 * LLM client for Alibaba's DashScope (Qwen) chat API.
 *
 * Supports streaming with "thinking" (reasoning_content) segments, vision
 * models (image content pre-processing), and plugin tool calls.
 */
export class QwenApi implements LLMApi {
  /**
   * Build the full request URL for a given API path.
   *
   * Uses the user-configured endpoint when custom config is enabled,
   * otherwise falls back to the direct DashScope base URL in the desktop
   * app or the local proxy path in the browser.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.alibabaUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba;
    }

    // Normalize: no trailing slash, and force https:// for bare hostnames.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Alibaba)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Pull the assistant text out of a non-streaming DashScope response.
  extractMessage(res: any) {
    return res?.output?.choices?.at(0)?.message?.content ?? "";
  }

  // Text-to-speech is not supported by this provider client.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Send a chat request.
   *
   * Streams via SSE (with tool-call and reasoning support) when
   * `options.config.stream` is set; otherwise performs a single fetch and
   * reports the result through `options.onFinish`.
   */
  async chat(options: ChatOptions) {
    // Session/mask config, with the per-request model taking precedence.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const visionModel = isVisionModel(options.config.model);

    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      // Vision models get image content converted to DashScope's format;
      // assistant turns are stripped of <think> segments, other turns are
      // flattened to plain text.
      const content = (
        visionModel
          ? await preProcessImageContentForAlibabaDashScope(v.content)
          : v.role === "assistant"
            ? getMessageTextContentWithoutThinking(v)
            : getMessageTextContent(v)
      ) as any;

      messages.push({ role: v.role, content });
    }

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      model: modelConfig.model,
      input: {
        messages,
      },
      parameters: {
        result_format: "message",
        incremental_output: shouldStream,
        temperature: modelConfig.temperature,
        // max_tokens: modelConfig.max_tokens,
        top_p: modelConfig.top_p === 1 ? 0.99 : modelConfig.top_p, // qwen top_p is should be < 1
      },
    };

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      // DashScope selects streaming vs. blocking via this header.
      const headers = {
        ...getHeaders(),
        "X-DashScope-SSE": shouldStream ? "enable" : "disable",
      };

      const chatPath = this.path(Alibaba.ChatPath(modelConfig.model));
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: headers,
      };

      // make a fetch request
      // NOTE(review): in the streaming branch below this timer is never
      // cleared explicitly; presumably streamWithThink/abort handling makes
      // that benign — TODO confirm.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          headers,
          tools as any,
          funcs,
          controller,
          // parseSSE: map one SSE chunk to { isThinking, content } and
          // accumulate any tool calls into runTools.
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.output.choices as Array<{
              message: {
                content: string | null | MultimodalContentForAlibaba[];
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

            if (!choices?.length) return { isThinking: false, content: "" };

            const tool_calls = choices[0]?.message?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              // A chunk with an id starts a new tool call; chunks without
              // an id append argument fragments to the call at `index`.
              if (id) {
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }

            const reasoning = choices[0]?.message?.reasoning_content;
            const content = choices[0]?.message?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                // Multimodal chunks carry an array of parts; join their
                // text fields into a single string.
                content: Array.isArray(content)
                  ? content.map((item) => item.text).join(",")
                  : content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // Append the assistant tool-call message and its results to the
            // conversation before the follow-up request.
            requestPayload?.input?.messages?.splice(
              requestPayload?.input?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  // Usage accounting is not implemented for this provider.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Model discovery is not implemented; the static model list is used instead.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
export { Alibaba };

View File

@ -0,0 +1,415 @@
import { Anthropic, ApiPath } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api";
import {
useAccessStore,
useAppConfig,
useChatStore,
usePluginStore,
ChatMessageTool,
} from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { ANTHROPIC_BASE_URL } from "@/app/constant";
import { getMessageTextContent, isVisionModel } from "@/app/utils";
import { preProcessImageContent, stream } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
// One content block of an Anthropic message: either text or a base64 image
// (with its media type) in the Messages API block format.
export type MultiBlockContent = {
  type: "image" | "text";
  source?: {
    type: string;
    media_type: string;
    data: string;
  };
  text?: string;
};

// A single turn in an Anthropic conversation. Roles are restricted to the
// values produced by ClaudeMapper below.
export type AnthropicMessage = {
  role: (typeof ClaudeMapper)[keyof typeof ClaudeMapper];
  content: string | MultiBlockContent[];
};

// Request body for the Anthropic Messages API.
export interface AnthropicChatRequest {
  model: string; // The model that will complete your prompt.
  messages: AnthropicMessage[]; // The prompt that you want Claude to complete.
  max_tokens: number; // The maximum number of tokens to generate before stopping.
  stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
  temperature?: number; // Amount of randomness injected into the response.
  top_p?: number; // Use nucleus sampling.
  top_k?: number; // Only sample from the top K options for each subsequent token.
  metadata?: object; // An object describing metadata about the request.
  stream?: boolean; // Whether to incrementally stream the response using server-sent events.
}

// Request body for the legacy (prompt/completion) Anthropic API.
export interface ChatRequest {
  model: string; // The model that will complete your prompt.
  prompt: string; // The prompt that you want Claude to complete.
  max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping.
  stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
  temperature?: number; // Amount of randomness injected into the response.
  top_p?: number; // Use nucleus sampling.
  top_k?: number; // Only sample from the top K options for each subsequent token.
  metadata?: object; // An object describing metadata about the request.
  stream?: boolean; // Whether to incrementally stream the response using server-sent events.
}

// Response body of the legacy completion API.
export interface ChatResponse {
  completion: string;
  stop_reason: "stop_sequence" | "max_tokens";
  model: string;
}

// Streaming variant of ChatResponse with per-event metadata.
export type ChatStreamResponse = ChatResponse & {
  stop?: string;
  log_id: string;
};

// Maps this app's message roles onto the roles Anthropic accepts; system
// messages are downgraded to user turns.
const ClaudeMapper = {
  assistant: "assistant",
  user: "user",
  system: "user",
} as const;
const keys = ["claude-2, claude-instant-1"];
/**
 * LLM client for the Anthropic Messages API (Claude).
 *
 * Handles Claude's strict user/assistant role alternation, vision (image)
 * content blocks, streaming with tool use, and optional Cloudflare AI
 * Gateway URL rewriting.
 */
export class ClaudeApi implements LLMApi {
  // Text-to-speech is not supported by this provider client.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  // Pull the first text block out of a non-streaming Messages API response.
  extractMessage(res: any) {
    console.log("[Response] claude response: ", res);

    return res?.content?.[0]?.text;
  }

  /**
   * Send a chat request to Claude.
   *
   * Builds an AnthropicChatRequest from the session messages, enforcing
   * role alternation, then either streams (with tool-call support) or
   * performs a single fetch.
   */
  async chat(options: ChatOptions): Promise<void> {
    const visionModel = isVisionModel(options.config.model);

    const accessStore = useAccessStore.getState();

    const shouldStream = !!options.config.stream;

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    // try get base64image from local cache image_url
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = await preProcessImageContent(v.content);
      messages.push({ role: v.role, content });
    }

    const keys = ["system", "user"];

    // roles must alternate between "user" and "assistant" in claude, so add a fake assistant message between two user messages
    for (let i = 0; i < messages.length - 1; i++) {
      const message = messages[i];
      const nextMessage = messages[i + 1];

      if (keys.includes(message.role) && keys.includes(nextMessage.role)) {
        // Replace the entry with a pair; flattened below via .flat().
        messages[i] = [
          message,
          {
            role: "assistant",
            content: ";",
          },
        ] as any;
      }
    }

    const prompt = messages
      .flat()
      .filter((v) => {
        // Drop empty / whitespace-only messages, which Claude rejects.
        if (!v.content) return false;
        if (typeof v.content === "string" && !v.content.trim()) return false;
        return true;
      })
      .map((v) => {
        const { role, content } = v;
        const insideRole = ClaudeMapper[role] ?? "user";

        // Non-vision models (or plain-string content) send text only.
        if (!visionModel || typeof content === "string") {
          return {
            role: insideRole,
            content: getMessageTextContent(v),
          };
        }
        return {
          role: insideRole,
          content: content
            .filter((v) => v.image_url || v.text)
            .map(({ type, text, image_url }) => {
              if (type === "text") {
                return {
                  type,
                  text: text!,
                };
              }
              // Split a data URL ("data:<mime>;<encoding>,<payload>") into
              // the fields Anthropic's image block expects.
              const { url = "" } = image_url || {};
              const colonIndex = url.indexOf(":");
              const semicolonIndex = url.indexOf(";");
              const comma = url.indexOf(",");

              const mimeType = url.slice(colonIndex + 1, semicolonIndex);
              const encodeType = url.slice(semicolonIndex + 1, comma);
              const data = url.slice(comma + 1);

              return {
                type: "image" as const,
                source: {
                  type: encodeType,
                  media_type: mimeType,
                  data,
                },
              };
            }),
        };
      });

    // Claude requires the conversation to start with a user turn.
    if (prompt[0]?.role === "assistant") {
      prompt.unshift({
        role: "user",
        content: ";",
      });
    }

    const requestBody: AnthropicChatRequest = {
      messages: prompt,
      stream: shouldStream,

      model: modelConfig.model,
      max_tokens: modelConfig.max_tokens,
      temperature: modelConfig.temperature,
      top_p: modelConfig.top_p,
      // top_k: modelConfig.top_k,
      top_k: 5,
    };

    const path = this.path(Anthropic.ChatPath);

    const controller = new AbortController();
    options.onController?.(controller);

    if (shouldStream) {
      // Index of the tool call currently receiving argument fragments.
      let index = -1;
      const [tools, funcs] = usePluginStore
        .getState()
        .getAsTools(
          useChatStore.getState().currentSession().mask?.plugin || [],
        );
      return stream(
        path,
        requestBody,
        {
          ...getHeaders(),
          "anthropic-version": accessStore.anthropicApiVersion,
        },
        // @ts-ignore
        // Convert OpenAI-style tool definitions to Anthropic's shape.
        tools.map((tool) => ({
          name: tool?.function?.name,
          description: tool?.function?.description,
          input_schema: tool?.function?.parameters,
        })),
        funcs,
        controller,
        // parseSSE: extract the text delta from one SSE event and collect
        // tool_use blocks / their streamed JSON arguments into runTools.
        (text: string, runTools: ChatMessageTool[]) => {
          // console.log("parseSSE", text, runTools);
          let chunkJson:
            | undefined
            | {
                type: "content_block_delta" | "content_block_stop";
                content_block?: {
                  type: "tool_use";
                  id: string;
                  name: string;
                };
                delta?: {
                  type: "text_delta" | "input_json_delta";
                  text?: string;
                  partial_json?: string;
                };
                index: number;
              };
          chunkJson = JSON.parse(text);

          if (chunkJson?.content_block?.type == "tool_use") {
            index += 1;
            const id = chunkJson?.content_block.id;
            const name = chunkJson?.content_block.name;
            runTools.push({
              id,
              type: "function",
              function: {
                name,
                arguments: "",
              },
            });
          }
          if (
            chunkJson?.delta?.type == "input_json_delta" &&
            chunkJson?.delta?.partial_json
          ) {
            // @ts-ignore
            runTools[index]["function"]["arguments"] +=
              chunkJson?.delta?.partial_json;
          }
          return chunkJson?.delta?.text;
        },
        // processToolMessage, include tool_calls message and tool call results
        (
          requestPayload: RequestPayload,
          toolCallMessage: any,
          toolCallResult: any[],
        ) => {
          // reset index value
          index = -1;
          // @ts-ignore
          requestPayload?.messages?.splice(
            // @ts-ignore
            requestPayload?.messages?.length,
            0,
            {
              role: "assistant",
              content: toolCallMessage.tool_calls.map(
                (tool: ChatMessageTool) => ({
                  type: "tool_use",
                  id: tool.id,
                  name: tool?.function?.name,
                  input: tool?.function?.arguments
                    ? JSON.parse(tool?.function?.arguments)
                    : {},
                }),
              ),
            },
            // @ts-ignore
            // Tool results go back as user messages with tool_result blocks.
            ...toolCallResult.map((result) => ({
              role: "user",
              content: [
                {
                  type: "tool_result",
                  tool_use_id: result.tool_call_id,
                  content: result.content,
                },
              ],
            })),
          );
        },
        options,
      );
    } else {
      const payload = {
        method: "POST",
        body: JSON.stringify(requestBody),
        signal: controller.signal,
        headers: {
          ...getHeaders(), // get common headers
          "anthropic-version": accessStore.anthropicApiVersion,
          // do not send `anthropicApiKey` in browser!!!
          // Authorization: getAuthKey(accessStore.anthropicApiKey),
        },
      };

      try {
        // Abort is surfaced as a finished-empty response with status 400.
        controller.signal.onabort = () =>
          options.onFinish("", new Response(null, { status: 400 }));

        const res = await fetch(path, payload);
        const resJson = await res.json();

        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      } catch (e) {
        console.error("failed to chat", e);
        options.onError?.(e as Error);
      }
    }
  }

  // Usage accounting is not implemented for this provider.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Model discovery is disabled; the commented entries document the models
  // previously listed here.
  async models() {
    // const provider = {
    //   id: "anthropic",
    //   providerName: "Anthropic",
    //   providerType: "anthropic",
    // };

    return [
      // {
      //   name: "claude-instant-1.2",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-2.0",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-2.1",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-opus-20240229",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-sonnet-20240229",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-haiku-20240307",
      //   available: true,
      //   provider,
      // },
    ];
  }

  /**
   * Build the full request URL for a given API path, honoring custom
   * endpoints, app vs. browser defaults, and Cloudflare AI Gateway rewrites.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl: string = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.anthropicUrl;
    }

    // if endpoint is empty, use default endpoint
    if (baseUrl.trim().length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic;
    }

    if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {
      baseUrl = "https://" + baseUrl;
    }

    baseUrl = trimEnd(baseUrl, "/");

    // try rebuild url, when using cloudflare ai gateway in client
    return cloudflareAIGatewayUrl(`${baseUrl}/${path}`);
  }
}
/**
 * Strip every trailing occurrence of the suffix `end` from `s`.
 *
 * @param s - The string to trim.
 * @param end - The suffix to remove repeatedly (defaults to a single space).
 * @returns `s` without any trailing copies of `end`.
 */
function trimEnd(s: string, end = " ") {
  // A zero-length suffix always matches and would loop forever; no-op.
  if (end.length === 0) {
    return s;
  }
  let result = s;
  while (result.endsWith(end)) {
    result = result.slice(0, result.length - end.length);
  }
  return result;
}

View File

@ -0,0 +1,284 @@
"use client";
import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getAccessToken } from "@/app/utils/baidu";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
MultimodalContent,
SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
EventStreamContentType,
fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
import { fetch } from "@/app/utils/stream";
// Shape of an OpenAI-compatible /models listing response.
// Only the fields this client reads are declared.
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

// Request body for Baidu's ERNIE chat endpoint (OpenAI-like flat shape).
interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}
/**
 * LLM client for Baidu's ERNIE chat API.
 *
 * Works around ERNIE's message constraints (no system role, odd message
 * count), handles access-token authentication in the desktop app, and
 * streams responses via SSE with a smoothing animation.
 */
export class ErnieApi implements LLMApi {
  /**
   * Build the full request URL for a given API path, honoring custom
   * endpoints and app vs. browser defaults.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.baiduUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      // do not use proxy for baidubce api
      baseUrl = isApp ? BAIDU_BASE_URL : ApiPath.Baidu;
    }

    // Normalize: no trailing slash, and force https:// for bare hostnames.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Baidu)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Text-to-speech is not supported by this provider client.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Send a chat request to ERNIE.
   *
   * Streams via fetchEventSource when `options.config.stream` is set;
   * otherwise performs a single fetch and reports via `options.onFinish`.
   */
  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function",
      role: v.role === "system" ? "user" : v.role,
      content: getMessageTextContent(v),
    }));

    // "error_code": 336006, "error_msg": "the length of messages must be an odd number",
    if (messages.length % 2 === 0) {
      // Insert a placeholder turn to make the count odd while preserving
      // user/assistant alternation.
      if (messages.at(0)?.role === "user") {
        messages.splice(1, 0, {
          role: "assistant",
          content: " ",
        });
      } else {
        messages.unshift({
          role: "user",
          content: " ",
        });
      }
    }

    // Session/mask config, with the per-request model taking precedence.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      messages,
      stream: shouldStream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    console.log("[Request] Baidu payload: ", requestPayload);

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      let chatPath = this.path(Baidu.ChatPath(modelConfig.model));

      // getAccessToken can not run in browser, because cors error
      if (!!getClientConfig()?.isApp) {
        const accessStore = useAccessStore.getState();
        if (accessStore.useCustomConfig) {
          if (accessStore.isValidBaidu()) {
            const { access_token } = await getAccessToken(
              accessStore.baiduApiKey,
              accessStore.baiduSecretKey,
            );
            chatPath = `${chatPath}${
              chatPath.includes("?") ? "&" : "?"
            }access_token=${access_token}`;
          }
        }
      }
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;
        let responseRes: Response;

        // animate response to make it looks smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            // Drain ~1/60th of the buffered text per frame (at least 1 char)
            // so long responses appear to type out smoothly.
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animaion
        animateResponseText();

        // Idempotent completion: flush whatever text remains and notify once.
        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText, responseRes);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          fetch: fetch as any,
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log("[Baidu] request response content type: ", contentType);
            responseRes = res;

            // A plain-text body means a non-streaming (usually error) reply.
            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              // Collect whatever diagnostics the body offers and finish with
              // them as the visible response.
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const delta = json?.result;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            // Rethrow so fetchEventSource stops retrying.
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = resJson?.result;
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  // Usage accounting is not implemented for this provider.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Model discovery is not implemented; the static model list is used instead.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
export { Baidu };

View File

@ -0,0 +1,250 @@
"use client";
import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
MultimodalContent,
SpeechOptions,
} from "../api";
import { streamWithThink } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client";
import { preProcessImageContent } from "@/app/utils/chat";
import {
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
// Shape of an OpenAI-compatible model-list response (declared for callers;
// not consumed inside this file's visible code).
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

// Chat-completion request body sent to the ByteDance/Doubao endpoint.
interface RequestPayloadForByteDance {
  // Conversation history; content may be plain text or multimodal parts.
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}
/**
 * LLM client for the ByteDance/Doubao chat API.
 * Supports streaming with "thinking" (reasoning_content) deltas and
 * plugin tool calls via streamWithThink.
 */
export class DoubaoApi implements LLMApi {
  /**
   * Build the full request URL for a ByteDance endpoint.
   * Precedence: user-configured custom URL -> BYTEDANCE_BASE_URL (desktop app)
   * or the ApiPath.ByteDance proxy path (web).
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.bytedanceUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance;
    }

    // Normalize: strip trailing slash, force https:// for bare hosts.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ByteDance)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Extract the assistant message text from a non-streaming response.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  // Text-to-speech is not supported by this provider.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Send a chat request.
   * Streams via streamWithThink when options.config.stream is set,
   * otherwise performs a single fetch and returns the full message.
   */
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      // Assistant turns: strip prior "thinking" text before resending.
      // Other turns: resolve cached image URLs into inline content.
      const content =
        v.role === "assistant"
          ? getMessageTextContentWithoutThinking(v)
          : await preProcessImageContent(v.content);
      messages.push({ role: v.role, content });
    }

    // App defaults <- session mask config <- per-request model override.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayloadForByteDance = {
      messages,
      stream: shouldStream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(ByteDance.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      // Abort automatically after a model-dependent timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: turn one SSE chunk into { isThinking, content },
          // accumulating tool-call fragments into runTools.
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

            // Chunks without a choices array (e.g. keep-alive frames) are ignored.
            if (!choices?.length) return { isThinking: false, content: "" };

            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First fragment of a tool call carries the id: start a new entry.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Subsequent fragments append argument text to the entry at `index`.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayloadForByteDance,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // Append the tool-call message and its results so the follow-up
            // request carries the full tool exchange.
            requestPayload?.messages?.splice(
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  // Usage metering is not supported; always zeros.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Remote model listing is not supported.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
export { ByteDance };

View File

@ -0,0 +1,253 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { streamWithThink } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
/**
 * LLM client for the DeepSeek chat API.
 * Handles the provider's constraint that the first non-system message must
 * come from the user, and streams reasoning ("thinking") deltas and tool
 * calls via streamWithThink.
 */
export class DeepSeekApi implements LLMApi {
  private disableListModels = true;

  /**
   * Resolve the base URL for DeepSeek requests.
   * Precedence: user-configured custom URL -> DEEPSEEK_BASE_URL (desktop app)
   * or the ApiPath.DeepSeek proxy path (web).
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.deepseekUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.DeepSeek;
      baseUrl = isApp ? DEEPSEEK_BASE_URL : apiPath;
    }

    // Normalize: strip trailing slash, force https:// for bare hosts.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.DeepSeek)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Extract the assistant message text from a non-streaming response.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  // Text-to-speech is not supported by this provider.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Send a chat request.
   * Streams via streamWithThink when options.config.stream is set,
   * otherwise performs a single fetch and returns the full message.
   */
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        // Strip prior "thinking" text from assistant turns before resending.
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        const content = getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    // 检测并修复消息顺序，确保除system外的第一个消息是user
    // DeepSeek rejects conversations whose first non-system message is not a
    // user message, so drop leading assistant turns until the first user turn.
    const filteredMessages: ChatOptions["messages"] = [];
    let hasFoundFirstUser = false;

    for (const msg of messages) {
      if (msg.role === "system") {
        // Keep all system messages
        filteredMessages.push(msg);
      } else if (msg.role === "user") {
        // User message directly added
        filteredMessages.push(msg);
        hasFoundFirstUser = true;
      } else if (hasFoundFirstUser) {
        // After finding the first user message, all subsequent non-system messages are retained.
        filteredMessages.push(msg);
      }
      // If hasFoundFirstUser is false and it is not a system message, it will be skipped.
    }

    // App defaults <- session mask config <- per-request overrides.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages: filteredMessages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
    };

    console.log("[Request] openai payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(DeepSeek.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      // Abort automatically after a model-dependent timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: turn one SSE chunk into { isThinking, content },
          // accumulating tool-call fragments into runTools.
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;

            // FIX: guard against chunks without a choices array (keep-alive /
            // usage-only frames); previously choices[0] threw a TypeError.
            // Mirrors the sibling ByteDance implementation.
            if (!choices?.length) return { isThinking: false, content: "" };

            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First fragment of a tool call carries the id: start a new entry.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Subsequent fragments append argument text to the entry at `index`.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // Append the tool-call message and its results so the follow-up
            // request carries the full tool exchange.
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  // Usage metering is not supported; always zeros.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Remote model listing is not supported.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}

292
app/client/platforms/glm.ts Normal file
View File

@ -0,0 +1,292 @@
"use client";
import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
import { preProcessImageContent } from "@/app/utils/chat";
// Fields shared by every GLM request payload.
interface BasePayload {
  model: string;
}

// Payload for chat-completion requests.
interface ChatPayload extends BasePayload {
  messages: ChatOptions["messages"];
  stream?: boolean;
  temperature?: number;
  presence_penalty?: number;
  frequency_penalty?: number;
  top_p?: number;
}

// Payload for cogview image-generation requests.
interface ImageGenerationPayload extends BasePayload {
  prompt: string;
  size?: string;
  user_id?: string;
}

// Payload for cogvideo video-generation requests.
interface VideoGenerationPayload extends BasePayload {
  prompt: string;
  duration?: number;
  resolution?: string;
  user_id?: string;
}

// Request category, derived from the model-name prefix (see getModelType).
type ModelType = "chat" | "image" | "video";
/**
 * LLM client for the ZhipuAI ChatGLM API.
 * Dispatches between chat, image (cogview-*) and video (cogvideo-*) models,
 * with streaming and plugin tool-call support for chat models.
 */
export class ChatGLMApi implements LLMApi {
  private disableListModels = true;

  // Classify the request by model-name prefix.
  private getModelType(model: string): ModelType {
    if (model.startsWith("cogview-")) return "image";
    if (model.startsWith("cogvideo-")) return "video";
    return "chat";
  }

  // Map the request category to its API endpoint path.
  private getModelPath(type: ModelType): string {
    switch (type) {
      case "image":
        return ChatGLM.ImagePath;
      case "video":
        return ChatGLM.VideoPath;
      default:
        return ChatGLM.ChatPath;
    }
  }

  /**
   * Build the request payload for the detected model type.
   * Image/video requests use only the last message's text as the prompt;
   * chat requests send the full history plus sampling parameters.
   */
  private createPayload(
    messages: ChatOptions["messages"],
    modelConfig: any,
    options: ChatOptions,
  ): BasePayload {
    const modelType = this.getModelType(modelConfig.model);
    const lastMessage = messages[messages.length - 1];
    // NOTE(review): for multimodal content, parts without a `text` field would
    // stringify as "undefined" here — presumably image/video prompts are
    // text-only; confirm against callers.
    const prompt =
      typeof lastMessage.content === "string"
        ? lastMessage.content
        : lastMessage.content.map((c) => c.text).join("\n");

    switch (modelType) {
      case "image":
        return {
          model: modelConfig.model,
          prompt,
          size: options.config.size,
        } as ImageGenerationPayload;
      default:
        return {
          messages,
          stream: options.config.stream,
          model: modelConfig.model,
          temperature: modelConfig.temperature,
          presence_penalty: modelConfig.presence_penalty,
          frequency_penalty: modelConfig.frequency_penalty,
          top_p: modelConfig.top_p,
        } as ChatPayload;
    }
  }

  /**
   * Convert a non-streaming response into display text:
   * markdown image for image models, an HTML video tag for video models,
   * plain message text otherwise.
   */
  private parseResponse(modelType: ModelType, json: any): string {
    switch (modelType) {
      case "image": {
        const imageUrl = json.data?.[0]?.url;
        return imageUrl ? `![Generated Image](${imageUrl})` : "";
      }
      case "video": {
        const videoUrl = json.data?.[0]?.url;
        return videoUrl ? `<video controls src="${videoUrl}"></video>` : "";
      }
      default:
        return this.extractMessage(json);
    }
  }

  /**
   * Resolve the base URL for ChatGLM requests.
   * Precedence: user-configured custom URL -> CHATGLM_BASE_URL (desktop app)
   * or the ApiPath.ChatGLM proxy path (web).
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.chatglmUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.ChatGLM;
      baseUrl = isApp ? CHATGLM_BASE_URL : apiPath;
    }

    // Normalize: strip trailing slash, force https:// for bare hosts.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ChatGLM)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Extract the assistant message text from a non-streaming chat response.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  // Text-to-speech is not supported by this provider.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Send a request to the appropriate GLM endpoint.
   * Image/video models always use a single non-streaming fetch;
   * chat models stream when options.config.stream is set.
   */
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      // Vision models keep inline image content; others are reduced to text.
      const content = visionModel
        ? await preProcessImageContent(v.content)
        : getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    // App defaults <- session mask config <- per-request overrides.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };
    const modelType = this.getModelType(modelConfig.model);
    const requestPayload = this.createPayload(messages, modelConfig, options);
    const path = this.path(this.getModelPath(modelType));

    console.log(`[Request] glm ${modelType} payload: `, requestPayload);

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort automatically after a model-dependent timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (modelType === "image" || modelType === "video") {
        // Generation endpoints respond once; no streaming.
        const res = await fetch(path, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        console.log(`[Response] glm ${modelType}:`, resJson);
        const message = this.parseResponse(modelType, resJson);
        options.onFinish(message, res);
        return;
      }

      const shouldStream = !!options.config.stream;
      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          path,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: extract the content delta from one SSE chunk,
          // accumulating tool-call fragments into runTools.
          (text: string, runTools: ChatMessageTool[]) => {
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First fragment of a tool call carries the id: start a new entry.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Subsequent fragments append argument text to the entry at `index`.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: append the tool-call message and its results
          // so the follow-up request carries the full tool exchange.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(path, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  // Usage metering is not supported; always zeros.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Remote model listing is not supported.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}

View File

@ -1,24 +1,128 @@
import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; import { ApiPath, Google } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; import {
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; ChatOptions,
getHeaders,
LLMApi,
LLMModel,
LLMUsage,
SpeechOptions,
} from "../api";
import {
useAccessStore,
useAppConfig,
useChatStore,
usePluginStore,
ChatMessageTool,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import { getClientConfig } from "@/app/config/client"; import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant"; import { GEMINI_BASE_URL } from "@/app/constant";
import {
getMessageTextContent,
getMessageImages,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { nanoid } from "nanoid";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export class GeminiProApi implements LLMApi { export class GeminiProApi implements LLMApi {
path(path: string, shouldStream = false): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
baseUrl = accessStore.googleUrl;
}
const isApp = !!getClientConfig()?.isApp;
if (baseUrl.length === 0) {
baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
}
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) {
baseUrl = "https://" + baseUrl;
}
console.log("[Proxy Endpoint] ", baseUrl, path);
let chatPath = [baseUrl, path].join("/");
if (shouldStream) {
chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse";
}
return chatPath;
}
extractMessage(res: any) { extractMessage(res: any) {
console.log("[Response] gemini-pro response: ", res); console.log("[Response] gemini-pro response: ", res);
const getTextFromParts = (parts: any[]) => {
if (!Array.isArray(parts)) return "";
return parts
.map((part) => part?.text || "")
.filter((text) => text.trim() !== "")
.join("\n\n");
};
let content = "";
if (Array.isArray(res)) {
res.map((item) => {
content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
});
}
return ( return (
res?.candidates?.at(0)?.content?.parts.at(0)?.text || getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
res?.error?.message || res?.error?.message ||
"" ""
); );
} }
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
async chat(options: ChatOptions): Promise<void> { async chat(options: ChatOptions): Promise<void> {
// const apiClient = this; const apiClient = this;
const messages = options.messages.map((v) => ({ let multimodal = false;
role: v.role.replace("assistant", "model").replace("system", "user"),
parts: [{ text: v.content }], // try get base64image from local cache image_url
})); const _messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = await preProcessImageContent(v.content);
_messages.push({ role: v.role, content });
}
const messages = _messages.map((v) => {
let parts: any[] = [{ text: getMessageTextContent(v) }];
if (isVisionModel(options.config.model)) {
const images = getMessageImages(v);
if (images.length > 0) {
multimodal = true;
parts = parts.concat(
images.map((image) => {
const imageType = image.split(";")[0].split(":")[1];
const imageData = image.split(",")[1];
return {
inline_data: {
mime_type: imageType,
data: imageData,
},
};
}),
);
}
}
return {
role: v.role.replace("assistant", "model").replace("system", "user"),
parts: parts,
};
});
// google requires that role in neighboring messages must not be the same // google requires that role in neighboring messages must not be the same
for (let i = 0; i < messages.length - 1; ) { for (let i = 0; i < messages.length - 1; ) {
@ -33,6 +137,11 @@ export class GeminiProApi implements LLMApi {
i++; i++;
} }
} }
// if (visionModel && messages.length > 1) {
// options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
// }
const accessStore = useAccessStore.getState();
const modelConfig = { const modelConfig = {
...useAppConfig.getState().modelConfig, ...useAppConfig.getState().modelConfig,
@ -55,44 +164,33 @@ export class GeminiProApi implements LLMApi {
safetySettings: [ safetySettings: [
{ {
category: "HARM_CATEGORY_HARASSMENT", category: "HARM_CATEGORY_HARASSMENT",
threshold: "BLOCK_ONLY_HIGH", threshold: accessStore.googleSafetySettings,
}, },
{ {
category: "HARM_CATEGORY_HATE_SPEECH", category: "HARM_CATEGORY_HATE_SPEECH",
threshold: "BLOCK_ONLY_HIGH", threshold: accessStore.googleSafetySettings,
}, },
{ {
category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
threshold: "BLOCK_ONLY_HIGH", threshold: accessStore.googleSafetySettings,
}, },
{ {
category: "HARM_CATEGORY_DANGEROUS_CONTENT", category: "HARM_CATEGORY_DANGEROUS_CONTENT",
threshold: "BLOCK_ONLY_HIGH", threshold: accessStore.googleSafetySettings,
}, },
], ],
}; };
const accessStore = useAccessStore.getState();
let baseUrl = accessStore.googleUrl;
const isApp = !!getClientConfig()?.isApp;
let shouldStream = !!options.config.stream; let shouldStream = !!options.config.stream;
const controller = new AbortController(); const controller = new AbortController();
options.onController?.(controller); options.onController?.(controller);
try { try {
let chatPath = this.path(Google.ChatPath); // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
const chatPath = this.path(
Google.ChatPath(modelConfig.model),
shouldStream,
);
// let baseUrl = accessStore.googleUrl;
if (!baseUrl) {
baseUrl = isApp
? DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath
: chatPath;
}
if (isApp) {
baseUrl += `?key=${accessStore.googleApiKey}`;
}
const chatPayload = { const chatPayload = {
method: "POST", method: "POST",
body: JSON.stringify(requestPayload), body: JSON.stringify(requestPayload),
@ -100,99 +198,97 @@ export class GeminiProApi implements LLMApi {
headers: getHeaders(), headers: getHeaders(),
}; };
const isThinking = options.config.model.includes("-thinking");
// make a fetch request // make a fetch request
const requestTimeoutId = setTimeout( const requestTimeoutId = setTimeout(
() => controller.abort(), () => controller.abort(),
REQUEST_TIMEOUT_MS, getTimeoutMSByModel(options.config.model),
); );
if (shouldStream) { if (shouldStream) {
let responseText = ""; const [tools, funcs] = usePluginStore
let remainText = ""; .getState()
let finished = false; .getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
return stream(
chatPath,
requestPayload,
getHeaders(),
// @ts-ignore
tools.length > 0
? // @ts-ignore
[{ functionDeclarations: tools.map((tool) => tool.function) }]
: [],
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const chunkJson = JSON.parse(text);
let existingTexts: string[] = []; const functionCall = chunkJson?.candidates
const finish = () => { ?.at(0)
finished = true; ?.content.parts.at(0)?.functionCall;
options.onFinish(existingTexts.join("")); if (functionCall) {
}; const { name, args } = functionCall;
runTools.push({
// animate response to make it looks smooth id: nanoid(),
function animateResponseText() { type: "function",
if (finished || controller.signal.aborted) { function: {
responseText += remainText; name,
finish(); arguments: JSON.stringify(args), // utils.chat call function, using JSON.parse
return; },
} });
}
if (remainText.length > 0) { return chunkJson?.candidates
const fetchCount = Math.max(1, Math.round(remainText.length / 60)); ?.at(0)
const fetchText = remainText.slice(0, fetchCount); ?.content.parts?.map((part: { text: string }) => part.text)
responseText += fetchText; .join("\n\n");
remainText = remainText.slice(fetchCount); },
options.onUpdate?.(responseText, fetchText); // processToolMessage, include tool_calls message and tool call results
} (
requestPayload: RequestPayload,
requestAnimationFrame(animateResponseText); toolCallMessage: any,
} toolCallResult: any[],
) => {
// start animaion // @ts-ignore
animateResponseText(); requestPayload?.contents?.splice(
// @ts-ignore
fetch( requestPayload?.contents?.length,
baseUrl.replace("generateContent", "streamGenerateContent"), 0,
chatPayload, {
) role: "model",
.then((response) => { parts: toolCallMessage.tool_calls.map(
const reader = response?.body?.getReader(); (tool: ChatMessageTool) => ({
const decoder = new TextDecoder(); functionCall: {
let partialData = ""; name: tool?.function?.name,
args: JSON.parse(tool?.function?.arguments as string),
return reader?.read().then(function processText({ },
done, }),
value, ),
}): Promise<any> { },
if (done) { // @ts-ignore
console.log("Stream complete"); ...toolCallResult.map((result) => ({
// options.onFinish(responseText + remainText); role: "function",
finished = true; parts: [
return Promise.resolve(); {
} functionResponse: {
name: result.name,
partialData += decoder.decode(value, { stream: true }); response: {
name: result.name,
try { content: result.content, // TODO just text content...
let data = JSON.parse(ensureProperEnding(partialData)); },
},
const textArray = data.reduce(
(acc: string[], item: { candidates: any[] }) => {
const texts = item.candidates.map((candidate) =>
candidate.content.parts
.map((part: { text: any }) => part.text)
.join(""),
);
return acc.concat(texts);
}, },
[], ],
); })),
);
if (textArray.length > existingTexts.length) { },
const deltaArray = textArray.slice(existingTexts.length); options,
existingTexts = textArray; );
remainText += deltaArray.join("");
}
} catch (error) {
// console.log("[Response Animation] error: ", error,partialData);
// skip error message when parsing json
}
return reader.read().then(processText);
});
})
.catch((error) => {
console.error("Error:", error);
});
} else { } else {
const res = await fetch(baseUrl, chatPayload); const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId); clearTimeout(requestTimeoutId);
const resJson = await res.json(); const resJson = await res.json();
if (resJson?.promptFeedback?.blockReason) { if (resJson?.promptFeedback?.blockReason) {
@ -204,8 +300,8 @@ export class GeminiProApi implements LLMApi {
), ),
); );
} }
const message = this.extractMessage(resJson); const message = apiClient.extractMessage(resJson);
options.onFinish(message); options.onFinish(message, res);
} }
} catch (e) { } catch (e) {
console.log("[Request] failed to make a chat request", e); console.log("[Request] failed to make a chat request", e);
@ -218,14 +314,4 @@ export class GeminiProApi implements LLMApi {
async models(): Promise<LLMModel[]> { async models(): Promise<LLMModel[]> {
return []; return [];
} }
path(path: string): string {
return "/api/google/" + path;
}
}
function ensureProperEnding(str: string) {
if (str.startsWith("[") && !str.endsWith("]")) {
return str + "]";
}
return str;
} }

View File

@ -0,0 +1,253 @@
"use client";
import {
ApiPath,
IFLYTEK_BASE_URL,
Iflytek,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
EventStreamContentType,
fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { fetch } from "@/app/utils/stream";
import { RequestPayload } from "./openai";
export class SparkApi implements LLMApi {
private disableListModels = true;
path(path: string): string {
const accessStore = useAccessStore.getState();
let baseUrl = "";
if (accessStore.useCustomConfig) {
baseUrl = accessStore.iflytekUrl;
}
if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp;
const apiPath = ApiPath.Iflytek;
baseUrl = isApp ? IFLYTEK_BASE_URL : apiPath;
}
if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1);
}
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Iflytek)) {
baseUrl = "https://" + baseUrl;
}
console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/");
}
extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? "";
}
speech(options: SpeechOptions): Promise<ArrayBuffer> {
throw new Error("Method not implemented.");
}
// Send a chat-completion request to iFlytek Spark (OpenAI-compatible schema),
// either as an SSE stream with smoothed incremental UI updates, or as a
// single blocking request when options.config.stream is false.
async chat(options: ChatOptions) {
  // Spark accepts plain-text content only, so flatten any multimodal parts.
  const messages: ChatOptions["messages"] = [];
  for (const v of options.messages) {
    const content = getMessageTextContent(v);
    messages.push({ role: v.role, content });
  }

  // Config precedence: app defaults < current session mask < this call's overrides.
  const modelConfig = {
    ...useAppConfig.getState().modelConfig,
    ...useChatStore.getState().currentSession().mask.modelConfig,
    ...{
      model: options.config.model,
      providerName: options.config.providerName,
    },
  };

  const requestPayload: RequestPayload = {
    messages,
    stream: options.config.stream,
    model: modelConfig.model,
    temperature: modelConfig.temperature,
    presence_penalty: modelConfig.presence_penalty,
    frequency_penalty: modelConfig.frequency_penalty,
    top_p: modelConfig.top_p,
    // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    // max_tokens is intentionally not sent (deliberately disabled upstream).
  };

  console.log("[Request] Spark payload: ", requestPayload);

  const shouldStream = !!options.config.stream;
  const controller = new AbortController();
  options.onController?.(controller);

  try {
    const chatPath = this.path(Iflytek.ChatPath);
    const chatPayload = {
      method: "POST",
      body: JSON.stringify(requestPayload),
      signal: controller.signal,
      headers: getHeaders(),
    };

    // Make a fetch request; abort if the server takes too long to answer.
    const requestTimeoutId = setTimeout(
      () => controller.abort(),
      REQUEST_TIMEOUT_MS,
    );

    if (shouldStream) {
      // responseText: text already shown; remainText: received but not yet shown.
      let responseText = "";
      let remainText = "";
      let finished = false;
      let responseRes: Response;

      // Drip-feed remainText into responseText (~1/60th per animation frame)
      // so the reply appears to type smoothly instead of arriving in bursts.
      function animateResponseText() {
        if (finished || controller.signal.aborted) {
          // Flush whatever is still pending before stopping the loop.
          responseText += remainText;
          console.log("[Response Animation] finished");
          return;
        }

        if (remainText.length > 0) {
          const fetchCount = Math.max(1, Math.round(remainText.length / 60));
          const fetchText = remainText.slice(0, fetchCount);
          responseText += fetchText;
          remainText = remainText.slice(fetchCount);
          options.onUpdate?.(responseText, fetchText);
        }

        requestAnimationFrame(animateResponseText);
      }

      // Start animation
      animateResponseText();

      // Idempotent completion: deliver the full text (shown + pending) exactly once.
      const finish = () => {
        if (!finished) {
          finished = true;
          options.onFinish(responseText + remainText, responseRes);
        }
      };

      controller.signal.onabort = finish;

      fetchEventSource(chatPath, {
        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
          const contentType = res.headers.get("content-type");
          console.log("[Spark] request response content type: ", contentType);
          responseRes = res;

          // A plain-text body means the server answered without streaming.
          if (contentType?.startsWith("text/plain")) {
            responseText = await res.clone().text();
            return finish();
          }

          // Handle different error scenarios
          if (
            !res.ok ||
            !res.headers
              .get("content-type")
              ?.startsWith(EventStreamContentType) ||
            res.status !== 200
          ) {
            // Prefer a pretty-printed JSON error body when one is available.
            let extraInfo = await res.clone().text();
            try {
              const resJson = await res.clone().json();
              extraInfo = prettyObject(resJson);
            } catch {}

            if (res.status === 401) {
              extraInfo = Locale.Error.Unauthorized;
            }

            options.onError?.(
              new Error(
                `Request failed with status ${res.status}: ${extraInfo}`,
              ),
            );

            return finish();
          }
        },
        onmessage(msg) {
          if (msg.data === "[DONE]" || finished) {
            return finish();
          }
          const text = msg.data;
          try {
            // Each SSE chunk is an OpenAI-style chat.completion.chunk object.
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: { content: string };
            }>;
            const delta = choices[0]?.delta?.content;

            if (delta) {
              remainText += delta;
            }
          } catch (e) {
            console.error("[Request] parse error", text);
            options.onError?.(new Error(`Failed to parse response: ${text}`));
          }
        },
        onclose() {
          finish();
        },
        onerror(e) {
          options.onError?.(e);
          throw e;
        },
        openWhenHidden: true,
      });
    } else {
      // Non-streaming: one blocking fetch, then extract the whole reply.
      const res = await fetch(chatPath, chatPayload);
      clearTimeout(requestTimeoutId);

      // Surface HTTP-level failures rather than parsing an error body as a reply.
      if (!res.ok) {
        const errorText = await res.text();
        options.onError?.(
          new Error(`Request failed with status ${res.status}: ${errorText}`),
        );
        return;
      }

      const resJson = await res.json();
      const message = this.extractMessage(resJson);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);
    options.onError?.(e as Error);
  }
}
// Usage/quota reporting is not supported for this provider; report zeroed stats.
async usage() {
  const emptyUsage = { used: 0, total: 0 };
  return emptyUsage;
}
async models(): Promise<LLMModel[]> {
return [];
}
}

View File

@ -0,0 +1,200 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import {
ApiPath,
MOONSHOT_BASE_URL,
Moonshot,
REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
/**
 * Moonshot (Kimi) chat client. The API is OpenAI-compatible, so it reuses the
 * OpenAI RequestPayload shape and the shared stream() helper for tool calls.
 */
export class MoonshotApi implements LLMApi {
  private disableListModels = true;

  /**
   * Build the request URL for a given API path.
   * Precedence: user-configured custom URL > app build default > proxy path.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.moonshotUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Moonshot;
      baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
    }

    // Normalize: strip one trailing slash, add a scheme for bare hosts.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Extract the assistant message text from an OpenAI-style response body.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  // Text-to-speech is not supported by the Moonshot integration.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Send a chat-completion request: streaming (with plugin/tool-call support
   * via the shared stream() helper) or a single blocking request.
   */
  async chat(options: ChatOptions) {
    // Moonshot is text-only here: flatten any multimodal content to plain text.
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    // Config precedence: app defaults < session mask < this call's overrides.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is intentionally not sent (deliberately disabled upstream).
    };

    console.log("[Request] moonshot payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Moonshot.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the non-streaming request if it takes too long to respond.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        // Clear our timer before handing off: otherwise it fires after
        // REQUEST_TIMEOUT_MS and aborts the controller mid-stream, cutting off
        // long responses. NOTE(review): stream() appears to manage its own
        // request timeout in app/utils/chat.ts — confirm.
        clearTimeout(requestTimeoutId);

        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: pull tool-call fragments and delta text out of each chunk
          (text: string, runTools: ChatMessageTool[]) => {
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call carries its id and function name.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Later chunks append to the arguments of the call at `index`.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: append the tool-call message and its results
          // so the follow-up request carries the full tool exchange.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        // Surface HTTP-level failures instead of trying to parse an error
        // body as a chat reply.
        if (!res.ok) {
          const errorText = await res.text();
          options.onError?.(
            new Error(`Request failed with status ${res.status}: ${errorText}`),
          );
          return;
        }

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  // Usage/quota reporting is not available; report zeroed stats.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  // Model listing is disabled for Moonshot; the UI uses configured models.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}

View File

@ -1,23 +1,49 @@
"use client"; "use client";
// azure and openai, using same models. so using same LLMApi.
import { import {
ApiPath, ApiPath,
DEFAULT_API_HOST, OPENAI_BASE_URL,
DEFAULT_MODELS, DEFAULT_MODELS,
OpenaiPath, OpenaiPath,
Azure,
REQUEST_TIMEOUT_MS, REQUEST_TIMEOUT_MS,
ServiceProvider, ServiceProvider,
} from "@/app/constant"; } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import Locale from "../../locales";
import { import {
EventStreamContentType, ChatMessageTool,
fetchEventSource, useAccessStore,
} from "@fortaine/fetch-event-source"; useAppConfig,
import { prettyObject } from "@/app/utils/format"; useChatStore,
usePluginStore,
} from "@/app/store";
import { collectModelsWithDefaultModel } from "@/app/utils/model";
import {
preProcessImageContent,
uploadImage,
base64Image2Blob,
streamWithThink,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
LLMUsage,
MultimodalContent,
SpeechOptions,
} from "../api";
import Locale from "../../locales";
import { getClientConfig } from "@/app/config/client"; import { getClientConfig } from "@/app/config/client";
import { makeAzurePath } from "@/app/azure"; import {
getMessageTextContent,
isVisionModel,
isDalle3 as _isDalle3,
getTimeoutMSByModel,
} from "@/app/utils";
import { fetch } from "@/app/utils/stream";
export interface OpenAIListModelResponse { export interface OpenAIListModelResponse {
object: string; object: string;
@ -28,84 +54,114 @@ export interface OpenAIListModelResponse {
}>; }>;
} }
export interface RequestPayload {
messages: {
role: "system" | "user" | "assistant";
content: string | MultimodalContent[];
}[];
stream?: boolean;
model: string;
temperature: number;
presence_penalty: number;
frequency_penalty: number;
top_p: number;
max_tokens?: number;
max_completion_tokens?: number;
}
export interface DalleRequestPayload {
model: string;
prompt: string;
response_format: "url" | "b64_json";
n: number;
size: ModelSize;
quality: DalleQuality;
style: DalleStyle;
}
export class ChatGPTApi implements LLMApi { export class ChatGPTApi implements LLMApi {
private disableListModels = true; private disableListModels = true;
path(path: string): string { path(path: string): string {
const accessStore = useAccessStore.getState(); const accessStore = useAccessStore.getState();
const isAzure = accessStore.provider === ServiceProvider.Azure; let baseUrl = "";
if (isAzure && !accessStore.isValidAzure()) { const isAzure = path.includes("deployments");
throw Error( if (accessStore.useCustomConfig) {
"incomplete azure config, please check it in your settings page", if (isAzure && !accessStore.isValidAzure()) {
); throw Error(
"incomplete azure config, please check it in your settings page",
);
}
baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
} }
let baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
if (baseUrl.length === 0) { if (baseUrl.length === 0) {
const isApp = !!getClientConfig()?.isApp; const isApp = !!getClientConfig()?.isApp;
baseUrl = isApp const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI baseUrl = isApp ? OPENAI_BASE_URL : apiPath;
: ApiPath.OpenAI;
} }
if (baseUrl.endsWith("/")) { if (baseUrl.endsWith("/")) {
baseUrl = baseUrl.slice(0, baseUrl.length - 1); baseUrl = baseUrl.slice(0, baseUrl.length - 1);
} }
if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) { if (
!baseUrl.startsWith("http") &&
!isAzure &&
!baseUrl.startsWith(ApiPath.OpenAI)
) {
baseUrl = "https://" + baseUrl; baseUrl = "https://" + baseUrl;
} }
if (isAzure) {
path = makeAzurePath(path, accessStore.azureApiVersion);
}
console.log("[Proxy Endpoint] ", baseUrl, path); console.log("[Proxy Endpoint] ", baseUrl, path);
return [baseUrl, path].join("/"); // try rebuild url, when using cloudflare ai gateway in client
return cloudflareAIGatewayUrl([baseUrl, path].join("/"));
} }
extractMessage(res: any) { async extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? ""; if (res.error) {
return "```\n" + JSON.stringify(res, null, 4) + "\n```";
}
// dalle3 model return url, using url create image message
if (res.data) {
let url = res.data?.at(0)?.url ?? "";
const b64_json = res.data?.at(0)?.b64_json ?? "";
if (!url && b64_json) {
// uploadImage
url = await uploadImage(base64Image2Blob(b64_json, "image/png"));
}
return [
{
type: "image_url",
image_url: {
url,
},
},
];
}
return res.choices?.at(0)?.message?.content ?? res;
} }
async chat(options: ChatOptions) { async speech(options: SpeechOptions): Promise<ArrayBuffer> {
const messages = options.messages.map((v) => ({
role: v.role,
content: v.content,
}));
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
},
};
const requestPayload = { const requestPayload = {
messages, model: options.model,
stream: options.config.stream, input: options.input,
model: modelConfig.model, voice: options.voice,
temperature: modelConfig.temperature, response_format: options.response_format,
presence_penalty: modelConfig.presence_penalty, speed: options.speed,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
}; };
console.log("[Request] openai payload: ", requestPayload); console.log("[Request] openai speech payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const controller = new AbortController(); const controller = new AbortController();
options.onController?.(controller); options.onController?.(controller);
try { try {
const chatPath = this.path(OpenaiPath.ChatPath); const speechPath = this.path(OpenaiPath.SpeechPath);
const chatPayload = { const speechPayload = {
method: "POST", method: "POST",
body: JSON.stringify(requestPayload), body: JSON.stringify(requestPayload),
signal: controller.signal, signal: controller.signal,
@ -118,121 +174,237 @@ export class ChatGPTApi implements LLMApi {
REQUEST_TIMEOUT_MS, REQUEST_TIMEOUT_MS,
); );
const res = await fetch(speechPath, speechPayload);
clearTimeout(requestTimeoutId);
return await res.arrayBuffer();
} catch (e) {
console.log("[Request] failed to make a speech request", e);
throw e;
}
}
async chat(options: ChatOptions) {
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
providerName: options.config.providerName,
},
};
let requestPayload: RequestPayload | DalleRequestPayload;
const isDalle3 = _isDalle3(options.config.model);
const isO1OrO3 =
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
);
requestPayload = {
model: options.config.model,
prompt,
// URLs are only valid for 60 minutes after the image has been generated.
response_format: "b64_json", // using b64_json, and save image in CacheStorage
n: 1,
size: options.config?.size ?? "1024x1024",
quality: options.config?.quality ?? "standard",
style: options.config?.style ?? "vivid",
};
} else {
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
if (!(isO1OrO3 && v.role === "system"))
messages.push({ role: v.role, content });
}
// O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet.
requestPayload = {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: !isO1OrO3 ? modelConfig.temperature : 1,
presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
top_p: !isO1OrO3 ? modelConfig.top_p : 1,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};
// O1 使用 max_completion_tokens 控制token数 (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
if (isO1OrO3) {
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
}
// add max_tokens to vision model
if (visionModel) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}
}
console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !isDalle3 && !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
try {
let chatPath = "";
if (modelConfig.providerName === ServiceProvider.Azure) {
// find model, and get displayName as deployName
const { models: configModels, customModels: configCustomModels } =
useAppConfig.getState();
const {
defaultModel,
customModels: accessCustomModels,
useCustomConfig,
} = useAccessStore.getState();
const models = collectModelsWithDefaultModel(
configModels,
[configCustomModels, accessCustomModels].join(","),
defaultModel,
);
const model = models.find(
(model) =>
model.name === modelConfig.model &&
model?.provider?.providerName === ServiceProvider.Azure,
);
chatPath = this.path(
(isDalle3 ? Azure.ImagePath : Azure.ChatPath)(
(model?.displayName ?? model?.name) as string,
useCustomConfig ? useAccessStore.getState().azureApiVersion : "",
),
);
} else {
chatPath = this.path(
isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
);
}
if (shouldStream) { if (shouldStream) {
let responseText = ""; let index = -1;
let remainText = ""; const [tools, funcs] = usePluginStore
let finished = false; .getState()
.getAsTools(
useChatStore.getState().currentSession().mask?.plugin || [],
);
// console.log("getAsTools", tools, funcs);
streamWithThink(
chatPath,
requestPayload,
getHeaders(),
tools as any,
funcs,
controller,
// parseSSE
(text: string, runTools: ChatMessageTool[]) => {
// console.log("parseSSE", text, runTools);
const json = JSON.parse(text);
const choices = json.choices as Array<{
delta: {
content: string;
tool_calls: ChatMessageTool[];
reasoning_content: string | null;
};
}>;
// animate response to make it looks smooth if (!choices?.length) return { isThinking: false, content: "" };
function animateResponseText() {
if (finished || controller.signal.aborted) {
responseText += remainText;
console.log("[Response Animation] finished");
return;
}
if (remainText.length > 0) { const tool_calls = choices[0]?.delta?.tool_calls;
const fetchCount = Math.max(1, Math.round(remainText.length / 60)); if (tool_calls?.length > 0) {
const fetchText = remainText.slice(0, fetchCount); const id = tool_calls[0]?.id;
responseText += fetchText; const args = tool_calls[0]?.function?.arguments;
remainText = remainText.slice(fetchCount); if (id) {
options.onUpdate?.(responseText, fetchText); index += 1;
} runTools.push({
id,
type: tool_calls[0]?.type,
function: {
name: tool_calls[0]?.function?.name as string,
arguments: args,
},
});
} else {
// @ts-ignore
runTools[index]["function"]["arguments"] += args;
}
}
requestAnimationFrame(animateResponseText); const reasoning = choices[0]?.delta?.reasoning_content;
} const content = choices[0]?.delta?.content;
// start animaion // Skip if both content and reasoning_content are empty or null
animateResponseText(); if (
(!reasoning || reasoning.length === 0) &&
(!content || content.length === 0)
) {
return {
isThinking: false,
content: "",
};
}
const finish = () => { if (reasoning && reasoning.length > 0) {
if (!finished) { return {
finished = true; isThinking: true,
options.onFinish(responseText + remainText); content: reasoning,
} };
} else if (content && content.length > 0) {
return {
isThinking: false,
content: content,
};
}
return {
isThinking: false,
content: "",
};
},
// processToolMessage, include tool_calls message and tool call results
(
requestPayload: RequestPayload,
toolCallMessage: any,
toolCallResult: any[],
) => {
// reset index value
index = -1;
// @ts-ignore
requestPayload?.messages?.splice(
// @ts-ignore
requestPayload?.messages?.length,
0,
toolCallMessage,
...toolCallResult,
);
},
options,
);
} else {
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: getHeaders(),
}; };
controller.signal.onabort = finish; // make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
getTimeoutMSByModel(options.config.model),
);
fetchEventSource(chatPath, {
...chatPayload,
async onopen(res) {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log(
"[OpenAI] request response content type: ",
contentType,
);
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
}
if (
!res.ok ||
!res.headers
.get("content-type")
?.startsWith(EventStreamContentType) ||
res.status !== 200
) {
const responseTexts = [responseText];
let extraInfo = await res.clone().text();
try {
const resJson = await res.clone().json();
extraInfo = prettyObject(resJson);
} catch {}
if (res.status === 401) {
responseTexts.push(Locale.Error.Unauthorized);
}
if (extraInfo) {
responseTexts.push(extraInfo);
}
responseText = responseTexts.join("\n\n");
return finish();
}
},
onmessage(msg) {
if (msg.data === "[DONE]" || finished) {
return finish();
}
const text = msg.data;
try {
const json = JSON.parse(text) as {
choices: Array<{
delta: {
content: string;
};
}>;
};
const delta = json.choices[0]?.delta?.content;
if (delta) {
remainText += delta;
}
} catch (e) {
console.error("[Request] parse error", text);
}
},
onclose() {
finish();
},
onerror(e) {
options.onError?.(e);
throw e;
},
openWhenHidden: true,
});
} else {
const res = await fetch(chatPath, chatPayload); const res = await fetch(chatPath, chatPayload);
clearTimeout(requestTimeoutId); clearTimeout(requestTimeoutId);
const resJson = await res.json(); const resJson = await res.json();
const message = this.extractMessage(resJson); const message = await this.extractMessage(resJson);
options.onFinish(message); options.onFinish(message, res);
} }
} catch (e) { } catch (e) {
console.log("[Request] failed to make a chat request", e); console.log("[Request] failed to make a chat request", e);
@ -318,20 +490,26 @@ export class ChatGPTApi implements LLMApi {
}); });
const resJson = (await res.json()) as OpenAIListModelResponse; const resJson = (await res.json()) as OpenAIListModelResponse;
const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-")); const chatModels = resJson.data?.filter(
(m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
);
console.log("[Models]", chatModels); console.log("[Models]", chatModels);
if (!chatModels) { if (!chatModels) {
return []; return [];
} }
//由于目前 OpenAI 的 disableListModels 默认为 true所以当前实际不会运行到这场
let seq = 1000; //同 Constant.ts 中的排序保持一致
return chatModels.map((m) => ({ return chatModels.map((m) => ({
name: m.id, name: m.id,
available: true, available: true,
sorted: seq++,
provider: { provider: {
id: "openai", id: "openai",
providerName: "OpenAI", providerName: "OpenAI",
providerType: "openai", providerType: "openai",
sorted: 1,
}, },
})); }));
} }

View File

@ -0,0 +1,287 @@
"use client";
// azure and openai, using same models. so using same LLMApi.
import {
ApiPath,
SILICONFLOW_BASE_URL,
SiliconFlow,
DEFAULT_MODELS,
} from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { preProcessImageContent, streamWithThink } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
getMessageTextContentWithoutThinking,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
export interface SiliconFlowListModelResponse {
object: string;
data: Array<{
id: string;
object: string;
root: string;
}>;
}
/**
 * SiliconFlow chat client (OpenAI-compatible API). Streaming goes through
 * streamWithThink so reasoning_content ("thinking") deltas from models such
 * as DeepSeek-R1 are rendered separately from the final answer.
 */
export class SiliconflowApi implements LLMApi {
  // Listing is enabled: models() queries the provider's /models endpoint.
  private disableListModels = false;

  /**
   * Build the request URL for a given API path.
   * Precedence: user-configured custom URL > app build default > proxy path.
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.siliconflowUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.SiliconFlow;
      baseUrl = isApp ? SILICONFLOW_BASE_URL : apiPath;
    }

    // Normalize: strip one trailing slash, add a scheme for bare hosts.
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (
      !baseUrl.startsWith("http") &&
      !baseUrl.startsWith(ApiPath.SiliconFlow)
    ) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  // Extract the assistant message text from an OpenAI-style response body.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  // Text-to-speech is not supported by the SiliconFlow integration.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }

  /**
   * Send a chat-completion request, streaming (with thinking/tool-call
   * support) or as a single blocking request.
   */
  async chat(options: ChatOptions) {
    const visionModel = isVisionModel(options.config.model);
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      if (v.role === "assistant") {
        // Strip previously-rendered <think> reasoning before resending history.
        const content = getMessageTextContentWithoutThinking(v);
        messages.push({ role: v.role, content });
      } else {
        // Vision models keep (pre-processed) image parts; others get plain text.
        const content = visionModel
          ? await preProcessImageContent(v.content)
          : getMessageTextContent(v);
        messages.push({ role: v.role, content });
      }
    }

    // Config precedence: app defaults < session mask < this call's overrides.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
      // max_tokens is intentionally not sent (deliberately disabled upstream).
    };

    console.log("[Request] siliconflow payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(SiliconFlow.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Use extended timeout for thinking models as they typically require
      // more processing time.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
        // Clear our timer before handing off: otherwise it fires later and
        // aborts the controller mid-stream, cutting off long responses.
        // NOTE(review): streamWithThink appears to manage its own request
        // timeout in app/utils/chat.ts — confirm.
        clearTimeout(requestTimeoutId);

        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return streamWithThink(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: split each chunk into tool calls, reasoning, and answer text
          (text: string, runTools: ChatMessageTool[]) => {
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string | null;
                tool_calls: ChatMessageTool[];
                reasoning_content: string | null;
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call carries its id and function name.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Later chunks append to the arguments of the call at `index`.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            const reasoning = choices[0]?.delta?.reasoning_content;
            const content = choices[0]?.delta?.content;

            // Skip if both content and reasoning_content are empty or null
            if (
              (!reasoning || reasoning.length === 0) &&
              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
                content: "",
              };
            }

            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,
              };
            }

            return {
              isThinking: false,
              content: "",
            };
          },
          // processToolMessage: append the tool-call message and its results
          // so the follow-up request carries the full tool exchange.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        // Surface HTTP-level failures instead of trying to parse an error
        // body as a chat reply.
        if (!res.ok) {
          const errorText = await res.text();
          options.onError?.(
            new Error(`Request failed with status ${res.status}: ${errorText}`),
          );
          return;
        }

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  // Usage/quota reporting is not available; report zeroed stats.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  /**
   * List chat models from the provider (or the static defaults when listing
   * is disabled). `sorted` values keep ordering consistent with Constant.ts.
   */
  async models(): Promise<LLMModel[]> {
    if (this.disableListModels) {
      return DEFAULT_MODELS.slice();
    }

    const res = await fetch(this.path(SiliconFlow.ListModelPath), {
      method: "GET",
      headers: {
        ...getHeaders(),
      },
    });

    const resJson = (await res.json()) as SiliconFlowListModelResponse;
    const chatModels = resJson.data;
    console.log("[Models]", chatModels);

    if (!chatModels) {
      return [];
    }

    let seq = 1000; // keep ordering consistent with Constant.ts
    return chatModels.map((m) => ({
      name: m.id,
      available: true,
      sorted: seq++,
      provider: {
        id: "siliconflow",
        providerName: "SiliconFlow",
        providerType: "siliconflow",
        sorted: 14,
      },
    }));
  }
}

View File

@ -0,0 +1,278 @@
"use client";
import { ApiPath, TENCENT_BASE_URL } from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
MultimodalContent,
SpeechOptions,
} from "../api";
import Locale from "../../locales";
import {
EventStreamContentType,
fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import {
getMessageTextContent,
isVisionModel,
getTimeoutMSByModel,
} from "@/app/utils";
import mapKeys from "lodash-es/mapKeys";
import mapValues from "lodash-es/mapValues";
import isArray from "lodash-es/isArray";
import isObject from "lodash-es/isObject";
import { fetch } from "@/app/utils/stream";
// Shape of an OpenAI-style /models list response; only the fields read by
// clients are declared. NOTE(review): not referenced in the visible portion
// of this file — presumably kept for parity with sibling API clients; confirm.
export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}
// Tencent Hunyuan request body. Unlike the snake_case OpenAI payloads used by
// the other clients, Hunyuan expects PascalCase field names — these are
// produced at runtime by capitalizeKeys() in this file.
interface RequestPayload {
  Messages: {
    Role: "system" | "user" | "assistant";
    Content: string | MultimodalContent[];
  }[];
  Stream?: boolean;
  Model: string;
  Temperature: number;
  TopP: number;
}
/**
 * Recursively convert snake_case/lowercase object keys to Tencent-style
 * PascalCase (e.g. "top_p" -> "TopP", "messages" -> "Messages"), dropping the
 * underscores. Arrays are mapped element-wise; primitives are returned as-is.
 *
 * Implemented with the standard library (Object.entries/fromEntries) instead
 * of the four lodash-es helpers the original pulled in.
 */
function capitalizeKeys(obj: any): any {
  if (Array.isArray(obj)) {
    return obj.map(capitalizeKeys);
  }
  if (obj !== null && typeof obj === "object") {
    return Object.fromEntries(
      Object.entries(obj).map(([key, value]) => [
        // Uppercase the first letter and every letter following an underscore.
        key.replace(/(^|_)(\w)/g, (_match, _sep, ch) => ch.toUpperCase()),
        capitalizeKeys(value),
      ]),
    );
  }
  return obj;
}
export class HunyuanApi implements LLMApi {
// Resolve the Tencent Hunyuan endpoint: the user-configured URL wins, then
// the app/build default; trailing slashes and missing schemes are normalized.
path(): string {
  const access = useAccessStore.getState();

  let url = access.useCustomConfig ? access.tencentUrl : "";

  if (url.length === 0) {
    url = getClientConfig()?.isApp ? TENCENT_BASE_URL : ApiPath.Tencent;
  }

  if (url.endsWith("/")) {
    url = url.slice(0, -1);
  }

  const hasScheme = url.startsWith("http");
  if (!hasScheme && !url.startsWith(ApiPath.Tencent)) {
    url = `https://${url}`;
  }

  console.log("[Proxy Endpoint] ", url);
  return url;
}
// Pull the assistant text out of a Hunyuan (capitalized-key) response,
// defaulting to an empty string when the shape is unexpected.
extractMessage(res: any) {
  const firstChoice = res.Choices?.at(0);
  return firstChoice?.Message?.Content ?? "";
}
// Text-to-speech is not supported by the Tencent Hunyuan integration.
speech(options: SpeechOptions): Promise<ArrayBuffer> {
  throw new Error("Method not implemented.");
}
/**
 * Send a chat completion request to the Tencent Hunyuan API.
 *
 * Supports both streaming (SSE via fetchEventSource) and plain JSON
 * responses. Results are delivered through the callbacks on `options`
 * (onUpdate for incremental text, onFinish for the final text, onError
 * for failures). The AbortController is exposed via onController so the
 * UI can cancel an in-flight request.
 */
async chat(options: ChatOptions) {
  const visionModel = isVisionModel(options.config.model);
  const messages = options.messages.map((v, index) => ({
    // Tencent API requirement: a "system" role entry must be the very
    // first message; any later system message is demoted to "user".
    role: index !== 0 && v.role === "system" ? "user" : v.role,
    // Vision models accept multimodal content; all others get plain text.
    content: visionModel ? v.content : getMessageTextContent(v),
  }));
  // Merge app-wide defaults with the current session's mask config, then
  // force the model chosen for this particular request.
  const modelConfig = {
    ...useAppConfig.getState().modelConfig,
    ...useChatStore.getState().currentSession().mask.modelConfig,
    ...{
      model: options.config.model,
    },
  };
  // Hunyuan expects PascalCase keys (Model, Messages, Temperature, ...);
  // capitalizeKeys converts the snake/camelCase payload accordingly.
  const requestPayload: RequestPayload = capitalizeKeys({
    model: modelConfig.model,
    messages,
    temperature: modelConfig.temperature,
    top_p: modelConfig.top_p,
    stream: options.config.stream,
  });
  console.log("[Request] Tencent payload: ", requestPayload);
  const shouldStream = !!options.config.stream;
  const controller = new AbortController();
  options.onController?.(controller);
  try {
    const chatPath = this.path();
    const chatPayload = {
      method: "POST",
      body: JSON.stringify(requestPayload),
      signal: controller.signal,
      headers: getHeaders(),
    };
    // Abort the request if it exceeds the per-model timeout.
    const requestTimeoutId = setTimeout(
      () => controller.abort(),
      getTimeoutMSByModel(options.config.model),
    );
    if (shouldStream) {
      // responseText: text already shown; remainText: received but not yet
      // animated onto the screen.
      let responseText = "";
      let remainText = "";
      let finished = false;
      // NOTE(review): assigned in onopen; if finish() runs before onopen
      // (e.g. immediate abort) this is still undefined — confirm callers
      // of onFinish tolerate that.
      let responseRes: Response;
      // Drip-feed remainText into responseText frame by frame so the
      // streamed reply renders smoothly instead of in large jumps.
      function animateResponseText() {
        if (finished || controller.signal.aborted) {
          responseText += remainText;
          console.log("[Response Animation] finished");
          if (responseText?.length === 0) {
            options.onError?.(new Error("empty response from server"));
          }
          return;
        }
        if (remainText.length > 0) {
          // Emit ~1/60th of the backlog per frame, at least one char.
          const fetchCount = Math.max(1, Math.round(remainText.length / 60));
          const fetchText = remainText.slice(0, fetchCount);
          responseText += fetchText;
          remainText = remainText.slice(fetchCount);
          options.onUpdate?.(responseText, fetchText);
        }
        requestAnimationFrame(animateResponseText);
      }
      // start animation
      animateResponseText();
      // Idempotent completion: flush whatever remains and notify once.
      const finish = () => {
        if (!finished) {
          finished = true;
          options.onFinish(responseText + remainText, responseRes);
        }
      };
      controller.signal.onabort = finish;
      fetchEventSource(chatPath, {
        fetch: fetch as any,
        ...chatPayload,
        async onopen(res) {
          clearTimeout(requestTimeoutId);
          const contentType = res.headers.get("content-type");
          console.log(
            "[Tencent] request response content type: ",
            contentType,
          );
          responseRes = res;
          // Plain-text response means the server replied without SSE;
          // treat the whole body as the answer.
          if (contentType?.startsWith("text/plain")) {
            responseText = await res.clone().text();
            return finish();
          }
          // Anything that is not a 200 SSE stream is an error; try to
          // extract a readable message from the body.
          if (
            !res.ok ||
            !res.headers
              .get("content-type")
              ?.startsWith(EventStreamContentType) ||
            res.status !== 200
          ) {
            const responseTexts = [responseText];
            let extraInfo = await res.clone().text();
            try {
              const resJson = await res.clone().json();
              extraInfo = prettyObject(resJson);
            } catch {}
            if (res.status === 401) {
              responseTexts.push(Locale.Error.Unauthorized);
            }
            if (extraInfo) {
              responseTexts.push(extraInfo);
            }
            responseText = responseTexts.join("\n\n");
            return finish();
          }
        },
        onmessage(msg) {
          if (msg.data === "[DONE]" || finished) {
            return finish();
          }
          const text = msg.data;
          try {
            const json = JSON.parse(text);
            // Hunyuan streams PascalCase deltas: Choices[0].Delta.Content.
            const choices = json.Choices as Array<{
              Delta: { Content: string };
            }>;
            const delta = choices[0]?.Delta?.Content;
            if (delta) {
              remainText += delta;
            }
          } catch (e) {
            console.error("[Request] parse error", text, msg);
          }
        },
        onclose() {
          finish();
        },
        onerror(e) {
          options.onError?.(e);
          // Rethrow so fetchEventSource stops retrying.
          throw e;
        },
        openWhenHidden: true,
      });
    } else {
      // Non-streaming: single JSON round trip.
      const res = await fetch(chatPath, chatPayload);
      clearTimeout(requestTimeoutId);
      const resJson = await res.json();
      const message = this.extractMessage(resJson);
      options.onFinish(message, res);
    }
  } catch (e) {
    console.log("[Request] failed to make a chat request", e);
    options.onError?.(e as Error);
  }
}
// Usage accounting is not available for this provider; report zeros so
// the settings UI renders an empty quota.
async usage() {
  return { used: 0, total: 0 };
}
async models(): Promise<LLMModel[]> {
return [];
}
}

194
app/client/platforms/xai.ts Normal file
View File

@ -0,0 +1,194 @@
"use client";
// xAI exposes an OpenAI-compatible chat completions API, so this client
// reuses the OpenAI request/response payload shapes.
import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
import {
useAccessStore,
useAppConfig,
useChatStore,
ChatMessageTool,
usePluginStore,
} from "@/app/store";
import { stream } from "@/app/utils/chat";
import {
ChatOptions,
getHeaders,
LLMApi,
LLMModel,
SpeechOptions,
} from "../api";
import { getClientConfig } from "@/app/config/client";
import { getTimeoutMSByModel } from "@/app/utils";
import { preProcessImageContent } from "@/app/utils/chat";
import { RequestPayload } from "./openai";
import { fetch } from "@/app/utils/stream";
/**
 * LLM client for the xAI (Grok) API. The wire format is
 * OpenAI-compatible, so it reuses the OpenAI RequestPayload shape and the
 * shared `stream()` helper for SSE + tool-call handling.
 */
export class XAIApi implements LLMApi {
  private disableListModels = true;
  /**
   * Resolve the base endpoint for a given API sub-path.
   * Priority: user-configured custom URL > app build (direct XAI base URL)
   * > web build (relative proxy path).
   */
  path(path: string): string {
    const accessStore = useAccessStore.getState();
    let baseUrl = "";
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.xaiUrl;
    }
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.XAI;
      baseUrl = isApp ? XAI_BASE_URL : apiPath;
    }
    // Normalize: strip trailing slash, then force https:// for bare hosts
    // (relative proxy paths like "/api/xai" are left untouched).
    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) {
      baseUrl = "https://" + baseUrl;
    }
    console.log("[Proxy Endpoint] ", baseUrl, path);
    return [baseUrl, path].join("/");
  }
  // Extract the assistant text from an OpenAI-shaped completion response.
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
  // Text-to-speech is not supported by the xAI provider.
  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error("Method not implemented.");
  }
  /**
   * Send a chat completion request. Streams via the shared `stream()`
   * helper (which also drives plugin tool calls); otherwise performs a
   * single JSON round trip. Results are delivered through the callbacks
   * on `options`.
   */
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      // Convert image attachments into the API's expected content parts.
      const content = await preProcessImageContent(v.content);
      messages.push({ role: v.role, content });
    }
    // Merge app defaults with the session mask config, then force the
    // model/provider chosen for this request.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };
    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };
    console.log("[Request] xai payload: ", requestPayload);
    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);
    try {
      const chatPath = this.path(XAI.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };
      // Abort the request if it exceeds the per-model timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        getTimeoutMSByModel(options.config.model),
      );
      if (shouldStream) {
        // Plugin tools declared by the current mask, as OpenAI tool specs
        // plus their local implementations.
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE: extract incremental text and accumulate tool calls
          // from each OpenAI-shaped SSE chunk.
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls?.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call: record it.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Continuation chunk: append streamed argument fragments.
                // @ts-ignore
                runTools[index]["function"]["arguments"] += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage, include tool_calls message and tool call results
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // Append the assistant tool-call message and the tool results
            // so the follow-up request carries the full exchange.
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        // Non-streaming: single JSON round trip.
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);
        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  // Usage accounting is not available; report zeros.
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
  // Dynamic model listing is disabled for this provider.
  async models(): Promise<LLMModel[]> {
    return [];
  }
}

View File

@ -38,16 +38,20 @@ interface ChatCommands {
next?: Command; next?: Command;
prev?: Command; prev?: Command;
clear?: Command; clear?: Command;
fork?: Command;
del?: Command; del?: Command;
} }
export const ChatCommandPrefix = ":"; // Compatible with Chinese colon character ""
export const ChatCommandPrefix = /^[:]/;
export function useChatCommand(commands: ChatCommands = {}) { export function useChatCommand(commands: ChatCommands = {}) {
function extract(userInput: string) { function extract(userInput: string) {
return ( const match = userInput.match(ChatCommandPrefix);
userInput.startsWith(ChatCommandPrefix) ? userInput.slice(1) : userInput if (match) {
) as keyof ChatCommands; return userInput.slice(1) as keyof ChatCommands;
}
return userInput as keyof ChatCommands;
} }
function search(userInput: string) { function search(userInput: string) {
@ -57,7 +61,7 @@ export function useChatCommand(commands: ChatCommands = {}) {
.filter((c) => c.startsWith(input)) .filter((c) => c.startsWith(input))
.map((c) => ({ .map((c) => ({
title: desc[c as keyof ChatCommands], title: desc[c as keyof ChatCommands],
content: ChatCommandPrefix + c, content: ":" + c,
})); }));
} }

View File

@ -0,0 +1,31 @@
// Full-page layout for the Artifacts viewer: a fixed-height header bar
// stacked above a flexible content area.
.artifacts {
  display: flex;
  width: 100%;
  height: 100%;
  flex-direction: column;
  &-header {
    display: flex;
    align-items: center;
    height: 36px;
    padding: 20px;
    background: var(--second);
  }
  &-title {
    // Title expands to fill the space between the header buttons.
    flex: 1;
    text-align: center;
    font-weight: bold;
    font-size: 24px;
  }
  &-content {
    // Content takes all remaining vertical space below the header.
    flex-grow: 1;
    padding: 0 20px 20px 20px;
    background-color: var(--second);
  }
}
// The sandboxed iframe that renders the shared HTML snippet.
.artifacts-iframe {
  width: 100%;
  border: var(--border-in-light);
  border-radius: 6px;
  background-color: var(--gray);
}

View File

@ -0,0 +1,266 @@
import {
useEffect,
useState,
useRef,
useMemo,
forwardRef,
useImperativeHandle,
} from "react";
import { useParams } from "react-router";
import { IconButton } from "./button";
import { nanoid } from "nanoid";
import ExportIcon from "../icons/share.svg";
import CopyIcon from "../icons/copy.svg";
import DownloadIcon from "../icons/download.svg";
import GithubIcon from "../icons/github.svg";
import LoadingButtonIcon from "../icons/loading.svg";
import ReloadButtonIcon from "../icons/reload.svg";
import Locale from "../locales";
import { Modal, showToast } from "./ui-lib";
import { copyToClipboard, downloadAs } from "../utils";
import { Path, ApiPath, REPO_URL } from "@/app/constant";
import { Loading } from "./home";
import styles from "./artifacts.module.scss";
type HTMLPreviewProps = {
code: string;
autoHeight?: boolean;
height?: number | string;
onLoad?: (title?: string) => void;
};
export type HTMLPreviewHander = {
reload: () => void;
};
/**
 * Renders an HTML snippet inside a sandboxed iframe.
 *
 * A small script is injected into the document that reports the rendered
 * body height back to the parent via postMessage, so the iframe can be
 * auto-sized. Exposes a `reload()` handle (implemented by remounting the
 * iframe with a fresh key).
 */
export const HTMLPreview = forwardRef<HTMLPreviewHander, HTMLPreviewProps>(
  function HTMLPreview(props, ref) {
    const iframeRef = useRef<HTMLIFrameElement>(null);
    const [frameId, setFrameId] = useState<string>(nanoid());
    const [iframeHeight, setIframeHeight] = useState(600);
    const [title, setTitle] = useState("");
    /*
     * https://stackoverflow.com/questions/19739001/what-is-the-difference-between-srcdoc-and-src-datatext-html-in-an
     * 1. using srcdoc
     * 2. using src with dataurl:
     *    easy to share
     *    length limit (Data URIs cannot be larger than 32,768 characters.)
     */
    useEffect(() => {
      const handleMessage = (e: any) => {
        const { id, height, title } = e.data;
        // NOTE(review): title is taken from ANY window message, not just
        // our frame's resize messages — presumably the artifact document
        // posts it separately; confirm before tightening this.
        setTitle(title);
        // Only honor height reports from our own frame instance.
        if (id == frameId) {
          setIframeHeight(height);
        }
      };
      window.addEventListener("message", handleMessage);
      return () => {
        window.removeEventListener("message", handleMessage);
      };
    }, [frameId]);
    useImperativeHandle(ref, () => ({
      reload: () => {
        setFrameId(nanoid());
      },
    }));
    const height = useMemo(() => {
      if (!props.autoHeight) return props.height || 600;
      if (typeof props.height === "string") {
        return props.height;
      }
      // Auto mode: grow with the content (plus 40px padding), capped at
      // the parent-provided height.
      const parentHeight = props.height || 600;
      return iframeHeight + 40 > parentHeight
        ? parentHeight
        : iframeHeight + 40;
    }, [props.autoHeight, props.height, iframeHeight]);
    const srcDoc = useMemo(() => {
      const script = `<script>window.addEventListener("DOMContentLoaded", () => new ResizeObserver((entries) => parent.postMessage({id: '${frameId}', height: entries[0].target.clientHeight}, '*')).observe(document.body))</script>`;
      // Bug fix: the previous code called String.replace but discarded its
      // return value (strings are immutable), so the script was always
      // prepended BEFORE <!DOCTYPE html>, invalidating the doctype.
      // Inject the script right after the doctype instead.
      if (props.code.includes("<!DOCTYPE html>")) {
        return props.code.replace("<!DOCTYPE html>", "<!DOCTYPE html>" + script);
      }
      return script + props.code;
    }, [props.code, frameId]);
    const handleOnLoad = () => {
      if (props?.onLoad) {
        props.onLoad(title);
      }
    };
    return (
      <iframe
        className={styles["artifacts-iframe"]}
        key={frameId}
        ref={iframeRef}
        sandbox="allow-forms allow-modals allow-scripts"
        style={{ height }}
        srcDoc={srcDoc}
        onLoad={handleOnLoad}
      />
    );
  },
);
/**
 * Button that uploads the current artifact code and shows a share dialog
 * with the resulting permalink (plus download/copy actions).
 *
 * If an `id` is already known the upload is skipped and the existing id
 * is reused.
 */
export function ArtifactsShareButton({
  getCode,
  id,
  style,
  fileName,
}: {
  getCode: () => string;
  id?: string;
  style?: any;
  fileName?: string;
}) {
  const [loading, setLoading] = useState(false);
  const [name, setName] = useState(id);
  const [show, setShow] = useState(false);
  // Share link of the form <origin>/#/artifacts/<id>.
  const shareUrl = useMemo(
    () => [location.origin, "#", Path.Artifacts, "/", name].join(""),
    [name],
  );
  // POST the code to the artifacts endpoint; resolves to { id } or, on
  // failure, shows a toast and resolves to undefined (handled by the
  // res?.id check below).
  const upload = (code: string) =>
    id
      ? Promise.resolve({ id })
      : fetch(ApiPath.Artifacts, {
          method: "POST",
          body: code,
        })
          .then((res) => res.json())
          .then(({ id }) => {
            if (id) {
              return { id };
            }
            throw Error();
          })
          .catch((e) => {
            showToast(Locale.Export.Artifacts.Error);
          });
  return (
    <>
      <div className="window-action-button" style={style}>
        <IconButton
          icon={loading ? <LoadingButtonIcon /> : <ExportIcon />}
          bordered
          title={Locale.Export.Artifacts.Title}
          onClick={() => {
            // Guard against double-clicks while an upload is in flight.
            if (loading) return;
            setLoading(true);
            upload(getCode())
              .then((res) => {
                if (res?.id) {
                  setShow(true);
                  setName(res?.id);
                }
              })
              .finally(() => setLoading(false));
          }}
        />
      </div>
      {show && (
        <div className="modal-mask">
          <Modal
            title={Locale.Export.Artifacts.Title}
            onClose={() => setShow(false)}
            actions={[
              <IconButton
                key="download"
                icon={<DownloadIcon />}
                bordered
                text={Locale.Export.Download}
                onClick={() => {
                  downloadAs(getCode(), `${fileName || name}.html`).then(() =>
                    setShow(false),
                  );
                }}
              />,
              <IconButton
                key="copy"
                icon={<CopyIcon />}
                bordered
                text={Locale.Chat.Actions.Copy}
                onClick={() => {
                  copyToClipboard(shareUrl).then(() => setShow(false));
                }}
              />,
            ]}
          >
            <div>
              <a target="_blank" href={shareUrl}>
                {shareUrl}
              </a>
            </div>
          </Modal>
        </div>
      )}
    </>
  );
}
/**
 * Standalone artifact viewer page: fetches the artifact content by the
 * `:id` route param and renders it in an HTMLPreview, with a header bar
 * offering reload and share actions.
 */
export function Artifacts() {
  const { id } = useParams();
  const [code, setCode] = useState("");
  const [loading, setLoading] = useState(true);
  const [fileName, setFileName] = useState("");
  const previewRef = useRef<HTMLPreviewHander>(null);
  useEffect(() => {
    if (id) {
      fetch(`${ApiPath.Artifacts}?id=${id}`)
        .then((res) => {
          if (res.status > 300) {
            throw Error("can not get content");
          }
          return res;
        })
        .then((res) => res.text())
        .then(setCode)
        .catch((e) => {
          showToast(Locale.Export.Artifacts.Error);
          // Bug fix: previously the loading spinner was only cleared by
          // HTMLPreview's onLoad, which never fires when the fetch fails
          // (no code -> no preview), leaving the spinner stuck forever.
          setLoading(false);
        });
    }
  }, [id]);
  return (
    <div className={styles["artifacts"]}>
      <div className={styles["artifacts-header"]}>
        <a href={REPO_URL} target="_blank" rel="noopener noreferrer">
          <IconButton bordered icon={<GithubIcon />} shadow />
        </a>
        <IconButton
          bordered
          style={{ marginLeft: 20 }}
          icon={<ReloadButtonIcon />}
          shadow
          onClick={() => previewRef.current?.reload()}
        />
        <div className={styles["artifacts-title"]}>NextChat Artifacts</div>
        <ArtifactsShareButton
          id={id}
          getCode={() => code}
          fileName={fileName}
        />
      </div>
      <div className={styles["artifacts-content"]}>
        {loading && <Loading />}
        {code && (
          <HTMLPreview
            code={code}
            ref={previewRef}
            autoHeight={false}
            height={"100%"}
            onLoad={(title) => {
              // The preview reports the document title, used as the
              // download filename; loading ends once the frame renders.
              setFileName(title as string);
              setLoading(false);
            }}
          />
        )}
      </div>
    </div>
  );
}

View File

@ -1,12 +1,70 @@
.auth-page { .auth-page {
display: flex; display: flex;
justify-content: center; justify-content: flex-start;
align-items: center; align-items: center;
height: 100%; height: 100%;
width: 100%; width: 100%;
flex-direction: column; flex-direction: column;
.top-banner {
position: relative;
width: 100%;
display: flex;
justify-content: center;
align-items: center;
padding: 12px 64px;
box-sizing: border-box;
background: var(--second);
.top-banner-inner {
display: flex;
justify-content: center;
align-items: center;
font-size: 14px;
line-height: 150%;
span {
gap: 8px;
a {
display: inline-flex;
align-items: center;
text-decoration: none;
margin-left: 8px;
color: var(--primary);
}
}
}
.top-banner-close {
cursor: pointer;
position: absolute;
top: 50%;
right: 48px;
transform: translateY(-50%);
}
}
@media (max-width: 600px) {
.top-banner {
padding: 12px 24px 12px 12px;
.top-banner-close {
right: 10px;
}
.top-banner-inner {
.top-banner-logo {
margin-right: 8px;
}
}
}
}
.auth-header {
display: flex;
justify-content: space-between;
width: 100%;
padding: 10px;
box-sizing: border-box;
animation: slide-in-from-top ease 0.3s;
}
.auth-logo { .auth-logo {
margin-top: 10vh;
transform: scale(1.4); transform: scale(1.4);
} }
@ -14,6 +72,7 @@
font-size: 24px; font-size: 24px;
font-weight: bold; font-weight: bold;
line-height: 2; line-height: 2;
margin-bottom: 1vh;
} }
.auth-tips { .auth-tips {
@ -24,6 +83,10 @@
margin: 3vh 0; margin: 3vh 0;
} }
.auth-input-second {
margin: 0 0 3vh 0;
}
.auth-actions { .auth-actions {
display: flex; display: flex;
justify-content: center; justify-content: center;

View File

@ -1,21 +1,37 @@
import styles from "./auth.module.scss"; import styles from "./auth.module.scss";
import { IconButton } from "./button"; import { IconButton } from "./button";
import { useState, useEffect } from "react";
import { useNavigate } from "react-router-dom"; import { useNavigate } from "react-router-dom";
import { Path } from "../constant"; import { Path, SAAS_CHAT_URL } from "../constant";
import { useAccessStore } from "../store"; import { useAccessStore } from "../store";
import Locale from "../locales"; import Locale from "../locales";
import Delete from "../icons/close.svg";
import Arrow from "../icons/arrow.svg";
import Logo from "../icons/logo.svg";
import { useMobileScreen } from "@/app/utils";
import BotIcon from "../icons/bot.svg"; import BotIcon from "../icons/bot.svg";
import { useEffect } from "react";
import { getClientConfig } from "../config/client"; import { getClientConfig } from "../config/client";
import { PasswordInput } from "./ui-lib";
import LeftIcon from "@/app/icons/left.svg";
import { safeLocalStorage } from "@/app/utils";
import {
trackSettingsPageGuideToCPaymentClick,
trackAuthorizationPageButtonToCPaymentClick,
} from "../utils/auth-settings-events";
import clsx from "clsx";
const storage = safeLocalStorage();
export function AuthPage() { export function AuthPage() {
const navigate = useNavigate(); const navigate = useNavigate();
const accessStore = useAccessStore(); const accessStore = useAccessStore();
const goHome = () => navigate(Path.Home); const goHome = () => navigate(Path.Home);
const goChat = () => navigate(Path.Chat); const goChat = () => navigate(Path.Chat);
const goSaas = () => {
trackAuthorizationPageButtonToCPaymentClick();
window.location.href = SAAS_CHAT_URL;
};
const resetAccessCode = () => { const resetAccessCode = () => {
accessStore.update((access) => { accessStore.update((access) => {
access.openaiApiKey = ""; access.openaiApiKey = "";
@ -32,43 +48,58 @@ export function AuthPage() {
return ( return (
<div className={styles["auth-page"]}> <div className={styles["auth-page"]}>
<div className={`no-dark ${styles["auth-logo"]}`}> <TopBanner></TopBanner>
<div className={styles["auth-header"]}>
<IconButton
icon={<LeftIcon />}
text={Locale.Auth.Return}
onClick={() => navigate(Path.Home)}
></IconButton>
</div>
<div className={clsx("no-dark", styles["auth-logo"])}>
<BotIcon /> <BotIcon />
</div> </div>
<div className={styles["auth-title"]}>{Locale.Auth.Title}</div> <div className={styles["auth-title"]}>{Locale.Auth.Title}</div>
<div className={styles["auth-tips"]}>{Locale.Auth.Tips}</div> <div className={styles["auth-tips"]}>{Locale.Auth.Tips}</div>
<input <PasswordInput
className={styles["auth-input"]} style={{ marginTop: "3vh", marginBottom: "3vh" }}
type="password" aria={Locale.Settings.ShowPassword}
placeholder={Locale.Auth.Input} aria-label={Locale.Auth.Input}
value={accessStore.accessCode} value={accessStore.accessCode}
type="text"
placeholder={Locale.Auth.Input}
onChange={(e) => { onChange={(e) => {
accessStore.update( accessStore.update(
(access) => (access.accessCode = e.currentTarget.value), (access) => (access.accessCode = e.currentTarget.value),
); );
}} }}
/> />
{!accessStore.hideUserApiKey ? ( {!accessStore.hideUserApiKey ? (
<> <>
<div className={styles["auth-tips"]}>{Locale.Auth.SubTips}</div> <div className={styles["auth-tips"]}>{Locale.Auth.SubTips}</div>
<input <PasswordInput
className={styles["auth-input"]} style={{ marginTop: "3vh", marginBottom: "3vh" }}
type="password" aria={Locale.Settings.ShowPassword}
placeholder={Locale.Settings.Access.OpenAI.ApiKey.Placeholder} aria-label={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
value={accessStore.openaiApiKey} value={accessStore.openaiApiKey}
type="text"
placeholder={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
onChange={(e) => { onChange={(e) => {
accessStore.update( accessStore.update(
(access) => (access.openaiApiKey = e.currentTarget.value), (access) => (access.openaiApiKey = e.currentTarget.value),
); );
}} }}
/> />
<input <PasswordInput
className={styles["auth-input"]} style={{ marginTop: "3vh", marginBottom: "3vh" }}
type="password" aria={Locale.Settings.ShowPassword}
placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder} aria-label={Locale.Settings.Access.Google.ApiKey.Placeholder}
value={accessStore.googleApiKey} value={accessStore.googleApiKey}
type="text"
placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
onChange={(e) => { onChange={(e) => {
accessStore.update( accessStore.update(
(access) => (access.googleApiKey = e.currentTarget.value), (access) => (access.googleApiKey = e.currentTarget.value),
@ -85,13 +116,74 @@ export function AuthPage() {
onClick={goChat} onClick={goChat}
/> />
<IconButton <IconButton
text={Locale.Auth.Later} text={Locale.Auth.SaasTips}
onClick={() => { onClick={() => {
resetAccessCode(); goSaas();
goHome();
}} }}
/> />
</div> </div>
</div> </div>
); );
} }
/**
 * Dismissible promo banner linking to the hosted SaaS chat. The dismissed
 * state is persisted in localStorage ("bannerDismissed") so the banner
 * stays hidden across sessions. The close button is shown on hover
 * (always on mobile, where there is no hover).
 */
function TopBanner() {
  const [isHovered, setIsHovered] = useState(false);
  const [isVisible, setIsVisible] = useState(true);
  const isMobile = useMobileScreen();
  useEffect(() => {
    // Check localStorage for the dismissal flag.
    const bannerDismissed = storage.getItem("bannerDismissed");
    // If the flag is missing, seed it with the default and show the banner.
    if (!bannerDismissed) {
      storage.setItem("bannerDismissed", "false");
      setIsVisible(true); // show the banner
    } else if (bannerDismissed === "true") {
      // The user dismissed it previously; keep it hidden.
      setIsVisible(false);
    }
  }, []);
  const handleMouseEnter = () => {
    setIsHovered(true);
  };
  const handleMouseLeave = () => {
    setIsHovered(false);
  };
  const handleClose = () => {
    setIsVisible(false);
    storage.setItem("bannerDismissed", "true");
  };
  if (!isVisible) {
    return null;
  }
  return (
    <div
      className={styles["top-banner"]}
      onMouseEnter={handleMouseEnter}
      onMouseLeave={handleMouseLeave}
    >
      <div className={clsx(styles["top-banner-inner"], "no-dark")}>
        <Logo className={styles["top-banner-logo"]}></Logo>
        <span>
          {Locale.Auth.TopTips}
          {/* Bug fix: rel="stylesheet" is only valid on <link> elements;
              use the conventional safe rel for outbound links instead. */}
          <a
            href={SAAS_CHAT_URL}
            rel="noopener noreferrer"
            onClick={() => {
              trackSettingsPageGuideToCPaymentClick();
            }}
          >
            {Locale.Settings.Access.SaasStart.ChatNow}
            <Arrow style={{ marginLeft: "4px" }} />
          </a>
        </span>
      </div>
      {(isHovered || isMobile) && (
        <Delete className={styles["top-banner-close"]} onClick={handleClose} />
      )}
    </div>
  );
}

View File

@ -5,7 +5,6 @@
align-items: center; align-items: center;
justify-content: center; justify-content: center;
padding: 10px; padding: 10px;
cursor: pointer; cursor: pointer;
transition: all 0.3s ease; transition: all 0.3s ease;
overflow: hidden; overflow: hidden;

View File

@ -1,6 +1,8 @@
import * as React from "react"; import * as React from "react";
import styles from "./button.module.scss"; import styles from "./button.module.scss";
import { CSSProperties } from "react";
import clsx from "clsx";
export type ButtonType = "primary" | "danger" | null; export type ButtonType = "primary" | "danger" | null;
@ -16,35 +18,48 @@ export function IconButton(props: {
disabled?: boolean; disabled?: boolean;
tabIndex?: number; tabIndex?: number;
autoFocus?: boolean; autoFocus?: boolean;
style?: CSSProperties;
aria?: string;
}) { }) {
return ( return (
<button <button
className={ className={clsx(
styles["icon-button"] + "clickable",
` ${props.bordered && styles.border} ${props.shadow && styles.shadow} ${ styles["icon-button"],
props.className ?? "" {
} clickable ${styles[props.type ?? ""]}` [styles.border]: props.bordered,
} [styles.shadow]: props.shadow,
},
styles[props.type ?? ""],
props.className,
)}
onClick={props.onClick} onClick={props.onClick}
title={props.title} title={props.title}
disabled={props.disabled} disabled={props.disabled}
role="button" role="button"
tabIndex={props.tabIndex} tabIndex={props.tabIndex}
autoFocus={props.autoFocus} autoFocus={props.autoFocus}
style={props.style}
aria-label={props.aria}
> >
{props.icon && ( {props.icon && (
<div <div
className={ aria-label={props.text || props.title}
styles["icon-button-icon"] + className={clsx(styles["icon-button-icon"], {
` ${props.type === "primary" && "no-dark"}` "no-dark": props.type === "primary",
} })}
> >
{props.icon} {props.icon}
</div> </div>
)} )}
{props.text && ( {props.text && (
<div className={styles["icon-button-text"]}>{props.text}</div> <div
aria-label={props.text || props.title}
className={styles["icon-button-text"]}
>
{props.text}
</div>
)} )}
</button> </button>
); );

View File

@ -1,5 +1,4 @@
import DeleteIcon from "../icons/delete.svg"; import DeleteIcon from "../icons/delete.svg";
import BotIcon from "../icons/bot.svg";
import styles from "./home.module.scss"; import styles from "./home.module.scss";
import { import {
@ -12,13 +11,14 @@ import {
import { useChatStore } from "../store"; import { useChatStore } from "../store";
import Locale from "../locales"; import Locale from "../locales";
import { Link, useNavigate } from "react-router-dom"; import { useLocation, useNavigate } from "react-router-dom";
import { Path } from "../constant"; import { Path } from "../constant";
import { MaskAvatar } from "./mask"; import { MaskAvatar } from "./mask";
import { Mask } from "../store/mask"; import { Mask } from "../store/mask";
import { useRef, useEffect } from "react"; import { useRef, useEffect } from "react";
import { showConfirm } from "./ui-lib"; import { showConfirm } from "./ui-lib";
import { useMobileScreen } from "../utils"; import { useMobileScreen } from "../utils";
import clsx from "clsx";
export function ChatItem(props: { export function ChatItem(props: {
onClick?: () => void; onClick?: () => void;
@ -40,13 +40,17 @@ export function ChatItem(props: {
}); });
} }
}, [props.selected]); }, [props.selected]);
const { pathname: currentPath } = useLocation();
return ( return (
<Draggable draggableId={`${props.id}`} index={props.index}> <Draggable draggableId={`${props.id}`} index={props.index}>
{(provided) => ( {(provided) => (
<div <div
className={`${styles["chat-item"]} ${ className={clsx(styles["chat-item"], {
props.selected && styles["chat-item-selected"] [styles["chat-item-selected"]]:
}`} props.selected &&
(currentPath === Path.Chat || currentPath === Path.Home),
})}
onClick={props.onClick} onClick={props.onClick}
ref={(ele) => { ref={(ele) => {
draggableRef.current = ele; draggableRef.current = ele;
@ -60,7 +64,7 @@ export function ChatItem(props: {
> >
{props.narrow ? ( {props.narrow ? (
<div className={styles["chat-item-narrow"]}> <div className={styles["chat-item-narrow"]}>
<div className={styles["chat-item-avatar"] + " no-dark"}> <div className={clsx(styles["chat-item-avatar"], "no-dark")}>
<MaskAvatar <MaskAvatar
avatar={props.mask.avatar} avatar={props.mask.avatar}
model={props.mask.modelConfig.model} model={props.mask.modelConfig.model}

View File

@ -1,8 +1,58 @@
@import "../styles/animation.scss"; @import "../styles/animation.scss";
.attach-images {
position: absolute;
left: 30px;
bottom: 32px;
display: flex;
}
.attach-image {
cursor: default;
width: 64px;
height: 64px;
border: rgba($color: #888, $alpha: 0.2) 1px solid;
border-radius: 5px;
margin-right: 10px;
background-size: cover;
background-position: center;
background-color: var(--white);
.attach-image-mask {
width: 100%;
height: 100%;
opacity: 0;
transition: all ease 0.2s;
}
.attach-image-mask:hover {
opacity: 1;
}
.delete-image {
width: 24px;
height: 24px;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
border-radius: 5px;
float: right;
background-color: var(--white);
}
}
.chat-input-actions { .chat-input-actions {
display: flex; display: flex;
flex-wrap: wrap; flex-wrap: wrap;
justify-content: space-between;
gap: 5px;
&-end {
display: flex;
margin-left: auto;
gap: 5px;
}
.chat-input-action { .chat-input-action {
display: inline-flex; display: inline-flex;
@ -20,10 +70,6 @@
width: var(--icon-width); width: var(--icon-width);
overflow: hidden; overflow: hidden;
&:not(:last-child) {
margin-right: 5px;
}
.text { .text {
white-space: nowrap; white-space: nowrap;
padding-left: 5px; padding-left: 5px;
@ -306,6 +352,12 @@
flex-wrap: nowrap; flex-wrap: nowrap;
} }
} }
.chat-model-name {
font-size: 12px;
color: var(--black);
margin-left: 6px;
}
} }
.chat-message-container { .chat-message-container {
@ -349,6 +401,7 @@
padding: 7px; padding: 7px;
} }
} }
/* Specific styles for iOS devices */ /* Specific styles for iOS devices */
@media screen and (max-device-width: 812px) and (-webkit-min-device-pixel-ratio: 2) { @media screen and (max-device-width: 812px) and (-webkit-min-device-pixel-ratio: 2) {
@supports (-webkit-touch-callout: none) { @supports (-webkit-touch-callout: none) {
@ -366,6 +419,21 @@
margin-top: 5px; margin-top: 5px;
} }
.chat-message-tools {
font-size: 12px;
color: #aaa;
line-height: 1.5;
margin-top: 5px;
.chat-message-tool {
display: flex;
align-items: end;
svg {
margin-left: 5px;
margin-right: 5px;
}
}
}
.chat-message-item { .chat-message-item {
box-sizing: border-box; box-sizing: border-box;
max-width: 100%; max-width: 100%;
@ -381,6 +449,87 @@
transition: all ease 0.3s; transition: all ease 0.3s;
} }
.chat-message-audio {
display: flex;
align-items: center;
justify-content: space-between;
border-radius: 10px;
background-color: rgba(0, 0, 0, 0.05);
border: var(--border-in-light);
position: relative;
transition: all ease 0.3s;
margin-top: 10px;
font-size: 14px;
user-select: text;
word-break: break-word;
box-sizing: border-box;
audio {
height: 30px; /* 调整高度 */
}
}
.chat-message-item-image {
width: 100%;
margin-top: 10px;
}
.chat-message-item-images {
width: 100%;
display: grid;
justify-content: left;
grid-gap: 10px;
grid-template-columns: repeat(var(--image-count), auto);
margin-top: 10px;
}
.chat-message-item-image-multi {
object-fit: cover;
background-size: cover;
background-position: center;
background-repeat: no-repeat;
}
.chat-message-item-image,
.chat-message-item-image-multi {
box-sizing: border-box;
border-radius: 10px;
border: rgba($color: #888, $alpha: 0.2) 1px solid;
}
@media only screen and (max-width: 600px) {
$calc-image-width: calc(100vw / 3 * 2 / var(--image-count));
.chat-message-item-image-multi {
width: $calc-image-width;
height: $calc-image-width;
}
.chat-message-item-image {
max-width: calc(100vw / 3 * 2);
}
}
@media screen and (min-width: 600px) {
$max-image-width: calc(
calc(1200px - var(--sidebar-width)) / 3 * 2 / var(--image-count)
);
$image-width: calc(
calc(var(--window-width) - var(--sidebar-width)) / 3 * 2 /
var(--image-count)
);
.chat-message-item-image-multi {
width: $image-width;
height: $image-width;
max-width: $max-image-width;
max-height: $max-image-width;
}
.chat-message-item-image {
max-width: calc(calc(1200px - var(--sidebar-width)) / 3 * 2);
}
}
.chat-message-action-date { .chat-message-action-date {
font-size: 12px; font-size: 12px;
opacity: 0.2; opacity: 0.2;
@ -460,6 +609,7 @@
@include single-line(); @include single-line();
} }
.hint-content { .hint-content {
font-size: 12px; font-size: 12px;
@ -474,15 +624,26 @@
} }
.chat-input-panel-inner { .chat-input-panel-inner {
cursor: text;
display: flex; display: flex;
flex: 1; flex: 1;
border-radius: 10px;
border: var(--border-in-light);
}
.chat-input-panel-inner-attach {
padding-bottom: 80px;
}
.chat-input-panel-inner:has(.chat-input:focus) {
border: 1px solid var(--primary);
} }
.chat-input { .chat-input {
height: 100%; height: 100%;
width: 100%; width: 100%;
border-radius: 10px; border-radius: 10px;
border: var(--border-in-light); border: none;
box-shadow: 0 -2px 5px rgba(0, 0, 0, 0.03); box-shadow: 0 -2px 5px rgba(0, 0, 0, 0.03);
background-color: var(--white); background-color: var(--white);
color: var(--black); color: var(--black);
@ -495,7 +656,6 @@
} }
.chat-input:focus { .chat-input:focus {
border: 1px solid var(--primary);
} }
.chat-input-send { .chat-input-send {
@ -516,3 +676,78 @@
bottom: 30px; bottom: 30px;
} }
} }
.shortcut-key-container {
padding: 10px;
overflow-y: auto;
display: flex;
flex-direction: column;
}
.shortcut-key-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(350px, 1fr));
gap: 16px;
}
.shortcut-key-item {
display: flex;
justify-content: space-between;
align-items: center;
overflow: hidden;
padding: 10px;
background-color: var(--white);
}
.shortcut-key-title {
font-size: 14px;
color: var(--black);
}
.shortcut-key-keys {
display: flex;
gap: 8px;
}
.shortcut-key {
display: flex;
align-items: center;
justify-content: center;
border: var(--border-in-light);
border-radius: 8px;
padding: 4px;
background-color: var(--gray);
min-width: 32px;
}
.shortcut-key span {
font-size: 12px;
color: var(--black);
}
.chat-main {
display: flex;
height: 100%;
width: 100%;
position: relative;
overflow: hidden;
.chat-body-container {
height: 100%;
display: flex;
flex-direction: column;
flex: 1;
width: 100%;
}
.chat-side-panel {
position: absolute;
inset: 0;
background: var(--white);
overflow: hidden;
z-index: 10;
transform: translateX(100%);
transition: all ease 0.3s;
&-show {
transform: translateX(0);
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -6,8 +6,21 @@ import EmojiPicker, {
import { ModelType } from "../store"; import { ModelType } from "../store";
import BotIcon from "../icons/bot.svg"; import BotIconDefault from "../icons/llm-icons/default.svg";
import BlackBotIcon from "../icons/black-bot.svg"; import BotIconOpenAI from "../icons/llm-icons/openai.svg";
import BotIconGemini from "../icons/llm-icons/gemini.svg";
import BotIconGemma from "../icons/llm-icons/gemma.svg";
import BotIconClaude from "../icons/llm-icons/claude.svg";
import BotIconMeta from "../icons/llm-icons/meta.svg";
import BotIconMistral from "../icons/llm-icons/mistral.svg";
import BotIconDeepseek from "../icons/llm-icons/deepseek.svg";
import BotIconMoonshot from "../icons/llm-icons/moonshot.svg";
import BotIconQwen from "../icons/llm-icons/qwen.svg";
import BotIconWenxin from "../icons/llm-icons/wenxin.svg";
import BotIconGrok from "../icons/llm-icons/grok.svg";
import BotIconHunyuan from "../icons/llm-icons/hunyuan.svg";
import BotIconDoubao from "../icons/llm-icons/doubao.svg";
import BotIconChatglm from "../icons/llm-icons/chatglm.svg";
export function getEmojiUrl(unified: string, style: EmojiStyle) { export function getEmojiUrl(unified: string, style: EmojiStyle) {
// Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis // Whoever owns this Content Delivery Network (CDN), I am using your CDN to serve emojis
@ -21,6 +34,7 @@ export function AvatarPicker(props: {
}) { }) {
return ( return (
<EmojiPicker <EmojiPicker
width={"100%"}
lazyLoadEmojis lazyLoadEmojis
theme={EmojiTheme.AUTO} theme={EmojiTheme.AUTO}
getEmojiUrl={getEmojiUrl} getEmojiUrl={getEmojiUrl}
@ -32,14 +46,55 @@ export function AvatarPicker(props: {
} }
export function Avatar(props: { model?: ModelType; avatar?: string }) { export function Avatar(props: { model?: ModelType; avatar?: string }) {
let LlmIcon = BotIconDefault;
if (props.model) { if (props.model) {
const modelName = props.model.toLowerCase();
if (
modelName.startsWith("gpt") ||
modelName.startsWith("chatgpt") ||
modelName.startsWith("dall-e") ||
modelName.startsWith("dalle") ||
modelName.startsWith("o1") ||
modelName.startsWith("o3")
) {
LlmIcon = BotIconOpenAI;
} else if (modelName.startsWith("gemini")) {
LlmIcon = BotIconGemini;
} else if (modelName.startsWith("gemma")) {
LlmIcon = BotIconGemma;
} else if (modelName.startsWith("claude")) {
LlmIcon = BotIconClaude;
} else if (modelName.includes("llama")) {
LlmIcon = BotIconMeta;
} else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
LlmIcon = BotIconMistral;
} else if (modelName.includes("deepseek")) {
LlmIcon = BotIconDeepseek;
} else if (modelName.startsWith("moonshot")) {
LlmIcon = BotIconMoonshot;
} else if (modelName.startsWith("qwen")) {
LlmIcon = BotIconQwen;
} else if (modelName.startsWith("ernie")) {
LlmIcon = BotIconWenxin;
} else if (modelName.startsWith("grok")) {
LlmIcon = BotIconGrok;
} else if (modelName.startsWith("hunyuan")) {
LlmIcon = BotIconHunyuan;
} else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
LlmIcon = BotIconDoubao;
} else if (
modelName.includes("glm") ||
modelName.startsWith("cogview-") ||
modelName.startsWith("cogvideox-")
) {
LlmIcon = BotIconChatglm;
}
return ( return (
<div className="no-dark"> <div className="no-dark">
{props.model?.startsWith("gpt-4") ? ( <LlmIcon className="user-avatar" width={30} height={30} />
<BlackBotIcon className="user-avatar" />
) : (
<BotIcon className="user-avatar" />
)}
</div> </div>
); );
} }

View File

@ -1,3 +1,5 @@
"use client";
import React from "react"; import React from "react";
import { IconButton } from "./button"; import { IconButton } from "./button";
import GithubIcon from "../icons/github.svg"; import GithubIcon from "../icons/github.svg";
@ -6,6 +8,7 @@ import { ISSUE_URL } from "../constant";
import Locale from "../locales"; import Locale from "../locales";
import { showConfirm } from "./ui-lib"; import { showConfirm } from "./ui-lib";
import { useSyncStore } from "../store/sync"; import { useSyncStore } from "../store/sync";
import { useChatStore } from "../store/chat";
interface IErrorBoundaryState { interface IErrorBoundaryState {
hasError: boolean; hasError: boolean;
@ -28,8 +31,7 @@ export class ErrorBoundary extends React.Component<any, IErrorBoundaryState> {
try { try {
useSyncStore.getState().export(); useSyncStore.getState().export();
} finally { } finally {
localStorage.clear(); useChatStore.getState().clearAllData();
location.reload();
} }
} }

View File

@ -94,6 +94,7 @@
button { button {
flex-grow: 1; flex-grow: 1;
&:not(:last-child) { &:not(:last-child) {
margin-right: 10px; margin-right: 10px;
} }
@ -190,6 +191,59 @@
pre { pre {
overflow: hidden; overflow: hidden;
} }
.message-image {
width: 100%;
margin-top: 10px;
}
.message-images {
display: grid;
justify-content: left;
grid-gap: 10px;
grid-template-columns: repeat(var(--image-count), auto);
margin-top: 10px;
}
@media screen and (max-width: 600px) {
$image-width: calc(calc(100vw/2)/var(--image-count));
.message-image-multi {
width: $image-width;
height: $image-width;
}
.message-image {
max-width: calc(100vw/3*2);
}
}
@media screen and (min-width: 600px) {
$max-image-width: calc(900px/3*2/var(--image-count));
$image-width: calc(80vw/3*2/var(--image-count));
.message-image-multi {
width: $image-width;
height: $image-width;
max-width: $max-image-width;
max-height: $max-image-width;
}
.message-image {
max-width: calc(100vw/3*2);
}
}
.message-image-multi {
object-fit: cover;
}
.message-image,
.message-image-multi {
box-sizing: border-box;
border-radius: 10px;
border: rgba($color: #888, $alpha: 0.2) 1px solid;
}
} }
&-assistant { &-assistant {
@ -213,6 +267,5 @@
} }
} }
.default-theme { .default-theme {}
} }
}

View File

@ -1,5 +1,5 @@
/* eslint-disable @next/next/no-img-element */ /* eslint-disable @next/next/no-img-element */
import { ChatMessage, ModelType, useAppConfig, useChatStore } from "../store"; import { ChatMessage, useAppConfig, useChatStore } from "../store";
import Locale from "../locales"; import Locale from "../locales";
import styles from "./exporter.module.scss"; import styles from "./exporter.module.scss";
import { import {
@ -12,13 +12,17 @@ import {
showToast, showToast,
} from "./ui-lib"; } from "./ui-lib";
import { IconButton } from "./button"; import { IconButton } from "./button";
import { copyToClipboard, downloadAs, useMobileScreen } from "../utils"; import {
copyToClipboard,
downloadAs,
getMessageImages,
useMobileScreen,
} from "../utils";
import CopyIcon from "../icons/copy.svg"; import CopyIcon from "../icons/copy.svg";
import LoadingIcon from "../icons/three-dots.svg"; import LoadingIcon from "../icons/three-dots.svg";
import ChatGptIcon from "../icons/chatgpt.png"; import ChatGptIcon from "../icons/chatgpt.png";
import ShareIcon from "../icons/share.svg"; import ShareIcon from "../icons/share.svg";
import BotIcon from "../icons/bot.png";
import DownloadIcon from "../icons/download.svg"; import DownloadIcon from "../icons/download.svg";
import { useEffect, useMemo, useRef, useState } from "react"; import { useEffect, useMemo, useRef, useState } from "react";
@ -28,12 +32,14 @@ import dynamic from "next/dynamic";
import NextImage from "next/image"; import NextImage from "next/image";
import { toBlob, toPng } from "html-to-image"; import { toBlob, toPng } from "html-to-image";
import { DEFAULT_MASK_AVATAR } from "../store/mask";
import { prettyObject } from "../utils/format"; import { prettyObject } from "../utils/format";
import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant"; import { EXPORT_MESSAGE_CLASS_NAME } from "../constant";
import { getClientConfig } from "../config/client"; import { getClientConfig } from "../config/client";
import { ClientApi } from "../client/api"; import { type ClientApi, getClientApi } from "../client/api";
import { getMessageTextContent } from "../utils";
import { MaskAvatar } from "./mask";
import clsx from "clsx";
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => <LoadingIcon />, loading: () => <LoadingIcon />,
@ -112,9 +118,10 @@ function Steps<
return ( return (
<div <div
key={i} key={i}
className={`${styles["step"]} ${ className={clsx("clickable", styles["step"], {
styles[i <= props.index ? "step-finished" : ""] [styles["step-finished"]]: i <= props.index,
} ${i === props.index && styles["step-current"]} clickable`} [styles["step-current"]]: i === props.index,
})}
onClick={() => { onClick={() => {
props.onStepChange?.(i); props.onStepChange?.(i);
}} }}
@ -287,7 +294,7 @@ export function RenderExport(props: {
id={`${m.role}:${i}`} id={`${m.role}:${i}`}
className={EXPORT_MESSAGE_CLASS_NAME} className={EXPORT_MESSAGE_CLASS_NAME}
> >
<Markdown content={m.content} defaultShow /> <Markdown content={getMessageTextContent(m)} defaultShow />
</div> </div>
))} ))}
</div> </div>
@ -306,12 +313,7 @@ export function PreviewActions(props: {
const onRenderMsgs = (msgs: ChatMessage[]) => { const onRenderMsgs = (msgs: ChatMessage[]) => {
setShouldExport(false); setShouldExport(false);
var api: ClientApi; const api: ClientApi = getClientApi(config.modelConfig.providerName);
if (config.modelConfig.model.startsWith("gemini")) {
api = new ClientApi(ModelProvider.GeminiPro);
} else {
api = new ClientApi(ModelProvider.GPT);
}
api api
.share(msgs) .share(msgs)
@ -404,22 +406,6 @@ export function PreviewActions(props: {
); );
} }
function ExportAvatar(props: { avatar: string }) {
if (props.avatar === DEFAULT_MASK_AVATAR) {
return (
<img
src={BotIcon.src}
width={30}
height={30}
alt="bot"
className="user-avatar"
/>
);
}
return <Avatar avatar={props.avatar} />;
}
export function ImagePreviewer(props: { export function ImagePreviewer(props: {
messages: ChatMessage[]; messages: ChatMessage[];
topic: string; topic: string;
@ -524,11 +510,11 @@ export function ImagePreviewer(props: {
messages={props.messages} messages={props.messages}
/> />
<div <div
className={`${styles["preview-body"]} ${styles["default-theme"]}`} className={clsx(styles["preview-body"], styles["default-theme"])}
ref={previewRef} ref={previewRef}
> >
<div className={styles["chat-info"]}> <div className={styles["chat-info"]}>
<div className={styles["logo"] + " no-dark"}> <div className={clsx(styles["logo"], "no-dark")}>
<NextImage <NextImage
src={ChatGptIcon.src} src={ChatGptIcon.src}
alt="logo" alt="logo"
@ -540,12 +526,15 @@ export function ImagePreviewer(props: {
<div> <div>
<div className={styles["main-title"]}>NextChat</div> <div className={styles["main-title"]}>NextChat</div>
<div className={styles["sub-title"]}> <div className={styles["sub-title"]}>
github.com/Yidadaa/ChatGPT-Next-Web github.com/ChatGPTNextWeb/ChatGPT-Next-Web
</div> </div>
<div className={styles["icons"]}> <div className={styles["icons"]}>
<ExportAvatar avatar={config.avatar} /> <MaskAvatar avatar={config.avatar} />
<span className={styles["icon-space"]}>&</span> <span className={styles["icon-space"]}>&</span>
<ExportAvatar avatar={mask.avatar} /> <MaskAvatar
avatar={mask.avatar}
model={session.mask.modelConfig.model}
/>
</div> </div>
</div> </div>
<div> <div>
@ -569,21 +558,54 @@ export function ImagePreviewer(props: {
{props.messages.map((m, i) => { {props.messages.map((m, i) => {
return ( return (
<div <div
className={styles["message"] + " " + styles["message-" + m.role]} className={clsx(styles["message"], styles["message-" + m.role])}
key={i} key={i}
> >
<div className={styles["avatar"]}> <div className={styles["avatar"]}>
<ExportAvatar {m.role === "user" ? (
avatar={m.role === "user" ? config.avatar : mask.avatar} <Avatar avatar={config.avatar}></Avatar>
/> ) : (
<MaskAvatar
avatar={session.mask.avatar}
model={m.model || session.mask.modelConfig.model}
/>
)}
</div> </div>
<div className={styles["body"]}> <div className={styles["body"]}>
<Markdown <Markdown
content={m.content} content={getMessageTextContent(m)}
fontSize={config.fontSize} fontSize={config.fontSize}
fontFamily={config.fontFamily}
defaultShow defaultShow
/> />
{getMessageImages(m).length == 1 && (
<img
key={i}
src={getMessageImages(m)[0]}
alt="message"
className={styles["message-image"]}
/>
)}
{getMessageImages(m).length > 1 && (
<div
className={styles["message-images"]}
style={
{
"--image-count": getMessageImages(m).length,
} as React.CSSProperties
}
>
{getMessageImages(m).map((src, i) => (
<img
key={i}
src={src}
alt="message"
className={styles["message-image-multi"]}
/>
))}
</div>
)}
</div> </div>
</div> </div>
); );
@ -602,8 +624,10 @@ export function MarkdownPreviewer(props: {
props.messages props.messages
.map((m) => { .map((m) => {
return m.role === "user" return m.role === "user"
? `## ${Locale.Export.MessageFromYou}:\n${m.content}` ? `## ${Locale.Export.MessageFromYou}:\n${getMessageTextContent(m)}`
: `## ${Locale.Export.MessageFromChatGPT}:\n${m.content.trim()}`; : `## ${Locale.Export.MessageFromChatGPT}:\n${getMessageTextContent(
m,
).trim()}`;
}) })
.join("\n\n"); .join("\n\n");

View File

@ -137,12 +137,21 @@
position: relative; position: relative;
padding-top: 20px; padding-top: 20px;
padding-bottom: 20px; padding-bottom: 20px;
display: flex;
justify-content: space-between;
align-items: center;
&-narrow {
justify-content: center;
}
} }
.sidebar-logo { .sidebar-logo {
position: absolute; display: inline-flex;
right: 0; }
bottom: 18px;
.sidebar-title-container {
display: inline-flex;
flex-direction: column;
} }
.sidebar-title { .sidebar-title {

View File

@ -2,8 +2,7 @@
require("../polyfill"); require("../polyfill");
import { useState, useEffect } from "react"; import { useEffect, useState } from "react";
import styles from "./home.module.scss"; import styles from "./home.module.scss";
import BotIcon from "../icons/bot.svg"; import BotIcon from "../icons/bot.svg";
@ -12,33 +11,39 @@ import LoadingIcon from "../icons/three-dots.svg";
import { getCSSVar, useMobileScreen } from "../utils"; import { getCSSVar, useMobileScreen } from "../utils";
import dynamic from "next/dynamic"; import dynamic from "next/dynamic";
import { ModelProvider, Path, SlotID } from "../constant"; import { Path, SlotID } from "../constant";
import { ErrorBoundary } from "./error"; import { ErrorBoundary } from "./error";
import { getISOLang, getLang } from "../locales"; import { getISOLang, getLang } from "../locales";
import { import {
HashRouter as Router, HashRouter as Router,
Routes,
Route, Route,
Routes,
useLocation, useLocation,
} from "react-router-dom"; } from "react-router-dom";
import { SideBar } from "./sidebar"; import { SideBar } from "./sidebar";
import { useAppConfig } from "../store/config"; import { useAppConfig } from "../store/config";
import { AuthPage } from "./auth"; import { AuthPage } from "./auth";
import { getClientConfig } from "../config/client"; import { getClientConfig } from "../config/client";
import { ClientApi } from "../client/api"; import { type ClientApi, getClientApi } from "../client/api";
import { useAccessStore } from "../store"; import { useAccessStore } from "../store";
import clsx from "clsx";
import { initializeMcpSystem, isMcpEnabled } from "../mcp/actions";
export function Loading(props: { noLogo?: boolean }) { export function Loading(props: { noLogo?: boolean }) {
return ( return (
<div className={styles["loading-content"] + " no-dark"}> <div className={clsx("no-dark", styles["loading-content"])}>
{!props.noLogo && <BotIcon />} {!props.noLogo && <BotIcon />}
<LoadingIcon /> <LoadingIcon />
</div> </div>
); );
} }
const Artifacts = dynamic(async () => (await import("./artifacts")).Artifacts, {
loading: () => <Loading noLogo />,
});
const Settings = dynamic(async () => (await import("./settings")).Settings, { const Settings = dynamic(async () => (await import("./settings")).Settings, {
loading: () => <Loading noLogo />, loading: () => <Loading noLogo />,
}); });
@ -55,6 +60,28 @@ const MaskPage = dynamic(async () => (await import("./mask")).MaskPage, {
loading: () => <Loading noLogo />, loading: () => <Loading noLogo />,
}); });
const PluginPage = dynamic(async () => (await import("./plugin")).PluginPage, {
loading: () => <Loading noLogo />,
});
const SearchChat = dynamic(
async () => (await import("./search-chat")).SearchChatPage,
{
loading: () => <Loading noLogo />,
},
);
const Sd = dynamic(async () => (await import("./sd")).Sd, {
loading: () => <Loading noLogo />,
});
const McpMarketPage = dynamic(
async () => (await import("./mcp-market")).McpMarketPage,
{
loading: () => <Loading noLogo />,
},
);
export function useSwitchTheme() { export function useSwitchTheme() {
const config = useAppConfig(); const config = useAppConfig();
@ -122,11 +149,23 @@ const loadAsyncGoogleFont = () => {
document.head.appendChild(linkEl); document.head.appendChild(linkEl);
}; };
export function WindowContent(props: { children: React.ReactNode }) {
return (
<div className={styles["window-content"]} id={SlotID.AppBody}>
{props?.children}
</div>
);
}
function Screen() { function Screen() {
const config = useAppConfig(); const config = useAppConfig();
const location = useLocation(); const location = useLocation();
const isArtifact = location.pathname.includes(Path.Artifacts);
const isHome = location.pathname === Path.Home; const isHome = location.pathname === Path.Home;
const isAuth = location.pathname === Path.Auth; const isAuth = location.pathname === Path.Auth;
const isSd = location.pathname === Path.Sd;
const isSdNew = location.pathname === Path.SdNew;
const isMobileScreen = useMobileScreen(); const isMobileScreen = useMobileScreen();
const shouldTightBorder = const shouldTightBorder =
getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen); getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen);
@ -135,34 +174,48 @@ function Screen() {
loadAsyncGoogleFont(); loadAsyncGoogleFont();
}, []); }, []);
if (isArtifact) {
return (
<Routes>
<Route path="/artifacts/:id" element={<Artifacts />} />
</Routes>
);
}
const renderContent = () => {
if (isAuth) return <AuthPage />;
if (isSd) return <Sd />;
if (isSdNew) return <Sd />;
return (
<>
<SideBar
className={clsx({
[styles["sidebar-show"]]: isHome,
})}
/>
<WindowContent>
<Routes>
<Route path={Path.Home} element={<Chat />} />
<Route path={Path.NewChat} element={<NewChat />} />
<Route path={Path.Masks} element={<MaskPage />} />
<Route path={Path.Plugins} element={<PluginPage />} />
<Route path={Path.SearchChat} element={<SearchChat />} />
<Route path={Path.Chat} element={<Chat />} />
<Route path={Path.Settings} element={<Settings />} />
<Route path={Path.McpMarket} element={<McpMarketPage />} />
</Routes>
</WindowContent>
</>
);
};
return ( return (
<div <div
className={ className={clsx(styles.container, {
styles.container + [styles["tight-container"]]: shouldTightBorder,
` ${shouldTightBorder ? styles["tight-container"] : styles.container} ${ [styles["rtl-screen"]]: getLang() === "ar",
getLang() === "ar" ? styles["rtl-screen"] : "" })}
}`
}
> >
{isAuth ? ( {renderContent()}
<>
<AuthPage />
</>
) : (
<>
<SideBar className={isHome ? styles["sidebar-show"] : ""} />
<div className={styles["window-content"]} id={SlotID.AppBody}>
<Routes>
<Route path={Path.Home} element={<Chat />} />
<Route path={Path.NewChat} element={<NewChat />} />
<Route path={Path.Masks} element={<MaskPage />} />
<Route path={Path.Chat} element={<Chat />} />
<Route path={Path.Settings} element={<Settings />} />
</Routes>
</div>
</>
)}
</div> </div>
); );
} }
@ -170,12 +223,8 @@ function Screen() {
export function useLoadData() { export function useLoadData() {
const config = useAppConfig(); const config = useAppConfig();
var api: ClientApi; const api: ClientApi = getClientApi(config.modelConfig.providerName);
if (config.modelConfig.model.startsWith("gemini")) {
api = new ClientApi(ModelProvider.GeminiPro);
} else {
api = new ClientApi(ModelProvider.GPT);
}
useEffect(() => { useEffect(() => {
(async () => { (async () => {
const models = await api.llm.models(); const models = await api.llm.models();
@ -193,6 +242,20 @@ export function Home() {
useEffect(() => { useEffect(() => {
console.log("[Config] got config from build time", getClientConfig()); console.log("[Config] got config from build time", getClientConfig());
useAccessStore.getState().fetch(); useAccessStore.getState().fetch();
const initMcp = async () => {
try {
const enabled = await isMcpEnabled();
if (enabled) {
console.log("[MCP] initializing...");
await initializeMcpSystem();
console.log("[MCP] initialized");
}
} catch (err) {
console.error("[MCP] failed to initialize:", err);
}
};
initMcp();
}, []); }, []);
if (!useHasHydrated()) { if (!useHasHydrated()) {

View File

@ -1,5 +1,6 @@
import * as React from "react"; import * as React from "react";
import styles from "./input-range.module.scss"; import styles from "./input-range.module.scss";
import clsx from "clsx";
interface InputRangeProps { interface InputRangeProps {
onChange: React.ChangeEventHandler<HTMLInputElement>; onChange: React.ChangeEventHandler<HTMLInputElement>;
@ -9,6 +10,7 @@ interface InputRangeProps {
min: string; min: string;
max: string; max: string;
step: string; step: string;
aria: string;
} }
export function InputRange({ export function InputRange({
@ -19,11 +21,13 @@ export function InputRange({
min, min,
max, max,
step, step,
aria,
}: InputRangeProps) { }: InputRangeProps) {
return ( return (
<div className={styles["input-range"] + ` ${className ?? ""}`}> <div className={clsx(styles["input-range"], className)}>
{title || value} {title || value}
<input <input
aria-label={aria}
type="range" type="range"
title={title} title={title}
value={value} value={value}

View File

@ -6,13 +6,24 @@ import RehypeKatex from "rehype-katex";
import RemarkGfm from "remark-gfm"; import RemarkGfm from "remark-gfm";
import RehypeHighlight from "rehype-highlight"; import RehypeHighlight from "rehype-highlight";
import { useRef, useState, RefObject, useEffect, useMemo } from "react"; import { useRef, useState, RefObject, useEffect, useMemo } from "react";
import { copyToClipboard } from "../utils"; import { copyToClipboard, useWindowSize } from "../utils";
import mermaid from "mermaid"; import mermaid from "mermaid";
import Locale from "../locales";
import LoadingIcon from "../icons/three-dots.svg"; import LoadingIcon from "../icons/three-dots.svg";
import ReloadButtonIcon from "../icons/reload.svg";
import React from "react"; import React from "react";
import { useDebouncedCallback } from "use-debounce"; import { useDebouncedCallback } from "use-debounce";
import { showImageModal } from "./ui-lib"; import { showImageModal, FullScreen } from "./ui-lib";
import {
ArtifactsShareButton,
HTMLPreview,
HTMLPreviewHander,
} from "./artifacts";
import { useChatStore } from "../store";
import { IconButton } from "./button";
import { useAppConfig } from "../store/config";
import clsx from "clsx";
export function Mermaid(props: { code: string }) { export function Mermaid(props: { code: string }) {
const ref = useRef<HTMLDivElement>(null); const ref = useRef<HTMLDivElement>(null);
@ -47,7 +58,7 @@ export function Mermaid(props: { code: string }) {
return ( return (
<div <div
className="no-dark mermaid" className={clsx("no-dark", "mermaid")}
style={{ style={{
cursor: "pointer", cursor: "pointer",
overflow: "auto", overflow: "auto",
@ -62,65 +73,204 @@ export function Mermaid(props: { code: string }) {
export function PreCode(props: { children: any }) { export function PreCode(props: { children: any }) {
const ref = useRef<HTMLPreElement>(null); const ref = useRef<HTMLPreElement>(null);
const refText = ref.current?.innerText; const previewRef = useRef<HTMLPreviewHander>(null);
const [mermaidCode, setMermaidCode] = useState(""); const [mermaidCode, setMermaidCode] = useState("");
const [htmlCode, setHtmlCode] = useState("");
const { height } = useWindowSize();
const chatStore = useChatStore();
const session = chatStore.currentSession();
const renderMermaid = useDebouncedCallback(() => { const renderArtifacts = useDebouncedCallback(() => {
if (!ref.current) return; if (!ref.current) return;
const mermaidDom = ref.current.querySelector("code.language-mermaid"); const mermaidDom = ref.current.querySelector("code.language-mermaid");
if (mermaidDom) { if (mermaidDom) {
setMermaidCode((mermaidDom as HTMLElement).innerText); setMermaidCode((mermaidDom as HTMLElement).innerText);
} }
const htmlDom = ref.current.querySelector("code.language-html");
const refText = ref.current.querySelector("code")?.innerText;
if (htmlDom) {
setHtmlCode((htmlDom as HTMLElement).innerText);
} else if (
refText?.startsWith("<!DOCTYPE") ||
refText?.startsWith("<svg") ||
refText?.startsWith("<?xml")
) {
setHtmlCode(refText);
}
}, 600); }, 600);
const config = useAppConfig();
const enableArtifacts =
session.mask?.enableArtifacts !== false && config.enableArtifacts;
//Wrap the paragraph for plain-text
useEffect(() => { useEffect(() => {
setTimeout(renderMermaid, 1); if (ref.current) {
// eslint-disable-next-line react-hooks/exhaustive-deps const codeElements = ref.current.querySelectorAll(
}, [refText]); "code",
) as NodeListOf<HTMLElement>;
const wrapLanguages = [
"",
"md",
"markdown",
"text",
"txt",
"plaintext",
"tex",
"latex",
];
codeElements.forEach((codeElement) => {
let languageClass = codeElement.className.match(/language-(\w+)/);
let name = languageClass ? languageClass[1] : "";
if (wrapLanguages.includes(name)) {
codeElement.style.whiteSpace = "pre-wrap";
}
});
setTimeout(renderArtifacts, 1);
}
}, []);
return ( return (
<> <>
{mermaidCode.length > 0 && (
<Mermaid code={mermaidCode} key={mermaidCode} />
)}
<pre ref={ref}> <pre ref={ref}>
<span <span
className="copy-code-button" className="copy-code-button"
onClick={() => { onClick={() => {
if (ref.current) { if (ref.current) {
const code = ref.current.innerText; copyToClipboard(
copyToClipboard(code); ref.current.querySelector("code")?.innerText ?? "",
);
} }
}} }}
></span> ></span>
{props.children} {props.children}
</pre> </pre>
{mermaidCode.length > 0 && (
<Mermaid code={mermaidCode} key={mermaidCode} />
)}
{htmlCode.length > 0 && enableArtifacts && (
<FullScreen className="no-dark html" right={70}>
<ArtifactsShareButton
style={{ position: "absolute", right: 20, top: 10 }}
getCode={() => htmlCode}
/>
<IconButton
style={{ position: "absolute", right: 120, top: 10 }}
bordered
icon={<ReloadButtonIcon />}
shadow
onClick={() => previewRef.current?.reload()}
/>
<HTMLPreview
ref={previewRef}
code={htmlCode}
autoHeight={!document.fullscreenElement}
height={!document.fullscreenElement ? 600 : height}
/>
</FullScreen>
)}
</> </>
); );
} }
function escapeDollarNumber(text: string) { function CustomCode(props: { children: any; className?: string }) {
let escapedText = ""; const chatStore = useChatStore();
const session = chatStore.currentSession();
const config = useAppConfig();
const enableCodeFold =
session.mask?.enableCodeFold !== false && config.enableCodeFold;
for (let i = 0; i < text.length; i += 1) { const ref = useRef<HTMLPreElement>(null);
let char = text[i]; const [collapsed, setCollapsed] = useState(true);
const nextChar = text[i + 1] || " "; const [showToggle, setShowToggle] = useState(false);
if (char === "$" && nextChar >= "0" && nextChar <= "9") { useEffect(() => {
char = "\\$"; if (ref.current) {
const codeHeight = ref.current.scrollHeight;
setShowToggle(codeHeight > 400);
ref.current.scrollTop = ref.current.scrollHeight;
} }
}, [props.children]);
escapedText += char; const toggleCollapsed = () => {
setCollapsed((collapsed) => !collapsed);
};
const renderShowMoreButton = () => {
if (showToggle && enableCodeFold && collapsed) {
return (
<div
className={clsx("show-hide-button", {
collapsed,
expanded: !collapsed,
})}
>
<button onClick={toggleCollapsed}>{Locale.NewChat.More}</button>
</div>
);
}
return null;
};
return (
<>
<code
className={clsx(props?.className)}
ref={ref}
style={{
maxHeight: enableCodeFold && collapsed ? "400px" : "none",
overflowY: "hidden",
}}
>
{props.children}
</code>
{renderShowMoreButton()}
</>
);
}
function escapeBrackets(text: string) {
const pattern =
/(```[\s\S]*?```|`.*?`)|\\\[([\s\S]*?[^\\])\\\]|\\\((.*?)\\\)/g;
return text.replace(
pattern,
(match, codeBlock, squareBracket, roundBracket) => {
if (codeBlock) {
return codeBlock;
} else if (squareBracket) {
return `$$${squareBracket}$$`;
} else if (roundBracket) {
return `$${roundBracket}$`;
}
return match;
},
);
}
function tryWrapHtmlCode(text: string) {
// try add wrap html code (fixed: html codeblock include 2 newline)
// ignore embed codeblock
if (text.includes("```")) {
return text;
} }
return text
return escapedText; .replace(
/([`]*?)(\w*?)([\n\r]*?)(<!DOCTYPE html>)/g,
(match, quoteStart, lang, newLine, doctype) => {
return !quoteStart ? "\n```html\n" + doctype : match;
},
)
.replace(
/(<\/body>)([\r\n\s]*?)(<\/html>)([\n\r]*)([`]*)([\n\r]*?)/g,
(match, bodyEnd, space, htmlEnd, newLine, quoteEnd) => {
return !quoteEnd ? bodyEnd + space + htmlEnd + "\n```\n" : match;
},
);
} }
function _MarkDownContent(props: { content: string }) { function _MarkDownContent(props: { content: string }) {
const escapedContent = useMemo( const escapedContent = useMemo(() => {
() => escapeDollarNumber(props.content), return tryWrapHtmlCode(escapeBrackets(props.content));
[props.content], }, [props.content]);
);
return ( return (
<ReactMarkdown <ReactMarkdown
@ -137,9 +287,24 @@ function _MarkDownContent(props: { content: string }) {
]} ]}
components={{ components={{
pre: PreCode, pre: PreCode,
code: CustomCode,
p: (pProps) => <p {...pProps} dir="auto" />, p: (pProps) => <p {...pProps} dir="auto" />,
a: (aProps) => { a: (aProps) => {
const href = aProps.href || ""; const href = aProps.href || "";
if (/\.(aac|mp3|opus|wav)$/.test(href)) {
return (
<figure>
<audio controls src={href}></audio>
</figure>
);
}
if (/\.(3gp|3g2|webm|ogv|mpeg|mp4|avi)$/.test(href)) {
return (
<video controls width="99.9%">
<source src={href} />
</video>
);
}
const isInternal = /^\/#/i.test(href); const isInternal = /^\/#/i.test(href);
const target = isInternal ? "_self" : aProps.target ?? "_blank"; const target = isInternal ? "_self" : aProps.target ?? "_blank";
return <a {...aProps} target={target} />; return <a {...aProps} target={target} />;
@ -158,6 +323,7 @@ export function Markdown(
content: string; content: string;
loading?: boolean; loading?: boolean;
fontSize?: number; fontSize?: number;
fontFamily?: string;
parentRef?: RefObject<HTMLDivElement>; parentRef?: RefObject<HTMLDivElement>;
defaultShow?: boolean; defaultShow?: boolean;
} & React.DOMAttributes<HTMLDivElement>, } & React.DOMAttributes<HTMLDivElement>,
@ -169,6 +335,7 @@ export function Markdown(
className="markdown-body" className="markdown-body"
style={{ style={{
fontSize: `${props.fontSize ?? 14}px`, fontSize: `${props.fontSize ?? 14}px`,
fontFamily: props.fontFamily || "inherit",
}} }}
ref={mdRef} ref={mdRef}
onContextMenu={props.onContextMenu} onContextMenu={props.onContextMenu}

View File

@ -22,7 +22,7 @@ import {
useAppConfig, useAppConfig,
useChatStore, useChatStore,
} from "../store"; } from "../store";
import { ROLES } from "../client/api"; import { MultimodalContent, ROLES } from "../client/api";
import { import {
Input, Input,
List, List,
@ -37,19 +37,25 @@ import Locale, { AllLangs, ALL_LANG_OPTIONS, Lang } from "../locales";
import { useNavigate } from "react-router-dom"; import { useNavigate } from "react-router-dom";
import chatStyle from "./chat.module.scss"; import chatStyle from "./chat.module.scss";
import { useEffect, useState } from "react"; import { useState } from "react";
import { copyToClipboard, downloadAs, readFromFile } from "../utils"; import {
copyToClipboard,
downloadAs,
getMessageImages,
readFromFile,
} from "../utils";
import { Updater } from "../typing"; import { Updater } from "../typing";
import { ModelConfigList } from "./model-config"; import { ModelConfigList } from "./model-config";
import { FileName, Path } from "../constant"; import { FileName, Path } from "../constant";
import { BUILTIN_MASK_STORE } from "../masks"; import { BUILTIN_MASK_STORE } from "../masks";
import { nanoid } from "nanoid";
import { import {
DragDropContext, DragDropContext,
Droppable, Droppable,
Draggable, Draggable,
OnDragEndResponder, OnDragEndResponder,
} from "@hello-pangea/dnd"; } from "@hello-pangea/dnd";
import { getMessageTextContent } from "../utils";
import clsx from "clsx";
// drag and drop helper function // drag and drop helper function
function reorder<T>(list: T[], startIndex: number, endIndex: number): T[] { function reorder<T>(list: T[], startIndex: number, endIndex: number): T[] {
@ -121,6 +127,8 @@ export function MaskConfig(props: {
onClose={() => setShowPicker(false)} onClose={() => setShowPicker(false)}
> >
<div <div
tabIndex={0}
aria-label={Locale.Mask.Config.Avatar}
onClick={() => setShowPicker(true)} onClick={() => setShowPicker(true)}
style={{ cursor: "pointer" }} style={{ cursor: "pointer" }}
> >
@ -133,6 +141,7 @@ export function MaskConfig(props: {
</ListItem> </ListItem>
<ListItem title={Locale.Mask.Config.Name}> <ListItem title={Locale.Mask.Config.Name}>
<input <input
aria-label={Locale.Mask.Config.Name}
type="text" type="text"
value={props.mask.name} value={props.mask.name}
onInput={(e) => onInput={(e) =>
@ -147,6 +156,7 @@ export function MaskConfig(props: {
subTitle={Locale.Mask.Config.HideContext.SubTitle} subTitle={Locale.Mask.Config.HideContext.SubTitle}
> >
<input <input
aria-label={Locale.Mask.Config.HideContext.Title}
type="checkbox" type="checkbox"
checked={props.mask.hideContext} checked={props.mask.hideContext}
onChange={(e) => { onChange={(e) => {
@ -157,12 +167,48 @@ export function MaskConfig(props: {
></input> ></input>
</ListItem> </ListItem>
{globalConfig.enableArtifacts && (
<ListItem
title={Locale.Mask.Config.Artifacts.Title}
subTitle={Locale.Mask.Config.Artifacts.SubTitle}
>
<input
aria-label={Locale.Mask.Config.Artifacts.Title}
type="checkbox"
checked={props.mask.enableArtifacts !== false}
onChange={(e) => {
props.updateMask((mask) => {
mask.enableArtifacts = e.currentTarget.checked;
});
}}
></input>
</ListItem>
)}
{globalConfig.enableCodeFold && (
<ListItem
title={Locale.Mask.Config.CodeFold.Title}
subTitle={Locale.Mask.Config.CodeFold.SubTitle}
>
<input
aria-label={Locale.Mask.Config.CodeFold.Title}
type="checkbox"
checked={props.mask.enableCodeFold !== false}
onChange={(e) => {
props.updateMask((mask) => {
mask.enableCodeFold = e.currentTarget.checked;
});
}}
></input>
</ListItem>
)}
{!props.shouldSyncFromGlobal ? ( {!props.shouldSyncFromGlobal ? (
<ListItem <ListItem
title={Locale.Mask.Config.Share.Title} title={Locale.Mask.Config.Share.Title}
subTitle={Locale.Mask.Config.Share.SubTitle} subTitle={Locale.Mask.Config.Share.SubTitle}
> >
<IconButton <IconButton
aria={Locale.Mask.Config.Share.Title}
icon={<CopyIcon />} icon={<CopyIcon />}
text={Locale.Mask.Config.Share.Action} text={Locale.Mask.Config.Share.Action}
onClick={copyMaskLink} onClick={copyMaskLink}
@ -176,6 +222,7 @@ export function MaskConfig(props: {
subTitle={Locale.Mask.Config.Sync.SubTitle} subTitle={Locale.Mask.Config.Sync.SubTitle}
> >
<input <input
aria-label={Locale.Mask.Config.Sync.Title}
type="checkbox" type="checkbox"
checked={props.mask.syncGlobalConfig} checked={props.mask.syncGlobalConfig}
onChange={async (e) => { onChange={async (e) => {
@ -244,7 +291,7 @@ function ContextPromptItem(props: {
</> </>
)} )}
<Input <Input
value={props.prompt.content} value={getMessageTextContent(props.prompt)}
type="text" type="text"
className={chatStyle["context-content"]} className={chatStyle["context-content"]}
rows={focusingInput ? 5 : 1} rows={focusingInput ? 5 : 1}
@ -289,7 +336,18 @@ export function ContextPrompts(props: {
}; };
const updateContextPrompt = (i: number, prompt: ChatMessage) => { const updateContextPrompt = (i: number, prompt: ChatMessage) => {
props.updateContext((context) => (context[i] = prompt)); props.updateContext((context) => {
const images = getMessageImages(context[i]);
context[i] = prompt;
if (images.length > 0) {
const text = getMessageTextContent(context[i]);
const newContext: MultimodalContent[] = [{ type: "text", text }];
for (const img of images) {
newContext.push({ type: "image_url", image_url: { url: img } });
}
context[i].content = newContext;
}
});
}; };
const onDragEnd: OnDragEndResponder = (result) => { const onDragEnd: OnDragEndResponder = (result) => {
@ -387,7 +445,7 @@ export function MaskPage() {
const maskStore = useMaskStore(); const maskStore = useMaskStore();
const chatStore = useChatStore(); const chatStore = useChatStore();
const [filterLang, setFilterLang] = useState<Lang>(); const filterLang = maskStore.language;
const allMasks = maskStore const allMasks = maskStore
.getAll() .getAll()
@ -494,9 +552,9 @@ export function MaskPage() {
onChange={(e) => { onChange={(e) => {
const value = e.currentTarget.value; const value = e.currentTarget.value;
if (value === Locale.Settings.Lang.All) { if (value === Locale.Settings.Lang.All) {
setFilterLang(undefined); maskStore.setLanguage(undefined);
} else { } else {
setFilterLang(value as Lang); maskStore.setLanguage(value as Lang);
} }
}} }}
> >
@ -531,7 +589,7 @@ export function MaskPage() {
</div> </div>
<div className={styles["mask-title"]}> <div className={styles["mask-title"]}>
<div className={styles["mask-name"]}>{m.name}</div> <div className={styles["mask-name"]}>{m.name}</div>
<div className={styles["mask-info"] + " one-line"}> <div className={clsx(styles["mask-info"], "one-line")}>
{`${Locale.Mask.Item.Info(m.context.length)} / ${ {`${Locale.Mask.Item.Info(m.context.length)} / ${
ALL_LANG_OPTIONS[m.lang] ALL_LANG_OPTIONS[m.lang]
} / ${m.modelConfig.model}`} } / ${m.modelConfig.model}`}

View File

@ -0,0 +1,657 @@
@import "../styles/animation.scss";
.mcp-market-page {
height: 100%;
display: flex;
flex-direction: column;
.loading-indicator {
font-size: 12px;
color: var(--primary);
margin-left: 8px;
font-weight: normal;
opacity: 0.8;
}
.mcp-market-page-body {
padding: 20px;
overflow-y: auto;
.loading-container,
.empty-container {
display: flex;
justify-content: center;
align-items: center;
min-height: 200px;
width: 100%;
background-color: var(--white);
border: var(--border-in-light);
border-radius: 10px;
animation: slide-in ease 0.3s;
}
.loading-text,
.empty-text {
font-size: 14px;
color: var(--black);
opacity: 0.5;
text-align: center;
}
.mcp-market-filter {
width: 100%;
max-width: 100%;
margin-bottom: 20px;
animation: slide-in ease 0.3s;
height: 40px;
display: flex;
.search-bar {
flex-grow: 1;
max-width: 100%;
min-width: 0;
}
}
.server-list {
display: flex;
flex-direction: column;
gap: 1px;
}
.mcp-market-item {
padding: 20px;
border: var(--border-in-light);
animation: slide-in ease 0.3s;
background-color: var(--white);
transition: all 0.3s ease;
&.disabled {
opacity: 0.7;
pointer-events: none;
}
&:not(:last-child) {
border-bottom: 0;
}
&:first-child {
border-top-left-radius: 10px;
border-top-right-radius: 10px;
}
&:last-child {
border-bottom-left-radius: 10px;
border-bottom-right-radius: 10px;
}
&.loading {
position: relative;
&::after {
content: "";
position: absolute;
top: 0;
left: 0;
right: 0;
bottom: 0;
background: linear-gradient(
90deg,
transparent,
rgba(255, 255, 255, 0.2),
transparent
);
background-size: 200% 100%;
animation: loading-pulse 1.5s infinite;
}
}
.operation-status {
display: inline-flex;
align-items: center;
margin-left: 10px;
padding: 2px 8px;
border-radius: 4px;
font-size: 12px;
background-color: #16a34a;
color: #fff;
animation: pulse 1.5s infinite;
&[data-status="stopping"] {
background-color: #9ca3af;
}
&[data-status="starting"] {
background-color: #4ade80;
}
&[data-status="error"] {
background-color: #f87171;
}
}
.mcp-market-header {
display: flex;
justify-content: space-between;
align-items: flex-start;
width: 100%;
.mcp-market-title {
flex-grow: 1;
margin-right: 20px;
max-width: calc(100% - 300px);
}
.mcp-market-name {
font-size: 14px;
font-weight: bold;
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 8px;
.server-status {
display: inline-flex;
align-items: center;
margin-left: 10px;
padding: 2px 8px;
border-radius: 4px;
font-size: 12px;
background-color: #22c55e;
color: #fff;
&.error {
background-color: #ef4444;
}
&.stopped {
background-color: #6b7280;
}
&.initializing {
background-color: #f59e0b;
animation: pulse 1.5s infinite;
}
.error-message {
margin-left: 4px;
font-size: 12px;
}
}
}
.repo-link {
color: var(--primary);
font-size: 12px;
display: inline-flex;
align-items: center;
gap: 4px;
text-decoration: none;
opacity: 0.8;
transition: opacity 0.2s;
&:hover {
opacity: 1;
}
svg {
width: 14px;
height: 14px;
}
}
.tags-container {
display: flex;
gap: 4px;
flex-wrap: wrap;
margin-bottom: 8px;
}
.tag {
background: var(--gray);
color: var(--black);
padding: 2px 6px;
border-radius: 4px;
font-size: 10px;
opacity: 0.8;
}
.mcp-market-info {
color: var(--black);
font-size: 12px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.mcp-market-actions {
display: flex;
gap: 12px;
align-items: flex-start;
flex-shrink: 0;
min-width: 180px;
justify-content: flex-end;
}
}
}
}
.array-input {
display: flex;
flex-direction: column;
gap: 12px;
width: 100%;
padding: 16px;
border: 1px solid var(--gray-200);
border-radius: 10px;
background-color: var(--white);
.array-input-item {
display: flex;
gap: 8px;
align-items: center;
width: 100%;
padding: 0;
input {
width: 100%;
padding: 8px 12px;
background-color: var(--gray-50);
border-radius: 6px;
transition: all 0.3s ease;
font-size: 13px;
border: 1px solid var(--gray-200);
&:hover {
background-color: var(--gray-100);
border-color: var(--gray-300);
}
&:focus {
background-color: var(--white);
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
&::placeholder {
color: var(--gray-300);
}
}
}
:global(.icon-button.add-path-button) {
width: 100%;
background-color: var(--primary);
color: white;
padding: 8px 12px;
border-radius: 6px;
transition: all 0.3s ease;
margin-top: 8px;
display: flex;
align-items: center;
justify-content: center;
border: none;
height: 36px;
&:hover {
background-color: var(--primary-dark);
}
svg {
width: 16px;
height: 16px;
margin-right: 4px;
filter: brightness(2);
}
}
}
.path-list {
width: 100%;
display: flex;
flex-direction: column;
gap: 10px;
.path-item {
display: flex;
gap: 10px;
width: 100%;
input {
flex: 1;
width: 100%;
max-width: 100%;
padding: 10px;
border: var(--border-in-light);
border-radius: 10px;
box-sizing: border-box;
font-size: 14px;
background-color: var(--white);
color: var(--black);
&:hover {
border-color: var(--gray-300);
}
&:focus {
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
}
.browse-button {
padding: 8px;
border: var(--border-in-light);
border-radius: 10px;
background-color: transparent;
color: var(--black-50);
&:hover {
border-color: var(--primary);
color: var(--primary);
background-color: transparent;
}
svg {
width: 16px;
height: 16px;
}
}
.delete-button {
padding: 8px;
border: var(--border-in-light);
border-radius: 10px;
background-color: transparent;
color: var(--black-50);
&:hover {
border-color: var(--danger);
color: var(--danger);
background-color: transparent;
}
svg {
width: 16px;
height: 16px;
}
}
.file-input {
display: none;
}
}
.add-button {
align-self: flex-start;
display: flex;
align-items: center;
gap: 5px;
padding: 8px 12px;
background-color: transparent;
border: var(--border-in-light);
border-radius: 10px;
color: var(--black);
font-size: 12px;
margin-top: 5px;
&:hover {
border-color: var(--primary);
color: var(--primary);
background-color: transparent;
}
svg {
width: 16px;
height: 16px;
}
}
}
.config-section {
width: 100%;
.config-header {
margin-bottom: 12px;
.config-title {
font-size: 14px;
font-weight: 600;
color: var(--black);
text-transform: capitalize;
}
.config-description {
font-size: 12px;
color: var(--gray-500);
margin-top: 4px;
}
}
.array-input {
display: flex;
flex-direction: column;
gap: 12px;
width: 100%;
padding: 16px;
border: 1px solid var(--gray-200);
border-radius: 10px;
background-color: var(--white);
.array-input-item {
display: flex;
gap: 8px;
align-items: center;
width: 100%;
padding: 0;
input {
width: 100%;
padding: 8px 12px;
background-color: var(--gray-50);
border-radius: 6px;
transition: all 0.3s ease;
font-size: 13px;
border: 1px solid var(--gray-200);
&:hover {
background-color: var(--gray-100);
border-color: var(--gray-300);
}
&:focus {
background-color: var(--white);
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
&::placeholder {
color: var(--gray-300);
}
}
:global(.icon-button) {
width: 32px;
height: 32px;
padding: 0;
border-radius: 6px;
background-color: transparent;
border: 1px solid var(--gray-200);
flex-shrink: 0;
display: flex;
align-items: center;
justify-content: center;
&:hover {
background-color: var(--gray-100);
border-color: var(--gray-300);
}
svg {
width: 16px;
height: 16px;
opacity: 0.7;
}
}
}
:global(.icon-button.add-path-button) {
width: 100%;
background-color: var(--primary);
color: white;
padding: 8px 12px;
border-radius: 6px;
transition: all 0.3s ease;
margin-top: 8px;
display: flex;
align-items: center;
justify-content: center;
border: none;
height: 36px;
&:hover {
background-color: var(--primary-dark);
}
svg {
width: 16px;
height: 16px;
margin-right: 4px;
filter: brightness(2);
}
}
}
}
.input-item {
width: 100%;
input {
width: 100%;
padding: 10px;
border: var(--border-in-light);
border-radius: 10px;
box-sizing: border-box;
font-size: 14px;
background-color: var(--white);
color: var(--black);
&:hover {
border-color: var(--gray-300);
}
&:focus {
border-color: var(--primary);
outline: none;
box-shadow: 0 0 0 2px var(--primary-10);
}
&::placeholder {
color: var(--gray-300) !important;
opacity: 1;
}
}
}
.tools-list {
display: flex;
flex-direction: column;
gap: 16px;
width: 100%;
padding: 20px;
max-width: 100%;
overflow-x: hidden;
word-break: break-word;
box-sizing: border-box;
.tool-item {
width: 100%;
box-sizing: border-box;
.tool-name {
font-size: 14px;
font-weight: 600;
color: var(--black);
margin-bottom: 8px;
padding-left: 12px;
border-left: 3px solid var(--primary);
box-sizing: border-box;
width: 100%;
}
.tool-description {
font-size: 13px;
color: var(--gray-500);
line-height: 1.6;
padding-left: 15px;
box-sizing: border-box;
width: 100%;
}
}
}
:global {
.modal-content {
margin-top: 20px;
max-width: 100%;
overflow-x: hidden;
}
.list {
padding: 10px;
margin-bottom: 10px;
background-color: var(--white);
}
.list-item {
border: none;
background-color: transparent;
border-radius: 10px;
padding: 10px;
margin-bottom: 10px;
display: flex;
flex-direction: column;
gap: 10px;
.list-header {
margin-bottom: 0;
.list-title {
font-size: 14px;
font-weight: bold;
text-transform: capitalize;
color: var(--black);
}
.list-sub-title {
font-size: 12px;
color: var(--gray-500);
margin-top: 4px;
}
}
}
}
}
@keyframes loading-pulse {
0% {
background-position: 200% 0;
}
100% {
background-position: -200% 0;
}
}
@keyframes pulse {
0% {
opacity: 0.6;
}
50% {
opacity: 1;
}
100% {
opacity: 0.6;
}
}

View File

@ -0,0 +1,755 @@
import { IconButton } from "./button";
import { ErrorBoundary } from "./error";
import styles from "./mcp-market.module.scss";
import EditIcon from "../icons/edit.svg";
import AddIcon from "../icons/add.svg";
import CloseIcon from "../icons/close.svg";
import DeleteIcon from "../icons/delete.svg";
import RestartIcon from "../icons/reload.svg";
import EyeIcon from "../icons/eye.svg";
import GithubIcon from "../icons/github.svg";
import { List, ListItem, Modal, showToast } from "./ui-lib";
import { useNavigate } from "react-router-dom";
import { useEffect, useState } from "react";
import {
addMcpServer,
getClientsStatus,
getClientTools,
getMcpConfigFromFile,
isMcpEnabled,
pauseMcpServer,
restartAllClients,
resumeMcpServer,
} from "../mcp/actions";
import {
ListToolsResponse,
McpConfigData,
PresetServer,
ServerConfig,
ServerStatusResponse,
} from "../mcp/types";
import clsx from "clsx";
import PlayIcon from "../icons/play.svg";
import StopIcon from "../icons/pause.svg";
import { Path } from "../constant";
interface ConfigProperty {
type: string;
description?: string;
required?: boolean;
minItems?: number;
}
export function McpMarketPage() {
const navigate = useNavigate();
const [mcpEnabled, setMcpEnabled] = useState(false);
const [searchText, setSearchText] = useState("");
const [userConfig, setUserConfig] = useState<Record<string, any>>({});
const [editingServerId, setEditingServerId] = useState<string | undefined>();
const [tools, setTools] = useState<ListToolsResponse["tools"] | null>(null);
const [viewingServerId, setViewingServerId] = useState<string | undefined>();
const [isLoading, setIsLoading] = useState(false);
const [config, setConfig] = useState<McpConfigData>();
const [clientStatuses, setClientStatuses] = useState<
Record<string, ServerStatusResponse>
>({});
const [loadingPresets, setLoadingPresets] = useState(true);
const [presetServers, setPresetServers] = useState<PresetServer[]>([]);
const [loadingStates, setLoadingStates] = useState<Record<string, string>>(
{},
);
// 检查 MCP 是否启用
useEffect(() => {
const checkMcpStatus = async () => {
const enabled = await isMcpEnabled();
setMcpEnabled(enabled);
if (!enabled) {
navigate(Path.Home);
}
};
checkMcpStatus();
}, [navigate]);
// 添加状态轮询
useEffect(() => {
if (!mcpEnabled || !config) return;
const updateStatuses = async () => {
const statuses = await getClientsStatus();
setClientStatuses(statuses);
};
// 立即执行一次
updateStatuses();
// 每 1000ms 轮询一次
const timer = setInterval(updateStatuses, 1000);
return () => clearInterval(timer);
}, [mcpEnabled, config]);
// 加载预设服务器
useEffect(() => {
const loadPresetServers = async () => {
if (!mcpEnabled) return;
try {
setLoadingPresets(true);
const response = await fetch("https://nextchat.club/mcp/list");
if (!response.ok) {
throw new Error("Failed to load preset servers");
}
const data = await response.json();
setPresetServers(data?.data ?? []);
} catch (error) {
console.error("Failed to load preset servers:", error);
showToast("Failed to load preset servers");
} finally {
setLoadingPresets(false);
}
};
loadPresetServers();
}, [mcpEnabled]);
// 加载初始状态
useEffect(() => {
const loadInitialState = async () => {
if (!mcpEnabled) return;
try {
setIsLoading(true);
const config = await getMcpConfigFromFile();
setConfig(config);
// 获取所有客户端的状态
const statuses = await getClientsStatus();
setClientStatuses(statuses);
} catch (error) {
console.error("Failed to load initial state:", error);
showToast("Failed to load initial state");
} finally {
setIsLoading(false);
}
};
loadInitialState();
}, [mcpEnabled]);
// 加载当前编辑服务器的配置
useEffect(() => {
if (!editingServerId || !config) return;
const currentConfig = config.mcpServers[editingServerId];
if (currentConfig) {
// 从当前配置中提取用户配置
const preset = presetServers.find((s) => s.id === editingServerId);
if (preset?.configSchema) {
const userConfig: Record<string, any> = {};
Object.entries(preset.argsMapping || {}).forEach(([key, mapping]) => {
if (mapping.type === "spread") {
// For spread types, extract the array from args.
const startPos = mapping.position ?? 0;
userConfig[key] = currentConfig.args.slice(startPos);
} else if (mapping.type === "single") {
// For single types, get a single value
userConfig[key] = currentConfig.args[mapping.position ?? 0];
} else if (
mapping.type === "env" &&
mapping.key &&
currentConfig.env
) {
// For env types, get values from environment variables
userConfig[key] = currentConfig.env[mapping.key];
}
});
setUserConfig(userConfig);
}
} else {
setUserConfig({});
}
}, [editingServerId, config, presetServers]);
if (!mcpEnabled) {
return null;
}
// 检查服务器是否已添加
const isServerAdded = (id: string) => {
return id in (config?.mcpServers ?? {});
};
// 保存服务器配置
const saveServerConfig = async () => {
const preset = presetServers.find((s) => s.id === editingServerId);
if (!preset || !preset.configSchema || !editingServerId) return;
const savingServerId = editingServerId;
setEditingServerId(undefined);
try {
updateLoadingState(savingServerId, "Updating configuration...");
// 构建服务器配置
const args = [...preset.baseArgs];
const env: Record<string, string> = {};
Object.entries(preset.argsMapping || {}).forEach(([key, mapping]) => {
const value = userConfig[key];
if (mapping.type === "spread" && Array.isArray(value)) {
const pos = mapping.position ?? 0;
args.splice(pos, 0, ...value);
} else if (
mapping.type === "single" &&
mapping.position !== undefined
) {
args[mapping.position] = value;
} else if (
mapping.type === "env" &&
mapping.key &&
typeof value === "string"
) {
env[mapping.key] = value;
}
});
const serverConfig: ServerConfig = {
command: preset.command,
args,
...(Object.keys(env).length > 0 ? { env } : {}),
};
const newConfig = await addMcpServer(savingServerId, serverConfig);
setConfig(newConfig);
showToast("Server configuration updated successfully");
} catch (error) {
showToast(
error instanceof Error ? error.message : "Failed to save configuration",
);
} finally {
updateLoadingState(savingServerId, null);
}
};
// 获取服务器支持的 Tools
const loadTools = async (id: string) => {
try {
const result = await getClientTools(id);
if (result) {
setTools(result);
} else {
throw new Error("Failed to load tools");
}
} catch (error) {
showToast("Failed to load tools");
console.error(error);
setTools(null);
}
};
// 更新加载状态的辅助函数
const updateLoadingState = (id: string, message: string | null) => {
setLoadingStates((prev) => {
if (message === null) {
const { [id]: _, ...rest } = prev;
return rest;
}
return { ...prev, [id]: message };
});
};
// 修改添加服务器函数
const addServer = async (preset: PresetServer) => {
if (!preset.configurable) {
try {
const serverId = preset.id;
updateLoadingState(serverId, "Creating MCP client...");
const serverConfig: ServerConfig = {
command: preset.command,
args: [...preset.baseArgs],
};
const newConfig = await addMcpServer(preset.id, serverConfig);
setConfig(newConfig);
// 更新状态
const statuses = await getClientsStatus();
setClientStatuses(statuses);
} finally {
updateLoadingState(preset.id, null);
}
} else {
// 如果需要配置,打开配置对话框
setEditingServerId(preset.id);
setUserConfig({});
}
};
// 修改暂停服务器函数
const pauseServer = async (id: string) => {
try {
updateLoadingState(id, "Stopping server...");
const newConfig = await pauseMcpServer(id);
setConfig(newConfig);
showToast("Server stopped successfully");
} catch (error) {
showToast("Failed to stop server");
console.error(error);
} finally {
updateLoadingState(id, null);
}
};
// Restart server
const restartServer = async (id: string) => {
try {
updateLoadingState(id, "Starting server...");
await resumeMcpServer(id);
} catch (error) {
showToast(
error instanceof Error
? error.message
: "Failed to start server, please check logs",
);
console.error(error);
} finally {
updateLoadingState(id, null);
}
};
// Restart all clients
const handleRestartAll = async () => {
try {
updateLoadingState("all", "Restarting all servers...");
const newConfig = await restartAllClients();
setConfig(newConfig);
showToast("Restarting all clients");
} catch (error) {
showToast("Failed to restart clients");
console.error(error);
} finally {
updateLoadingState("all", null);
}
};
// Render configuration form
const renderConfigForm = () => {
const preset = presetServers.find((s) => s.id === editingServerId);
if (!preset?.configSchema) return null;
return Object.entries(preset.configSchema.properties).map(
([key, prop]: [string, ConfigProperty]) => {
if (prop.type === "array") {
const currentValue = userConfig[key as keyof typeof userConfig] || [];
const itemLabel = (prop as any).itemLabel || key;
const addButtonText =
(prop as any).addButtonText || `Add ${itemLabel}`;
return (
<ListItem
key={key}
title={key}
subTitle={prop.description}
vertical
>
<div className={styles["path-list"]}>
{(currentValue as string[]).map(
(value: string, index: number) => (
<div key={index} className={styles["path-item"]}>
<input
type="text"
value={value}
placeholder={`${itemLabel} ${index + 1}`}
onChange={(e) => {
const newValue = [...currentValue] as string[];
newValue[index] = e.target.value;
setUserConfig({ ...userConfig, [key]: newValue });
}}
/>
<IconButton
icon={<DeleteIcon />}
className={styles["delete-button"]}
onClick={() => {
const newValue = [...currentValue] as string[];
newValue.splice(index, 1);
setUserConfig({ ...userConfig, [key]: newValue });
}}
/>
</div>
),
)}
<IconButton
icon={<AddIcon />}
text={addButtonText}
className={styles["add-button"]}
bordered
onClick={() => {
const newValue = [...currentValue, ""] as string[];
setUserConfig({ ...userConfig, [key]: newValue });
}}
/>
</div>
</ListItem>
);
} else if (prop.type === "string") {
const currentValue = userConfig[key as keyof typeof userConfig] || "";
return (
<ListItem key={key} title={key} subTitle={prop.description}>
<input
aria-label={key}
type="text"
value={currentValue}
placeholder={`Enter ${key}`}
onChange={(e) => {
setUserConfig({ ...userConfig, [key]: e.target.value });
}}
/>
</ListItem>
);
}
return null;
},
);
};
const checkServerStatus = (clientId: string) => {
return clientStatuses[clientId] || { status: "undefined", errorMsg: null };
};
const getServerStatusDisplay = (clientId: string) => {
const status = checkServerStatus(clientId);
const statusMap = {
undefined: null, // 未配置/未找到不显示
// 添加初始化状态
initializing: (
<span className={clsx(styles["server-status"], styles["initializing"])}>
Initializing
</span>
),
paused: (
<span className={clsx(styles["server-status"], styles["stopped"])}>
Stopped
</span>
),
active: <span className={styles["server-status"]}>Running</span>,
error: (
<span className={clsx(styles["server-status"], styles["error"])}>
Error
<span className={styles["error-message"]}>: {status.errorMsg}</span>
</span>
),
};
return statusMap[status.status];
};
// Get the type of operation status
const getOperationStatusType = (message: string) => {
if (message.toLowerCase().includes("stopping")) return "stopping";
if (message.toLowerCase().includes("starting")) return "starting";
if (message.toLowerCase().includes("error")) return "error";
return "default";
};
// 渲染服务器列表
const renderServerList = () => {
if (loadingPresets) {
return (
<div className={styles["loading-container"]}>
<div className={styles["loading-text"]}>
Loading preset server list...
</div>
</div>
);
}
if (!Array.isArray(presetServers) || presetServers.length === 0) {
return (
<div className={styles["empty-container"]}>
<div className={styles["empty-text"]}>No servers available</div>
</div>
);
}
return presetServers
.filter((server) => {
if (searchText.length === 0) return true;
const searchLower = searchText.toLowerCase();
return (
server.name.toLowerCase().includes(searchLower) ||
server.description.toLowerCase().includes(searchLower) ||
server.tags.some((tag) => tag.toLowerCase().includes(searchLower))
);
})
.sort((a, b) => {
const aStatus = checkServerStatus(a.id).status;
const bStatus = checkServerStatus(b.id).status;
const aLoading = loadingStates[a.id];
const bLoading = loadingStates[b.id];
// 定义状态优先级
const statusPriority: Record<string, number> = {
error: 0, // Highest priority for error status
active: 1, // Second for active
initializing: 2, // Initializing
starting: 3, // Starting
stopping: 4, // Stopping
paused: 5, // Paused
undefined: 6, // Lowest priority for undefined
};
// Get actual status (including loading status)
const getEffectiveStatus = (status: string, loading?: string) => {
if (loading) {
const operationType = getOperationStatusType(loading);
return operationType === "default" ? status : operationType;
}
if (status === "initializing" && !loading) {
return "active";
}
return status;
};
const aEffectiveStatus = getEffectiveStatus(aStatus, aLoading);
const bEffectiveStatus = getEffectiveStatus(bStatus, bLoading);
// 首先按状态排序
if (aEffectiveStatus !== bEffectiveStatus) {
return (
(statusPriority[aEffectiveStatus] ?? 6) -
(statusPriority[bEffectiveStatus] ?? 6)
);
}
// Sort by name when statuses are the same
return a.name.localeCompare(b.name);
})
.map((server) => (
<div
className={clsx(styles["mcp-market-item"], {
[styles["loading"]]: loadingStates[server.id],
})}
key={server.id}
>
<div className={styles["mcp-market-header"]}>
<div className={styles["mcp-market-title"]}>
<div className={styles["mcp-market-name"]}>
{server.name}
{loadingStates[server.id] && (
<span
className={styles["operation-status"]}
data-status={getOperationStatusType(
loadingStates[server.id],
)}
>
{loadingStates[server.id]}
</span>
)}
{!loadingStates[server.id] && getServerStatusDisplay(server.id)}
{server.repo && (
<a
href={server.repo}
target="_blank"
rel="noopener noreferrer"
className={styles["repo-link"]}
title="Open repository"
>
<GithubIcon />
</a>
)}
</div>
<div className={styles["tags-container"]}>
{server.tags.map((tag, index) => (
<span key={index} className={styles["tag"]}>
{tag}
</span>
))}
</div>
<div
className={clsx(styles["mcp-market-info"], "one-line")}
title={server.description}
>
{server.description}
</div>
</div>
<div className={styles["mcp-market-actions"]}>
{isServerAdded(server.id) ? (
<>
{server.configurable && (
<IconButton
icon={<EditIcon />}
text="Configure"
onClick={() => setEditingServerId(server.id)}
disabled={isLoading}
/>
)}
{checkServerStatus(server.id).status === "paused" ? (
<>
<IconButton
icon={<PlayIcon />}
text="Start"
onClick={() => restartServer(server.id)}
disabled={isLoading}
/>
{/* <IconButton
icon={<DeleteIcon />}
text="Remove"
onClick={() => removeServer(server.id)}
disabled={isLoading}
/> */}
</>
) : (
<>
<IconButton
icon={<EyeIcon />}
text="Tools"
onClick={async () => {
setViewingServerId(server.id);
await loadTools(server.id);
}}
disabled={
isLoading ||
checkServerStatus(server.id).status === "error"
}
/>
<IconButton
icon={<StopIcon />}
text="Stop"
onClick={() => pauseServer(server.id)}
disabled={isLoading}
/>
</>
)}
</>
) : (
<IconButton
icon={<AddIcon />}
text="Add"
onClick={() => addServer(server)}
disabled={isLoading}
/>
)}
</div>
</div>
</div>
));
};
return (
<ErrorBoundary>
<div className={styles["mcp-market-page"]}>
<div className="window-header">
<div className="window-header-title">
<div className="window-header-main-title">
MCP Market
{loadingStates["all"] && (
<span className={styles["loading-indicator"]}>
{loadingStates["all"]}
</span>
)}
</div>
<div className="window-header-sub-title">
{Object.keys(config?.mcpServers ?? {}).length} servers configured
</div>
</div>
<div className="window-actions">
<div className="window-action-button">
<IconButton
icon={<RestartIcon />}
bordered
onClick={handleRestartAll}
text="Restart All"
disabled={isLoading}
/>
</div>
<div className="window-action-button">
<IconButton
icon={<CloseIcon />}
bordered
onClick={() => navigate(-1)}
disabled={isLoading}
/>
</div>
</div>
</div>
<div className={styles["mcp-market-page-body"]}>
<div className={styles["mcp-market-filter"]}>
<input
type="text"
className={styles["search-bar"]}
placeholder={"Search MCP Server"}
autoFocus
onInput={(e) => setSearchText(e.currentTarget.value)}
/>
</div>
<div className={styles["server-list"]}>{renderServerList()}</div>
</div>
{/*编辑服务器配置*/}
{editingServerId && (
<div className="modal-mask">
<Modal
title={`Configure Server - ${editingServerId}`}
onClose={() => !isLoading && setEditingServerId(undefined)}
actions={[
<IconButton
key="cancel"
text="Cancel"
onClick={() => setEditingServerId(undefined)}
bordered
disabled={isLoading}
/>,
<IconButton
key="confirm"
text="Save"
type="primary"
onClick={saveServerConfig}
bordered
disabled={isLoading}
/>,
]}
>
<List>{renderConfigForm()}</List>
</Modal>
</div>
)}
{viewingServerId && (
<div className="modal-mask">
<Modal
title={`Server Details - ${viewingServerId}`}
onClose={() => setViewingServerId(undefined)}
actions={[
<IconButton
key="close"
text="Close"
onClick={() => setViewingServerId(undefined)}
bordered
/>,
]}
>
<div className={styles["tools-list"]}>
{isLoading ? (
<div>Loading...</div>
) : tools?.tools ? (
tools.tools.map(
(tool: ListToolsResponse["tools"], index: number) => (
<div key={index} className={styles["tool-item"]}>
<div className={styles["tool-name"]}>{tool.name}</div>
<div className={styles["tool-description"]}>
{tool.description}
</div>
</div>
),
)
) : (
<div>No tools available</div>
)}
</div>
</Modal>
</div>
)}
</div>
</ErrorBoundary>
);
}

View File

@ -7,6 +7,8 @@ import { MaskAvatar } from "./mask";
import Locale from "../locales"; import Locale from "../locales";
import styles from "./message-selector.module.scss"; import styles from "./message-selector.module.scss";
import { getMessageTextContent } from "../utils";
import clsx from "clsx";
function useShiftRange() { function useShiftRange() {
const [startIndex, setStartIndex] = useState<number>(); const [startIndex, setStartIndex] = useState<number>();
@ -70,6 +72,7 @@ export function MessageSelector(props: {
defaultSelectAll?: boolean; defaultSelectAll?: boolean;
onSelected?: (messages: ChatMessage[]) => void; onSelected?: (messages: ChatMessage[]) => void;
}) { }) {
const LATEST_COUNT = 4;
const chatStore = useChatStore(); const chatStore = useChatStore();
const session = chatStore.currentSession(); const session = chatStore.currentSession();
const isValid = (m: ChatMessage) => m.content && !m.isError && !m.streaming; const isValid = (m: ChatMessage) => m.content && !m.isError && !m.streaming;
@ -103,7 +106,9 @@ export function MessageSelector(props: {
const searchResults = new Set<string>(); const searchResults = new Set<string>();
if (text.length > 0) { if (text.length > 0) {
messages.forEach((m) => messages.forEach((m) =>
m.content.includes(text) ? searchResults.add(m.id!) : null, getMessageTextContent(m).includes(text)
? searchResults.add(m.id!)
: null,
); );
} }
setSearchIds(searchResults); setSearchIds(searchResults);
@ -138,15 +143,13 @@ export function MessageSelector(props: {
// eslint-disable-next-line react-hooks/exhaustive-deps // eslint-disable-next-line react-hooks/exhaustive-deps
}, [startIndex, endIndex]); }, [startIndex, endIndex]);
const LATEST_COUNT = 4;
return ( return (
<div className={styles["message-selector"]}> <div className={styles["message-selector"]}>
<div className={styles["message-filter"]}> <div className={styles["message-filter"]}>
<input <input
type="text" type="text"
placeholder={Locale.Select.Search} placeholder={Locale.Select.Search}
className={styles["filter-item"] + " " + styles["search-bar"]} className={clsx(styles["filter-item"], styles["search-bar"])}
value={searchInput} value={searchInput}
onInput={(e) => { onInput={(e) => {
setSearchInput(e.currentTarget.value); setSearchInput(e.currentTarget.value);
@ -193,9 +196,9 @@ export function MessageSelector(props: {
return ( return (
<div <div
className={`${styles["message"]} ${ className={clsx(styles["message"], {
props.selection.has(m.id!) && styles["message-selected"] [styles["message-selected"]]: props.selection.has(m.id!),
}`} })}
key={i} key={i}
onClick={() => { onClick={() => {
props.updateSelection((selection) => { props.updateSelection((selection) => {
@ -218,13 +221,13 @@ export function MessageSelector(props: {
<div className={styles["date"]}> <div className={styles["date"]}>
{new Date(m.date).toLocaleString()} {new Date(m.date).toLocaleString()}
</div> </div>
<div className={`${styles["content"]} one-line`}> <div className={clsx(styles["content"], "one-line")}>
{m.content} {getMessageTextContent(m)}
</div> </div>
</div> </div>
<div className={styles["checkbox"]}> <div className={styles["checkbox"]}>
<input type="checkbox" checked={isSelected}></input> <input type="checkbox" checked={isSelected} readOnly></input>
</div> </div>
</div> </div>
); );

View File

@ -0,0 +1,7 @@
// Compress-model <select> in the model settings list: cap the control's
// width so long model names wrap instead of stretching the settings row.
.select-compress-model {
  width: 60%;
  select {
    max-width: 100%;
    white-space: normal;
  }
}

View File

@ -1,37 +1,52 @@
import { ServiceProvider } from "@/app/constant";
import { ModalConfigValidator, ModelConfig } from "../store"; import { ModalConfigValidator, ModelConfig } from "../store";
import Locale from "../locales"; import Locale from "../locales";
import { InputRange } from "./input-range"; import { InputRange } from "./input-range";
import { ListItem, Select } from "./ui-lib"; import { ListItem, Select } from "./ui-lib";
import { useAllModels } from "../utils/hooks"; import { useAllModels } from "../utils/hooks";
import { groupBy } from "lodash-es";
import styles from "./model-config.module.scss";
import { getModelProvider } from "../utils/model";
export function ModelConfigList(props: { export function ModelConfigList(props: {
modelConfig: ModelConfig; modelConfig: ModelConfig;
updateConfig: (updater: (config: ModelConfig) => void) => void; updateConfig: (updater: (config: ModelConfig) => void) => void;
}) { }) {
const allModels = useAllModels(); const allModels = useAllModels();
const groupModels = groupBy(
allModels.filter((v) => v.available),
"provider.providerName",
);
const value = `${props.modelConfig.model}@${props.modelConfig?.providerName}`;
const compressModelValue = `${props.modelConfig.compressModel}@${props.modelConfig?.compressProviderName}`;
return ( return (
<> <>
<ListItem title={Locale.Settings.Model}> <ListItem title={Locale.Settings.Model}>
<Select <Select
value={props.modelConfig.model} aria-label={Locale.Settings.Model}
value={value}
align="left"
onChange={(e) => { onChange={(e) => {
props.updateConfig( const [model, providerName] = getModelProvider(
(config) => e.currentTarget.value,
(config.model = ModalConfigValidator.model(
e.currentTarget.value,
)),
); );
props.updateConfig((config) => {
config.model = ModalConfigValidator.model(model);
config.providerName = providerName as ServiceProvider;
});
}} }}
> >
{allModels {Object.keys(groupModels).map((providerName, index) => (
.filter((v) => v.available) <optgroup label={providerName} key={index}>
.map((v, i) => ( {groupModels[providerName].map((v, i) => (
<option value={v.name} key={i}> <option value={`${v.name}@${v.provider?.providerName}`} key={i}>
{v.displayName}({v.provider?.providerName}) {v.displayName}
</option> </option>
))} ))}
</optgroup>
))}
</Select> </Select>
</ListItem> </ListItem>
<ListItem <ListItem
@ -39,6 +54,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.Temperature.SubTitle} subTitle={Locale.Settings.Temperature.SubTitle}
> >
<InputRange <InputRange
aria={Locale.Settings.Temperature.Title}
value={props.modelConfig.temperature?.toFixed(1)} value={props.modelConfig.temperature?.toFixed(1)}
min="0" min="0"
max="1" // lets limit it to 0-1 max="1" // lets limit it to 0-1
@ -58,6 +74,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.TopP.SubTitle} subTitle={Locale.Settings.TopP.SubTitle}
> >
<InputRange <InputRange
aria={Locale.Settings.TopP.Title}
value={(props.modelConfig.top_p ?? 1).toFixed(1)} value={(props.modelConfig.top_p ?? 1).toFixed(1)}
min="0" min="0"
max="1" max="1"
@ -77,6 +94,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.MaxTokens.SubTitle} subTitle={Locale.Settings.MaxTokens.SubTitle}
> >
<input <input
aria-label={Locale.Settings.MaxTokens.Title}
type="number" type="number"
min={1024} min={1024}
max={512000} max={512000}
@ -92,13 +110,14 @@ export function ModelConfigList(props: {
></input> ></input>
</ListItem> </ListItem>
{props.modelConfig.model.startsWith("gemini") ? null : ( {props.modelConfig?.providerName == ServiceProvider.Google ? null : (
<> <>
<ListItem <ListItem
title={Locale.Settings.PresencePenalty.Title} title={Locale.Settings.PresencePenalty.Title}
subTitle={Locale.Settings.PresencePenalty.SubTitle} subTitle={Locale.Settings.PresencePenalty.SubTitle}
> >
<InputRange <InputRange
aria={Locale.Settings.PresencePenalty.Title}
value={props.modelConfig.presence_penalty?.toFixed(1)} value={props.modelConfig.presence_penalty?.toFixed(1)}
min="-2" min="-2"
max="2" max="2"
@ -120,6 +139,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.FrequencyPenalty.SubTitle} subTitle={Locale.Settings.FrequencyPenalty.SubTitle}
> >
<InputRange <InputRange
aria={Locale.Settings.FrequencyPenalty.Title}
value={props.modelConfig.frequency_penalty?.toFixed(1)} value={props.modelConfig.frequency_penalty?.toFixed(1)}
min="-2" min="-2"
max="2" max="2"
@ -141,6 +161,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.InjectSystemPrompts.SubTitle} subTitle={Locale.Settings.InjectSystemPrompts.SubTitle}
> >
<input <input
aria-label={Locale.Settings.InjectSystemPrompts.Title}
type="checkbox" type="checkbox"
checked={props.modelConfig.enableInjectSystemPrompts} checked={props.modelConfig.enableInjectSystemPrompts}
onChange={(e) => onChange={(e) =>
@ -158,6 +179,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.InputTemplate.SubTitle} subTitle={Locale.Settings.InputTemplate.SubTitle}
> >
<input <input
aria-label={Locale.Settings.InputTemplate.Title}
type="text" type="text"
value={props.modelConfig.template} value={props.modelConfig.template}
onChange={(e) => onChange={(e) =>
@ -174,6 +196,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.HistoryCount.SubTitle} subTitle={Locale.Settings.HistoryCount.SubTitle}
> >
<InputRange <InputRange
aria={Locale.Settings.HistoryCount.Title}
title={props.modelConfig.historyMessageCount.toString()} title={props.modelConfig.historyMessageCount.toString()}
value={props.modelConfig.historyMessageCount} value={props.modelConfig.historyMessageCount}
min="0" min="0"
@ -192,6 +215,7 @@ export function ModelConfigList(props: {
subTitle={Locale.Settings.CompressThreshold.SubTitle} subTitle={Locale.Settings.CompressThreshold.SubTitle}
> >
<input <input
aria-label={Locale.Settings.CompressThreshold.Title}
type="number" type="number"
min={500} min={500}
max={4000} max={4000}
@ -207,6 +231,7 @@ export function ModelConfigList(props: {
</ListItem> </ListItem>
<ListItem title={Locale.Memory.Title} subTitle={Locale.Memory.Send}> <ListItem title={Locale.Memory.Title} subTitle={Locale.Memory.Send}>
<input <input
aria-label={Locale.Memory.Title}
type="checkbox" type="checkbox"
checked={props.modelConfig.sendMemory} checked={props.modelConfig.sendMemory}
onChange={(e) => onChange={(e) =>
@ -216,6 +241,33 @@ export function ModelConfigList(props: {
} }
></input> ></input>
</ListItem> </ListItem>
<ListItem
title={Locale.Settings.CompressModel.Title}
subTitle={Locale.Settings.CompressModel.SubTitle}
>
<Select
className={styles["select-compress-model"]}
aria-label={Locale.Settings.CompressModel.Title}
value={compressModelValue}
onChange={(e) => {
const [model, providerName] = getModelProvider(
e.currentTarget.value,
);
props.updateConfig((config) => {
config.compressModel = ModalConfigValidator.model(model);
config.compressProviderName = providerName as ServiceProvider;
});
}}
>
{allModels
.filter((v) => v.available)
.map((v, i) => (
<option value={`${v.name}@${v.provider?.providerName}`} key={i}>
{v.displayName}({v.provider?.providerName})
</option>
))}
</Select>
</ListItem>
</> </>
); );
} }

View File

@ -16,6 +16,7 @@ import { MaskAvatar } from "./mask";
import { useCommand } from "../command"; import { useCommand } from "../command";
import { showConfirm } from "./ui-lib"; import { showConfirm } from "./ui-lib";
import { BUILTIN_MASK_STORE } from "../masks"; import { BUILTIN_MASK_STORE } from "../masks";
import clsx from "clsx";
function MaskItem(props: { mask: Mask; onClick?: () => void }) { function MaskItem(props: { mask: Mask; onClick?: () => void }) {
return ( return (
@ -24,7 +25,9 @@ function MaskItem(props: { mask: Mask; onClick?: () => void }) {
avatar={props.mask.avatar} avatar={props.mask.avatar}
model={props.mask.modelConfig.model} model={props.mask.modelConfig.model}
/> />
<div className={styles["mask-name"] + " one-line"}>{props.mask.name}</div> <div className={clsx(styles["mask-name"], "one-line")}>
{props.mask.name}
</div>
</div> </div>
); );
} }

View File

@ -0,0 +1,38 @@
// Heading shown above a section of the plugin edit modal.
.plugin-title {
  font-weight: bolder;
  font-size: 16px;
  margin: 10px 0;
}
// Container for the rendered OpenAPI schema; the <pre><code> block scrolls
// vertically and wraps long lines so the modal keeps a bounded height.
.plugin-content {
  font-size: 14px;
  font-family: inherit;
  pre code {
    max-height: 240px;
    overflow-y: auto;
    white-space: pre-wrap;
    min-width: 280px;
  }
}
// Row holding the "load schema from URL" input and its button; collapses
// to a column layout on narrow (<= 600px) screens.
.plugin-schema {
  display: flex;
  justify-content: flex-end;
  flex-direction: row;
  input {
    margin-right: 20px;
    @media screen and (max-width: 600px) {
      margin-right: 0px;
    }
  }
  @media screen and (max-width: 600px) {
    flex-direction: column;
    gap: 5px;
    button {
      padding: 10px;
    }
  }
}

370
app/components/plugin.tsx Normal file
View File

@ -0,0 +1,370 @@
import { useDebouncedCallback } from "use-debounce";
import OpenAPIClientAxios from "openapi-client-axios";
import yaml from "js-yaml";
import { PLUGINS_REPO_URL } from "../constant";
import { IconButton } from "./button";
import { ErrorBoundary } from "./error";
import styles from "./mask.module.scss";
import pluginStyles from "./plugin.module.scss";
import EditIcon from "../icons/edit.svg";
import AddIcon from "../icons/add.svg";
import CloseIcon from "../icons/close.svg";
import DeleteIcon from "../icons/delete.svg";
import ConfirmIcon from "../icons/confirm.svg";
import ReloadIcon from "../icons/reload.svg";
import GithubIcon from "../icons/github.svg";
import { Plugin, usePluginStore, FunctionToolService } from "../store/plugin";
import {
PasswordInput,
List,
ListItem,
Modal,
showConfirm,
showToast,
} from "./ui-lib";
import Locale from "../locales";
import { useNavigate } from "react-router-dom";
import { useState } from "react";
import clsx from "clsx";
export function PluginPage() {
const navigate = useNavigate();
const pluginStore = usePluginStore();
const allPlugins = pluginStore.getAll();
const [searchPlugins, setSearchPlugins] = useState<Plugin[]>([]);
const [searchText, setSearchText] = useState("");
const plugins = searchText.length > 0 ? searchPlugins : allPlugins;
// refactored already, now it accurate
const onSearch = (text: string) => {
setSearchText(text);
if (text.length > 0) {
const result = allPlugins.filter(
(m) => m?.title.toLowerCase().includes(text.toLowerCase()),
);
setSearchPlugins(result);
} else {
setSearchPlugins(allPlugins);
}
};
const [editingPluginId, setEditingPluginId] = useState<string | undefined>();
const editingPlugin = pluginStore.get(editingPluginId);
const editingPluginTool = FunctionToolService.get(editingPlugin?.id);
const closePluginModal = () => setEditingPluginId(undefined);
const onChangePlugin = useDebouncedCallback((editingPlugin, e) => {
const content = e.target.innerText;
try {
const api = new OpenAPIClientAxios({
definition: yaml.load(content) as any,
});
api
.init()
.then(() => {
if (content != editingPlugin.content) {
pluginStore.updatePlugin(editingPlugin.id, (plugin) => {
plugin.content = content;
const tool = FunctionToolService.add(plugin, true);
plugin.title = tool.api.definition.info.title;
plugin.version = tool.api.definition.info.version;
});
}
})
.catch((e) => {
console.error(e);
showToast(Locale.Plugin.EditModal.Error);
});
} catch (e) {
console.error(e);
showToast(Locale.Plugin.EditModal.Error);
}
}, 100).bind(null, editingPlugin);
const [loadUrl, setLoadUrl] = useState<string>("");
const loadFromUrl = (loadUrl: string) =>
fetch(loadUrl)
.catch((e) => {
const p = new URL(loadUrl);
return fetch(`/api/proxy/${p.pathname}?${p.search}`, {
headers: {
"X-Base-URL": p.origin,
},
});
})
.then((res) => res.text())
.then((content) => {
try {
return JSON.stringify(JSON.parse(content), null, " ");
} catch (e) {
return content;
}
})
.then((content) => {
pluginStore.updatePlugin(editingPlugin.id, (plugin) => {
plugin.content = content;
const tool = FunctionToolService.add(plugin, true);
plugin.title = tool.api.definition.info.title;
plugin.version = tool.api.definition.info.version;
});
})
.catch((e) => {
showToast(Locale.Plugin.EditModal.Error);
});
return (
<ErrorBoundary>
<div className={styles["mask-page"]}>
<div className="window-header">
<div className="window-header-title">
<div className="window-header-main-title">
{Locale.Plugin.Page.Title}
</div>
<div className="window-header-submai-title">
{Locale.Plugin.Page.SubTitle(plugins.length)}
</div>
</div>
<div className="window-actions">
<div className="window-action-button">
<a
href={PLUGINS_REPO_URL}
target="_blank"
rel="noopener noreferrer"
>
<IconButton icon={<GithubIcon />} bordered />
</a>
</div>
<div className="window-action-button">
<IconButton
icon={<CloseIcon />}
bordered
onClick={() => navigate(-1)}
/>
</div>
</div>
</div>
<div className={styles["mask-page-body"]}>
<div className={styles["mask-filter"]}>
<input
type="text"
className={styles["search-bar"]}
placeholder={Locale.Plugin.Page.Search}
autoFocus
onInput={(e) => onSearch(e.currentTarget.value)}
/>
<IconButton
className={styles["mask-create"]}
icon={<AddIcon />}
text={Locale.Plugin.Page.Create}
bordered
onClick={() => {
const createdPlugin = pluginStore.create();
setEditingPluginId(createdPlugin.id);
}}
/>
</div>
<div>
{plugins.length == 0 && (
<div
style={{
display: "flex",
margin: "60px auto",
alignItems: "center",
justifyContent: "center",
}}
>
{Locale.Plugin.Page.Find}
<a
href={PLUGINS_REPO_URL}
target="_blank"
rel="noopener noreferrer"
style={{ marginLeft: 16 }}
>
<IconButton icon={<GithubIcon />} bordered />
</a>
</div>
)}
{plugins.map((m) => (
<div className={styles["mask-item"]} key={m.id}>
<div className={styles["mask-header"]}>
<div className={styles["mask-icon"]}></div>
<div className={styles["mask-title"]}>
<div className={styles["mask-name"]}>
{m.title}@<small>{m.version}</small>
</div>
<div className={clsx(styles["mask-info"], "one-line")}>
{Locale.Plugin.Item.Info(
FunctionToolService.add(m).length,
)}
</div>
</div>
</div>
<div className={styles["mask-actions"]}>
<IconButton
icon={<EditIcon />}
text={Locale.Plugin.Item.Edit}
onClick={() => setEditingPluginId(m.id)}
/>
{!m.builtin && (
<IconButton
icon={<DeleteIcon />}
text={Locale.Plugin.Item.Delete}
onClick={async () => {
if (
await showConfirm(Locale.Plugin.Item.DeleteConfirm)
) {
pluginStore.delete(m.id);
}
}}
/>
)}
</div>
</div>
))}
</div>
</div>
</div>
{editingPlugin && (
<div className="modal-mask">
<Modal
title={Locale.Plugin.EditModal.Title(editingPlugin?.builtin)}
onClose={closePluginModal}
actions={[
<IconButton
icon={<ConfirmIcon />}
text={Locale.UI.Confirm}
key="export"
bordered
onClick={() => setEditingPluginId("")}
/>,
]}
>
<List>
<ListItem title={Locale.Plugin.EditModal.Auth}>
<select
value={editingPlugin?.authType}
onChange={(e) => {
pluginStore.updatePlugin(editingPlugin.id, (plugin) => {
plugin.authType = e.target.value;
});
}}
>
<option value="">{Locale.Plugin.Auth.None}</option>
<option value="bearer">{Locale.Plugin.Auth.Bearer}</option>
<option value="basic">{Locale.Plugin.Auth.Basic}</option>
<option value="custom">{Locale.Plugin.Auth.Custom}</option>
</select>
</ListItem>
{["bearer", "basic", "custom"].includes(
editingPlugin.authType as string,
) && (
<ListItem title={Locale.Plugin.Auth.Location}>
<select
value={editingPlugin?.authLocation}
onChange={(e) => {
pluginStore.updatePlugin(editingPlugin.id, (plugin) => {
plugin.authLocation = e.target.value;
});
}}
>
<option value="header">
{Locale.Plugin.Auth.LocationHeader}
</option>
<option value="query">
{Locale.Plugin.Auth.LocationQuery}
</option>
<option value="body">
{Locale.Plugin.Auth.LocationBody}
</option>
</select>
</ListItem>
)}
{editingPlugin.authType == "custom" && (
<ListItem title={Locale.Plugin.Auth.CustomHeader}>
<input
type="text"
value={editingPlugin?.authHeader}
onChange={(e) => {
pluginStore.updatePlugin(editingPlugin.id, (plugin) => {
plugin.authHeader = e.target.value;
});
}}
></input>
</ListItem>
)}
{["bearer", "basic", "custom"].includes(
editingPlugin.authType as string,
) && (
<ListItem title={Locale.Plugin.Auth.Token}>
<PasswordInput
type="text"
value={editingPlugin?.authToken}
onChange={(e) => {
pluginStore.updatePlugin(editingPlugin.id, (plugin) => {
plugin.authToken = e.currentTarget.value;
});
}}
></PasswordInput>
</ListItem>
)}
</List>
<List>
<ListItem title={Locale.Plugin.EditModal.Content}>
<div className={pluginStyles["plugin-schema"]}>
<input
type="text"
style={{ minWidth: 200 }}
onInput={(e) => setLoadUrl(e.currentTarget.value)}
></input>
<IconButton
icon={<ReloadIcon />}
text={Locale.Plugin.EditModal.Load}
bordered
onClick={() => loadFromUrl(loadUrl)}
/>
</div>
</ListItem>
<ListItem
subTitle={
<div
className={clsx(
"markdown-body",
pluginStyles["plugin-content"],
)}
dir="auto"
>
<pre>
<code
contentEditable={true}
dangerouslySetInnerHTML={{
__html: editingPlugin.content,
}}
onBlur={onChangePlugin}
></code>
</pre>
</div>
}
></ListItem>
{editingPluginTool?.tools.map((tool, index) => (
<ListItem
key={index}
title={tool?.function?.name}
subTitle={tool?.function?.description}
/>
))}
</List>
</Modal>
</div>
)}
</ErrorBoundary>
);
}

View File

@ -0,0 +1 @@
// Barrel file: re-export everything from the realtime-chat component module.
export * from "./realtime-chat";

View File

@ -0,0 +1,74 @@
// Full-height column layout for the realtime voice chat overlay.
.realtime-chat {
  width: 100%;
  justify-content: center;
  align-items: center;
  position: relative;
  display: flex;
  flex-direction: column;
  height: 100%;
  padding: 20px;
  box-sizing: border-box;
  // Central microphone circle that hosts the voice-print visualization.
  .circle-mic {
    width: 150px;
    height: 150px;
    border-radius: 50%;
    background: linear-gradient(to bottom right, #a0d8ef, #f0f8ff);
    display: flex;
    justify-content: center;
    align-items: center;
  }
  // Status text between the two bottom buttons.
  .icon-center {
    font-size: 24px;
  }
  // Bottom toolbar pinned to the container: record toggle, status, power.
  .bottom-icons {
    display: flex;
    justify-content: space-between;
    align-items: center;
    width: 100%;
    position: absolute;
    bottom: 20px;
    box-sizing: border-box;
    padding: 0 20px;
  }
  .icon-left,
  .icon-right {
    width: 46px;
    height: 46px;
    font-size: 36px;
    background: var(--second);
    border-radius: 50%;
    padding: 2px;
    display: flex;
    justify-content: center;
    align-items: center;
    cursor: pointer;
    &:hover {
      opacity: 0.8;
    }
  }
  // Hidden entirely when the container carries the "mobile" modifier.
  &.mobile {
    display: none;
  }
}
// Applied to the mic circle while recording is active.
.pulse {
  animation: pulse 1.5s infinite;
}
@keyframes pulse {
  0% {
    transform: scale(1);
    opacity: 0.7;
  }
  50% {
    transform: scale(1.1);
    opacity: 1;
  }
  100% {
    transform: scale(1);
    opacity: 0.7;
  }
}

View File

@ -0,0 +1,359 @@
import VoiceIcon from "@/app/icons/voice.svg";
import VoiceOffIcon from "@/app/icons/voice-off.svg";
import PowerIcon from "@/app/icons/power.svg";
import styles from "./realtime-chat.module.scss";
import clsx from "clsx";
import { useState, useRef, useEffect } from "react";
import { useChatStore, createMessage, useAppConfig } from "@/app/store";
import { IconButton } from "@/app/components/button";
import {
Modality,
RTClient,
RTInputAudioItem,
RTResponse,
TurnDetection,
} from "rt-client";
import { AudioHandler } from "@/app/lib/audio";
import { uploadImage } from "@/app/utils/chat";
import { VoicePrint } from "@/app/components/voice-print";
// Props for the realtime voice chat overlay.
interface RealtimeChatProps {
  // Invoked when the user hits the power button to dismiss the overlay.
  onClose?: () => void;
  // NOTE(review): onStartVoice/onPausedVoice are accepted but never invoked
  // inside this component — confirm whether callers expect these callbacks.
  onStartVoice?: () => void;
  onPausedVoice?: () => void;
}
/**
 * Realtime voice chat UI built on `rt-client` (OpenAI or Azure realtime API).
 * Connects on mount, streams microphone audio up via AudioHandler, plays
 * assistant audio back, and mirrors transcripts into the chat store so the
 * conversation also appears as regular chat messages.
 */
export function RealtimeChat({
  onClose,
  onStartVoice,
  onPausedVoice,
}: RealtimeChatProps) {
  const chatStore = useChatStore();
  const session = chatStore.currentSession();
  const config = useAppConfig();
  const [status, setStatus] = useState("");
  const [isRecording, setIsRecording] = useState(false);
  const [isConnected, setIsConnected] = useState(false);
  const [isConnecting, setIsConnecting] = useState(false);
  const [modality, setModality] = useState("audio");
  const [useVAD, setUseVAD] = useState(true);
  const [frequencies, setFrequencies] = useState<Uint8Array | undefined>();
  // Long-lived handles that must survive re-renders.
  const clientRef = useRef<RTClient | null>(null);
  const audioHandlerRef = useRef<AudioHandler | null>(null);
  const initRef = useRef(false);
  // Snapshot of realtime settings from app config.
  const temperature = config.realtimeConfig.temperature;
  const apiKey = config.realtimeConfig.apiKey;
  const model = config.realtimeConfig.model;
  const azure = config.realtimeConfig.provider === "Azure";
  const azureEndpoint = config.realtimeConfig.azure.endpoint;
  const azureDeployment = config.realtimeConfig.azure.deployment;
  const voice = config.realtimeConfig.voice;
  // Open the realtime session (or close it when already connected).
  // Guarded by `isConnecting` so a double trigger cannot start two handshakes.
  const handleConnect = async () => {
    if (isConnecting) return;
    if (!isConnected) {
      try {
        setIsConnecting(true);
        // Azure needs endpoint + deployment; OpenAI needs key + model.
        clientRef.current = azure
          ? new RTClient(
              new URL(azureEndpoint),
              { key: apiKey },
              { deployment: azureDeployment },
            )
          : new RTClient({ key: apiKey }, { model });
        const modalities: Modality[] =
          modality === "audio" ? ["text", "audio"] : ["text"];
        // Server-side voice activity detection, or null for manual turns.
        const turnDetection: TurnDetection = useVAD
          ? { type: "server_vad" }
          : null;
        await clientRef.current.configure({
          instructions: "",
          voice,
          input_audio_transcription: { model: "whisper-1" },
          turn_detection: turnDetection,
          tools: [],
          temperature,
          modalities,
        });
        // Fire-and-forget: the listener loop below runs for the lifetime of
        // the connection and exits when the client is closed.
        startResponseListener();
        setIsConnected(true);
        // TODO
        // try {
        //   const recentMessages = chatStore.getMessagesWithMemory();
        //   for (const message of recentMessages) {
        //     const { role, content } = message;
        //     if (typeof content === "string") {
        //       await clientRef.current.sendItem({
        //         type: "message",
        //         role: role as any,
        //         content: [
        //           {
        //             type: (role === "assistant" ? "text" : "input_text") as any,
        //             text: content as string,
        //           },
        //         ],
        //       });
        //     }
        //   }
        //   // await clientRef.current.generateResponse();
        // } catch (error) {
        //   console.error("Set message failed:", error);
        // }
      } catch (error) {
        console.error("Connection failed:", error);
        setStatus("Connection failed");
      } finally {
        setIsConnecting(false);
      }
    } else {
      await disconnect();
    }
  };
  // Close the realtime client and clear the ref; errors are only logged.
  const disconnect = async () => {
    if (clientRef.current) {
      try {
        await clientRef.current.close();
        clientRef.current = null;
        setIsConnected(false);
      } catch (error) {
        console.error("Disconnect failed:", error);
      }
    }
  };
  // Event loop over server events: dispatches assistant responses and
  // transcribed input audio. Errors after an intentional close (ref cleared)
  // are ignored; otherwise they are logged.
  const startResponseListener = async () => {
    if (!clientRef.current) return;
    try {
      for await (const serverEvent of clientRef.current.events()) {
        if (serverEvent.type === "response") {
          await handleResponse(serverEvent);
        } else if (serverEvent.type === "input_audio") {
          await handleInputAudio(serverEvent);
        }
      }
    } catch (error) {
      if (clientRef.current) {
        console.error("Response iteration error:", error);
      }
    }
  };
  // Consume one assistant response: append a bot message, then stream text
  // and/or audio chunks into it, uploading the finished audio for playback.
  const handleResponse = async (response: RTResponse) => {
    for await (const item of response) {
      if (item.type === "message" && item.role === "assistant") {
        const botMessage = createMessage({
          role: item.role,
          content: "",
        });
        // add bot message first
        chatStore.updateTargetSession(session, (session) => {
          session.messages = session.messages.concat([botMessage]);
        });
        let hasAudio = false;
        for await (const content of item) {
          if (content.type === "text") {
            for await (const text of content.textChunks()) {
              // NOTE(review): botMessage is mutated in place after being put
              // into the store; re-render relies on the concat() below.
              botMessage.content += text;
            }
          } else if (content.type === "audio") {
            // Transcript text and audio bytes arrive on separate async
            // iterators; drain them concurrently.
            const textTask = async () => {
              for await (const text of content.transcriptChunks()) {
                botMessage.content += text;
              }
            };
            const audioTask = async () => {
              audioHandlerRef.current?.startStreamingPlayback();
              for await (const audio of content.audioChunks()) {
                hasAudio = true;
                audioHandlerRef.current?.playChunk(audio);
              }
            };
            await Promise.all([textTask(), audioTask()]);
          }
          // update message.content
          chatStore.updateTargetSession(session, (session) => {
            session.messages = session.messages.concat();
          });
        }
        if (hasAudio) {
          // upload audio get audio_url
          const blob = audioHandlerRef.current?.savePlayFile();
          uploadImage(blob!).then((audio_url) => {
            botMessage.audio_url = audio_url;
            // update text and audio_url
            chatStore.updateTargetSession(session, (session) => {
              session.messages = session.messages.concat();
            });
          });
        }
      }
    }
  };
  // Consume one transcribed user audio item: append the user message, then
  // asynchronously upload the recorded audio and attach its URL.
  const handleInputAudio = async (item: RTInputAudioItem) => {
    await item.waitForCompletion();
    if (item.transcription) {
      const userMessage = createMessage({
        role: "user",
        content: item.transcription,
      });
      chatStore.updateTargetSession(session, (session) => {
        session.messages = session.messages.concat([userMessage]);
      });
      // save input audio_url, and update session
      const { audioStartMillis, audioEndMillis } = item;
      // upload audio get audio_url
      const blob = audioHandlerRef.current?.saveRecordFile(
        audioStartMillis,
        audioEndMillis,
      );
      uploadImage(blob!).then((audio_url) => {
        userMessage.audio_url = audio_url;
        chatStore.updateTargetSession(session, (session) => {
          session.messages = session.messages.concat();
        });
      });
    }
    // stop streaming play after get input audio.
    audioHandlerRef.current?.stopStreamingPlayback();
  };
  // Toggle microphone capture. When VAD is off, stopping also commits the
  // buffered audio and asks the model to generate a response.
  const toggleRecording = async () => {
    if (!isRecording && clientRef.current) {
      try {
        if (!audioHandlerRef.current) {
          audioHandlerRef.current = new AudioHandler();
          await audioHandlerRef.current.initialize();
        }
        await audioHandlerRef.current.startRecording(async (chunk) => {
          await clientRef.current?.sendAudio(chunk);
        });
        setIsRecording(true);
      } catch (error) {
        console.error("Failed to start recording:", error);
      }
    } else if (audioHandlerRef.current) {
      try {
        audioHandlerRef.current.stopRecording();
        if (!useVAD) {
          const inputAudio = await clientRef.current?.commitAudio();
          await handleInputAudio(inputAudio!);
          await clientRef.current?.generateResponse();
        }
        setIsRecording(false);
      } catch (error) {
        console.error("Failed to stop recording:", error);
      }
    }
  };
  useEffect(() => {
    // Prevent duplicate initialization (e.g. the effect firing twice).
    if (initRef.current) return;
    initRef.current = true;
    const initAudioHandler = async () => {
      const handler = new AudioHandler();
      await handler.initialize();
      audioHandlerRef.current = handler;
      await handleConnect();
      await toggleRecording();
    };
    initAudioHandler().catch((error) => {
      setStatus(error);
      console.error(error);
    });
    return () => {
      // NOTE(review): this cleanup closes over the initial `isRecording`
      // (false), so the stop-recording branch below likely never executes
      // on unmount — verify intended behavior.
      if (isRecording) {
        toggleRecording();
      }
      audioHandlerRef.current?.close().catch(console.error);
      disconnect();
    };
  }, []);
  // Drive the voice-print visualization with live frequency data while
  // connected and recording; cancel the rAF loop otherwise.
  useEffect(() => {
    let animationFrameId: number;
    if (isConnected && isRecording) {
      const animationFrame = () => {
        if (audioHandlerRef.current) {
          const freqData = audioHandlerRef.current.getByteFrequencyData();
          setFrequencies(freqData);
        }
        animationFrameId = requestAnimationFrame(animationFrame);
      };
      animationFrameId = requestAnimationFrame(animationFrame);
    } else {
      setFrequencies(undefined);
    }
    return () => {
      if (animationFrameId) {
        cancelAnimationFrame(animationFrameId);
      }
    };
  }, [isConnected, isRecording]);
  // update session params
  useEffect(() => {
    clientRef.current?.configure({ voice });
  }, [voice]);
  useEffect(() => {
    clientRef.current?.configure({ temperature });
  }, [temperature]);
  // Power button: notify the parent, stop recording, then disconnect.
  const handleClose = async () => {
    onClose?.();
    if (isRecording) {
      await toggleRecording();
    }
    disconnect().catch(console.error);
  };
  return (
    <div className={styles["realtime-chat"]}>
      <div
        className={clsx(styles["circle-mic"], {
          [styles["pulse"]]: isRecording,
        })}
      >
        <VoicePrint frequencies={frequencies} isActive={isRecording} />
      </div>
      <div className={styles["bottom-icons"]}>
        <div>
          <IconButton
            icon={isRecording ? <VoiceIcon /> : <VoiceOffIcon />}
            onClick={toggleRecording}
            disabled={!isConnected}
            shadow
            bordered
          />
        </div>
        <div className={styles["icon-center"]}>{status}</div>
        <div>
          <IconButton
            icon={<PowerIcon />}
            onClick={handleClose}
            shadow
            bordered
          />
        </div>
      </div>
    </div>
  );
}

View File

@ -0,0 +1,173 @@
import { RealtimeConfig } from "@/app/store";
import Locale from "@/app/locales";
import { ListItem, Select, PasswordInput } from "@/app/components/ui-lib";
import { InputRange } from "@/app/components/input-range";
import { Voice } from "rt-client";
import { ServiceProvider } from "@/app/constant";
const providers = [ServiceProvider.OpenAI, ServiceProvider.Azure];
const models = ["gpt-4o-realtime-preview-2024-10-01"];
const voice = ["alloy", "shimmer", "echo"];
/**
 * Settings section for the realtime voice-chat feature.
 *
 * Renders an enable toggle and, when enabled, provider / model / API-key /
 * voice / temperature controls. Azure-specific endpoint and deployment
 * fields appear only when the Azure provider is selected.
 *
 * @param props.realtimeConfig current realtime configuration values
 * @param props.updateConfig   applies a mutating updater to the stored config
 */
export function RealtimeConfigList(props: {
  realtimeConfig: RealtimeConfig;
  updateConfig: (updater: (config: RealtimeConfig) => void) => void;
}) {
  // Extra fields rendered only for the Azure provider.
  // NOTE(review): the onChange updaters assign into config.azure without a
  // guard (reads use optional chaining but writes do not) — assumes the
  // store always initializes `azure`; confirm against RealtimeConfig defaults.
  const azureConfigComponent = props.realtimeConfig.provider ===
    ServiceProvider.Azure && (
    <>
      <ListItem
        title={Locale.Settings.Realtime.Azure.Endpoint.Title}
        subTitle={Locale.Settings.Realtime.Azure.Endpoint.SubTitle}
      >
        <input
          value={props.realtimeConfig?.azure?.endpoint}
          type="text"
          placeholder={Locale.Settings.Realtime.Azure.Endpoint.Title}
          onChange={(e) => {
            props.updateConfig(
              (config) => (config.azure.endpoint = e.currentTarget.value),
            );
          }}
        />
      </ListItem>
      <ListItem
        title={Locale.Settings.Realtime.Azure.Deployment.Title}
        subTitle={Locale.Settings.Realtime.Azure.Deployment.SubTitle}
      >
        <input
          value={props.realtimeConfig?.azure?.deployment}
          type="text"
          placeholder={Locale.Settings.Realtime.Azure.Deployment.Title}
          onChange={(e) => {
            props.updateConfig(
              (config) => (config.azure.deployment = e.currentTarget.value),
            );
          }}
        />
      </ListItem>
    </>
  );
  return (
    <>
      <ListItem
        title={Locale.Settings.Realtime.Enable.Title}
        subTitle={Locale.Settings.Realtime.Enable.SubTitle}
      >
        <input
          type="checkbox"
          checked={props.realtimeConfig.enable}
          onChange={(e) =>
            props.updateConfig(
              (config) => (config.enable = e.currentTarget.checked),
            )
          }
        ></input>
      </ListItem>
      {props.realtimeConfig.enable && (
        <>
          <ListItem
            title={Locale.Settings.Realtime.Provider.Title}
            subTitle={Locale.Settings.Realtime.Provider.SubTitle}
          >
            <Select
              aria-label={Locale.Settings.Realtime.Provider.Title}
              value={props.realtimeConfig.provider}
              onChange={(e) => {
                props.updateConfig(
                  (config) =>
                    (config.provider = e.target.value as ServiceProvider),
                );
              }}
            >
              {providers.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.Realtime.Model.Title}
            subTitle={Locale.Settings.Realtime.Model.SubTitle}
          >
            <Select
              aria-label={Locale.Settings.Realtime.Model.Title}
              value={props.realtimeConfig.model}
              onChange={(e) => {
                props.updateConfig((config) => (config.model = e.target.value));
              }}
            >
              {models.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.Realtime.ApiKey.Title}
            subTitle={Locale.Settings.Realtime.ApiKey.SubTitle}
          >
            <PasswordInput
              aria={Locale.Settings.ShowPassword}
              aria-label={Locale.Settings.Realtime.ApiKey.Title}
              value={props.realtimeConfig.apiKey}
              type="text"
              placeholder={Locale.Settings.Realtime.ApiKey.Placeholder}
              onChange={(e) => {
                props.updateConfig(
                  (config) => (config.apiKey = e.currentTarget.value),
                );
              }}
            />
          </ListItem>
          {azureConfigComponent}
          <ListItem
            title={Locale.Settings.TTS.Voice.Title}
            subTitle={Locale.Settings.TTS.Voice.SubTitle}
          >
            {/* Fix: add aria-label for consistency with the other Selects
                in this form (was the only unlabeled one). */}
            <Select
              aria-label={Locale.Settings.TTS.Voice.Title}
              value={props.realtimeConfig.voice}
              onChange={(e) => {
                props.updateConfig(
                  (config) => (config.voice = e.currentTarget.value as Voice),
                );
              }}
            >
              {voice.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.Realtime.Temperature.Title}
            subTitle={Locale.Settings.Realtime.Temperature.SubTitle}
          >
            {/* NOTE(review): aria reuses the chat Temperature label rather
                than the Realtime-specific one — confirm this is intended. */}
            <InputRange
              aria={Locale.Settings.Temperature.Title}
              value={props.realtimeConfig?.temperature?.toFixed(1)}
              min="0.6"
              max="1"
              step="0.1"
              onChange={(e) => {
                props.updateConfig(
                  (config) =>
                    (config.temperature = e.currentTarget.valueAsNumber),
                );
              }}
            ></InputRange>
          </ListItem>
        </>
      )}
    </>
  );
}

View File

@ -0,0 +1,2 @@
// Barrel file re-exporting the Stable Diffusion UI components.
export * from "./sd";
export * from "./sd-panel";

View File

@ -0,0 +1,45 @@
// One labeled control row in the SD panel: title header on top, the control
// below, and an optional subtitle underneath.
.ctrl-param-item {
  display: flex;
  justify-content: space-between;
  min-height: 40px;
  padding: 10px 0;
  animation: slide-in ease 0.6s;
  flex-direction: column;
  .ctrl-param-item-header {
    display: flex;
    align-items: center;
    .ctrl-param-item-title {
      font-size: 14px;
      font-weight: bolder;
      margin-bottom: 5px;
    }
  }
  .ctrl-param-item-sub-title {
    font-size: 12px;
    font-weight: normal;
    margin-top: 3px;
  }
  // Themed textarea used by "textarea"-type params (e.g. prompt).
  textarea {
    appearance: none;
    border-radius: 10px;
    border: var(--border-in-light);
    min-height: 36px;
    box-sizing: border-box;
    background: var(--white);
    color: var(--black);
    padding: 0 10px;
    max-width: 50%;
    font-family: inherit;
  }
}
// Model-picker buttons stacked full-width in the sidebar panel.
.ai-models {
  button {
    margin-bottom: 10px;
    padding: 10px;
    width: 100%;
  }
}

View File

@ -0,0 +1,321 @@
import styles from "./sd-panel.module.scss";
import React from "react";
import { Select } from "@/app/components/ui-lib";
import { IconButton } from "@/app/components/button";
import Locale from "@/app/locales";
import { useSdStore } from "@/app/store/sd";
import clsx from "clsx";
// Schema describing every user-facing Stable Diffusion request parameter.
// Each entry drives one form control in the SD panel:
//  - `value`:   the request-body field name
//  - `type`:    which control to render (textarea / select / number)
//  - `support`: when present, only these models get this param
//  - `default`: initial value used when (re)building the form data
export const params = [
  {
    name: Locale.SdPanel.Prompt,
    value: "prompt",
    type: "textarea",
    placeholder: Locale.SdPanel.PleaseInput(Locale.SdPanel.Prompt),
    required: true,
  },
  {
    // Model sub-version; only meaningful for the "sd3" model family.
    name: Locale.SdPanel.ModelVersion,
    value: "model",
    type: "select",
    default: "sd3-medium",
    support: ["sd3"],
    options: [
      { name: "SD3 Medium", value: "sd3-medium" },
      { name: "SD3 Large", value: "sd3-large" },
      { name: "SD3 Large Turbo", value: "sd3-large-turbo" },
    ],
  },
  {
    name: Locale.SdPanel.NegativePrompt,
    value: "negative_prompt",
    type: "textarea",
    placeholder: Locale.SdPanel.PleaseInput(Locale.SdPanel.NegativePrompt),
  },
  {
    name: Locale.SdPanel.AspectRatio,
    value: "aspect_ratio",
    type: "select",
    default: "1:1",
    options: [
      { name: "1:1", value: "1:1" },
      { name: "16:9", value: "16:9" },
      { name: "21:9", value: "21:9" },
      { name: "2:3", value: "2:3" },
      { name: "3:2", value: "3:2" },
      { name: "4:5", value: "4:5" },
      { name: "5:4", value: "5:4" },
      { name: "9:16", value: "9:16" },
      { name: "9:21", value: "9:21" },
    ],
  },
  {
    // Style preset; only the "core" model supports it.
    name: Locale.SdPanel.ImageStyle,
    value: "style",
    type: "select",
    default: "3d-model",
    support: ["core"],
    options: [
      { name: Locale.SdPanel.Styles.D3Model, value: "3d-model" },
      { name: Locale.SdPanel.Styles.AnalogFilm, value: "analog-film" },
      { name: Locale.SdPanel.Styles.Anime, value: "anime" },
      { name: Locale.SdPanel.Styles.Cinematic, value: "cinematic" },
      { name: Locale.SdPanel.Styles.ComicBook, value: "comic-book" },
      { name: Locale.SdPanel.Styles.DigitalArt, value: "digital-art" },
      { name: Locale.SdPanel.Styles.Enhance, value: "enhance" },
      { name: Locale.SdPanel.Styles.FantasyArt, value: "fantasy-art" },
      { name: Locale.SdPanel.Styles.Isometric, value: "isometric" },
      { name: Locale.SdPanel.Styles.LineArt, value: "line-art" },
      { name: Locale.SdPanel.Styles.LowPoly, value: "low-poly" },
      {
        name: Locale.SdPanel.Styles.ModelingCompound,
        value: "modeling-compound",
      },
      { name: Locale.SdPanel.Styles.NeonPunk, value: "neon-punk" },
      { name: Locale.SdPanel.Styles.Origami, value: "origami" },
      { name: Locale.SdPanel.Styles.Photographic, value: "photographic" },
      { name: Locale.SdPanel.Styles.PixelArt, value: "pixel-art" },
      { name: Locale.SdPanel.Styles.TileTexture, value: "tile-texture" },
    ],
  },
  {
    // 0 means "random seed" on the Stability API side.
    name: "Seed",
    value: "seed",
    type: "number",
    default: 0,
    min: 0,
    max: 4294967294,
  },
  {
    name: Locale.SdPanel.OutFormat,
    value: "output_format",
    type: "select",
    default: "png",
    options: [
      { name: "PNG", value: "png" },
      { name: "JPEG", value: "jpeg" },
      { name: "WebP", value: "webp" },
    ],
  },
];
// Return the subset of `params` that applies to `model`: entries without a
// `support` list apply to every model; otherwise the list must include the
// model. (`data` is accepted for signature parity with the per-model param
// builders in `models` but is unused here.)
const sdCommonParams = (model: string, data: any) => {
  return params.filter((item) => !item.support || item.support.includes(model));
};
// Available Stability AI models and the param schema each one accepts.
// Each `params(data)` builder returns the form columns for that model,
// optionally depending on the current form values in `data`.
export const models = [
  {
    name: "Stable Image Ultra",
    value: "ultra",
    params: (data: any) => sdCommonParams("ultra", data),
  },
  {
    name: "Stable Image Core",
    value: "core",
    params: (data: any) => sdCommonParams("core", data),
  },
  {
    name: "Stable Diffusion 3",
    value: "sd3",
    params: (data: any) => {
      // sd3-large-turbo does not accept a negative prompt, so hide it.
      // Fix: use strict equality (was a loose `==` comparison).
      return sdCommonParams("sd3", data).filter((item) => {
        return !(
          data.model === "sd3-large-turbo" && item.value === "negative_prompt"
        );
      });
    },
  },
];
// Shared layout wrapper for one labeled control in the SD panel: a title row
// (with a red asterisk when required), the control itself, then an optional
// subtitle below it.
export function ControlParamItem(props: {
  title: string;
  subTitle?: string;
  required?: boolean;
  children?: JSX.Element | JSX.Element[];
  className?: string;
}) {
  return (
    <div className={clsx(styles["ctrl-param-item"], props.className)}>
      <div className={styles["ctrl-param-item-header"]}>
        <div className={styles["ctrl-param-item-title"]}>
          <div>
            {props.title}
            {props.required && <span style={{ color: "red" }}>*</span>}
          </div>
        </div>
      </div>
      {props.children}
      {props.subTitle && (
        <div className={styles["ctrl-param-item-sub-title"]}>
          {props.subTitle}
        </div>
      )}
    </div>
  );
}
/**
 * Renders one form control per column definition and reports edits upward.
 *
 * Supported column types: "textarea", "select", "number"; any other type
 * falls back to a plain text input.
 *
 * @param props.columns  param schema entries (see `params` in this file)
 * @param props.data     current form values keyed by each column's `value`
 * @param props.onChange called with (fieldName, newValue) on every edit
 */
export function ControlParam(props: {
  columns: any[];
  data: any;
  onChange: (field: string, val: any) => void;
}) {
  return (
    <>
      {props.columns?.map((item) => {
        let element: null | JSX.Element;
        switch (item.type) {
          case "textarea":
            element = (
              <ControlParamItem
                title={item.name}
                subTitle={item.sub}
                required={item.required}
              >
                <textarea
                  rows={item.rows || 3}
                  style={{ maxWidth: "100%", width: "100%", padding: "10px" }}
                  placeholder={item.placeholder}
                  onChange={(e) => {
                    props.onChange(item.value, e.currentTarget.value);
                  }}
                  value={props.data[item.value]}
                ></textarea>
              </ControlParamItem>
            );
            break;
          case "select":
            element = (
              <ControlParamItem
                title={item.name}
                subTitle={item.sub}
                required={item.required}
              >
                <Select
                  aria-label={item.name}
                  value={props.data[item.value]}
                  onChange={(e) => {
                    props.onChange(item.value, e.currentTarget.value);
                  }}
                >
                  {item.options.map((opt: any) => {
                    return (
                      <option value={opt.value} key={opt.value}>
                        {opt.name}
                      </option>
                    );
                  })}
                </Select>
              </ControlParamItem>
            );
            break;
          case "number":
            element = (
              <ControlParamItem
                title={item.name}
                subTitle={item.sub}
                required={item.required}
              >
                <input
                  aria-label={item.name}
                  type="number"
                  min={item.min}
                  max={item.max}
                  value={props.data[item.value] || 0}
                  onChange={(e) => {
                    // Fix: always parse as base-10; parseInt without an
                    // explicit radix is a well-known footgun.
                    props.onChange(
                      item.value,
                      parseInt(e.currentTarget.value, 10),
                    );
                  }}
                />
              </ControlParamItem>
            );
            break;
          default:
            element = (
              <ControlParamItem
                title={item.name}
                subTitle={item.sub}
                required={item.required}
              >
                <input
                  aria-label={item.name}
                  type="text"
                  value={props.data[item.value]}
                  style={{ maxWidth: "100%", width: "100%" }}
                  onChange={(e) => {
                    props.onChange(item.value, e.currentTarget.value);
                  }}
                />
              </ControlParamItem>
            );
        }
        return <div key={item.value}>{element}</div>;
      })}
    </>
  );
}
/**
 * Build the form-data object for a set of param columns.
 *
 * @param columns   param schema entries (see `params` in this file)
 * @param data      existing values to carry over, keyed by column `value`
 * @param clearText when true, reset text/textarea/number fields to their
 *                  defaults instead of carrying over previous values
 * @returns a fresh object with one entry per column
 */
export const getModelParamBasicData = (
  columns: any[],
  data: any,
  clearText?: boolean,
) => {
  const newParams: any = {};
  columns.forEach((item: any) => {
    if (clearText && ["text", "textarea", "number"].includes(item.type)) {
      // Fix: use ?? so valid falsy defaults (e.g. seed = 0) are preserved
      // instead of collapsing to "" as `||` did.
      newParams[item.value] = item.default ?? "";
    } else {
      newParams[item.value] = data[item.value] ?? item.default ?? "";
    }
  });
  return newParams;
};
// Look up the schema entry for `model` and build its param column list;
// unknown models yield an empty list.
export const getParams = (model: any, params: any) => {
  const matched = models.find((m) => m.value === model.value);
  return matched ? matched.params(params) : [];
};
// Form panel for composing a Stable Diffusion request: a model picker plus
// dynamically generated controls for the selected model's parameters.
export function SdPanel() {
  const sdStore = useSdStore();
  const currentModel = sdStore.currentModel;
  const setCurrentModel = sdStore.setCurrentModel;
  const params = sdStore.currentParams;
  const setParams = sdStore.setCurrentParams;
  // Write a single edited field back into the current request params.
  const handleValueChange = (field: string, val: any) => {
    setParams({
      ...params,
      [field]: val,
    });
  };
  // Switch model and rebuild the param data, carrying over existing values
  // where the new model shares the same fields.
  const handleModelChange = (model: any) => {
    setCurrentModel(model);
    setParams(getModelParamBasicData(model.params({}), params));
  };
  return (
    <>
      <ControlParamItem title={Locale.SdPanel.AIModel}>
        <div className={styles["ai-models"]}>
          {models.map((item) => {
            return (
              <IconButton
                text={item.name}
                key={item.value}
                type={currentModel.value == item.value ? "primary" : null}
                shadow
                onClick={() => handleModelChange(item)}
              />
            );
          })}
        </div>
      </ControlParamItem>
      <ControlParam
        columns={getParams?.(currentModel, params) as any[]}
        data={params}
        onChange={handleValueChange}
      ></ControlParam>
    </>
  );
}

View File

@ -0,0 +1,140 @@
import { IconButton } from "@/app/components/button";
import GithubIcon from "@/app/icons/github.svg";
import SDIcon from "@/app/icons/sd.svg";
import ReturnIcon from "@/app/icons/return.svg";
import HistoryIcon from "@/app/icons/history.svg";
import Locale from "@/app/locales";
import { Path, REPO_URL } from "@/app/constant";
import { useNavigate } from "react-router-dom";
import dynamic from "next/dynamic";
import {
SideBarContainer,
SideBarBody,
SideBarHeader,
SideBarTail,
useDragSideBar,
useHotKey,
} from "@/app/components/sidebar";
import { getParams, getModelParamBasicData } from "./sd-panel";
import { useSdStore } from "@/app/store/sd";
import { showToast } from "@/app/components/ui-lib";
import { useMobileScreen } from "@/app/utils";
// Lazily load the SD control panel on the client only, with no loading UI.
const SdPanel = dynamic(
  async () => (await import("@/app/components/sd")).SdPanel,
  {
    loading: () => null,
  },
);
// Sidebar for the Stable Diffusion page: hosts the parameter panel and the
// submit button. On mobile it renders a compact header with return/history
// navigation instead of the desktop sidebar header.
export function SideBar(props: { className?: string }) {
  useHotKey();
  const isMobileScreen = useMobileScreen();
  const { onDragStart, shouldNarrow } = useDragSideBar();
  const navigate = useNavigate();
  const sdStore = useSdStore();
  const currentModel = sdStore.currentModel;
  const params = sdStore.currentParams;
  const setParams = sdStore.setCurrentParams;
  // Validate required params, queue the draw task, then reset text fields
  // and navigate to the task list.
  const handleSubmit = () => {
    const columns = getParams?.(currentModel, params);
    const reqParams: any = {};
    for (let i = 0; i < columns.length; i++) {
      const item = columns[i];
      // Missing values become null so the request body stays explicit.
      reqParams[item.value] = params[item.value] ?? null;
      if (item.required) {
        if (!reqParams[item.value]) {
          showToast(Locale.SdPanel.ParamIsRequired(item.name));
          return;
        }
      }
    }
    let data: any = {
      model: currentModel.value,
      model_name: currentModel.name,
      status: "wait",
      params: reqParams,
      created_at: new Date().toLocaleString(),
      img_data: "",
    };
    sdStore.sendTask(data, () => {
      // Clear text/number inputs after submission; selects keep their value.
      setParams(getModelParamBasicData(columns, params, true));
      navigate(Path.SdNew);
    });
  };
  return (
    <SideBarContainer
      onDragStart={onDragStart}
      shouldNarrow={shouldNarrow}
      {...props}
    >
      {isMobileScreen ? (
        <div
          className="window-header"
          data-tauri-drag-region
          style={{
            paddingLeft: 0,
            paddingRight: 0,
          }}
        >
          <div className="window-actions">
            <div className="window-action-button">
              <IconButton
                icon={<ReturnIcon />}
                bordered
                title={Locale.Sd.Actions.ReturnHome}
                onClick={() => navigate(Path.Home)}
              />
            </div>
          </div>
          <SDIcon width={50} height={50} />
          <div className="window-actions">
            <div className="window-action-button">
              <IconButton
                icon={<HistoryIcon />}
                bordered
                title={Locale.Sd.Actions.History}
                onClick={() => navigate(Path.SdNew)}
              />
            </div>
          </div>
        </div>
      ) : (
        <SideBarHeader
          title={
            <IconButton
              icon={<ReturnIcon />}
              bordered
              title={Locale.Sd.Actions.ReturnHome}
              onClick={() => navigate(Path.Home)}
            />
          }
          logo={<SDIcon width={38} height={"100%"} />}
        ></SideBarHeader>
      )}
      <SideBarBody>
        <SdPanel />
      </SideBarBody>
      <SideBarTail
        primaryAction={
          <a href={REPO_URL} target="_blank" rel="noopener noreferrer">
            <IconButton icon={<GithubIcon />} shadow />
          </a>
        }
        secondaryAction={
          <IconButton
            text={Locale.SdPanel.Submit}
            type="primary"
            shadow
            onClick={handleSubmit}
          ></IconButton>
        }
      />
    </SideBarContainer>
  );
}

View File

@ -0,0 +1,53 @@
// Grid of generated images: two cards per row on desktop, full width on
// small screens (see the media query below).
.sd-img-list{
  display: flex;
  flex-wrap: wrap;
  justify-content: space-between;
  .sd-img-item{
    width: 48%;
    // Text column next to the thumbnail (prompt, model, status, date).
    .sd-img-item-info{
      flex:1;
      width: 100%;
      overflow: hidden;
      user-select: text;
      p{
        margin: 6px;
        font-size: 12px;
      }
      // Single-line ellipsis for long prompt/status text.
      .line-1{
        overflow: hidden;
        white-space: nowrap;
        text-overflow: ellipsis;
      }
    }
    // Placeholder box shown while a task is pending or has failed.
    .pre-img{
      display: flex;
      width: 130px;
      justify-content: center;
      align-items: center;
      background-color: var(--second);
      border-radius: 10px;
    }
    // Finished thumbnail; dims on hover to hint that it opens the full image.
    .img{
      width: 130px;
      height: 130px;
      border-radius: 10px;
      overflow: hidden;
      cursor: pointer;
      transition: all .3s;
      &:hover{
        opacity: .7;
      }
    }
    &:not(:last-child){
      margin-bottom: 20px;
    }
  }
}
// On narrow screens each image card takes the full row.
@media only screen and (max-width: 600px) {
  .sd-img-list{
    .sd-img-item{
      width: 100%;
    }
  }
}

340
app/components/sd/sd.tsx Normal file
View File

@ -0,0 +1,340 @@
import chatStyles from "@/app/components/chat.module.scss";
import styles from "@/app/components/sd/sd.module.scss";
import homeStyles from "@/app/components/home.module.scss";
import { IconButton } from "@/app/components/button";
import ReturnIcon from "@/app/icons/return.svg";
import Locale from "@/app/locales";
import { Path } from "@/app/constant";
import React, { useEffect, useMemo, useRef, useState } from "react";
import {
copyToClipboard,
getMessageTextContent,
useMobileScreen,
} from "@/app/utils";
import { useNavigate, useLocation } from "react-router-dom";
import { useAppConfig } from "@/app/store";
import MinIcon from "@/app/icons/min.svg";
import MaxIcon from "@/app/icons/max.svg";
import { getClientConfig } from "@/app/config/client";
import { ChatAction } from "@/app/components/chat";
import DeleteIcon from "@/app/icons/clear.svg";
import CopyIcon from "@/app/icons/copy.svg";
import PromptIcon from "@/app/icons/prompt.svg";
import ResetIcon from "@/app/icons/reload.svg";
import { useSdStore } from "@/app/store/sd";
import LoadingIcon from "@/app/icons/three-dots.svg";
import ErrorIcon from "@/app/icons/delete.svg";
import SDIcon from "@/app/icons/sd.svg";
import { Property } from "csstype";
import {
showConfirm,
showImageModal,
showModal,
} from "@/app/components/ui-lib";
import { removeImage } from "@/app/utils/chat";
import { SideBar } from "./sd-sidebar";
import { WindowContent } from "@/app/components/home";
import { params } from "./sd-panel";
import clsx from "clsx";
// Render a color-coded, single-line status row for one SD draw task; for
// failed tasks the error text is appended and clicking it opens the full
// error message in a modal.
function getSdTaskStatus(item: any) {
  let s: string;
  let color: Property.Color | undefined = undefined;
  switch (item.status) {
    case "success":
      s = Locale.Sd.Status.Success;
      color = "green";
      break;
    case "error":
      s = Locale.Sd.Status.Error;
      color = "red";
      break;
    case "wait":
      s = Locale.Sd.Status.Wait;
      color = "yellow";
      break;
    case "running":
      s = Locale.Sd.Status.Running;
      color = "blue";
      break;
    default:
      // Unknown statuses are shown verbatim (uppercased), with no color.
      s = item.status.toUpperCase();
  }
  return (
    <p className={styles["line-1"]} title={item.error} style={{ color: color }}>
      <span>
        {Locale.Sd.Status.Name}: {s}
      </span>
      {item.status === "error" && (
        <span
          className="clickable"
          onClick={() => {
            showModal({
              title: Locale.Sd.Detail,
              children: (
                <div style={{ color: color, userSelect: "text" }}>
                  {item.error}
                </div>
              ),
            });
          }}
        >
          - {item.error}
        </span>
      )}
    </p>
  );
}
// Main Stable Diffusion drawing page: the SD sidebar (generation controls)
// plus a scrollable list of past/pending draw tasks, each with params /
// copy / retry / delete actions.
export function Sd() {
  const isMobileScreen = useMobileScreen();
  const navigate = useNavigate();
  const location = useLocation();
  const clientConfig = useMemo(() => getClientConfig(), []);
  // Hide the min/max window toggle on mobile and inside the desktop app.
  const showMaxIcon = !isMobileScreen && !clientConfig?.isApp;
  const config = useAppConfig();
  const scrollRef = useRef<HTMLDivElement>(null);
  const sdStore = useSdStore();
  const [sdImages, setSdImages] = useState(sdStore.draw);
  const isSd = location.pathname === Path.Sd;
  // Refresh the local image list when a new task id is issued.
  // NOTE(review): depends on currentId only, not sdStore.draw — presumably
  // currentId changes on every draw-list update; confirm in the sd store.
  useEffect(() => {
    setSdImages(sdStore.draw);
  }, [sdStore.currentId]);
  return (
    <>
      <SideBar className={clsx({ [homeStyles["sidebar-show"]]: isSd })} />
      <WindowContent>
        <div className={chatStyles.chat} key={"1"}>
          <div className="window-header" data-tauri-drag-region>
            {isMobileScreen && (
              <div className="window-actions">
                <div className={"window-action-button"}>
                  <IconButton
                    icon={<ReturnIcon />}
                    bordered
                    title={Locale.Chat.Actions.ChatList}
                    onClick={() => navigate(Path.Sd)}
                  />
                </div>
              </div>
            )}
            <div
              className={clsx(
                "window-header-title",
                chatStyles["chat-body-title"],
              )}
            >
              <div className={`window-header-main-title`}>Stability AI</div>
              <div className="window-header-sub-title">
                {Locale.Sd.SubTitle(sdImages.length || 0)}
              </div>
            </div>
            <div className="window-actions">
              {showMaxIcon && (
                <div className="window-action-button">
                  <IconButton
                    aria={Locale.Chat.Actions.FullScreen}
                    icon={config.tightBorder ? <MinIcon /> : <MaxIcon />}
                    bordered
                    onClick={() => {
                      config.update(
                        (config) => (config.tightBorder = !config.tightBorder),
                      );
                    }}
                  />
                </div>
              )}
              {isMobileScreen && <SDIcon width={50} height={50} />}
            </div>
          </div>
          <div className={chatStyles["chat-body"]} ref={scrollRef}>
            <div className={styles["sd-img-list"]}>
              {sdImages.length > 0 ? (
                sdImages.map((item: any) => {
                  return (
                    <div
                      key={item.id}
                      style={{ display: "flex" }}
                      className={styles["sd-img-item"]}
                    >
                      {item.status === "success" ? (
                        <img
                          className={styles["img"]}
                          src={item.img_data}
                          alt={item.id}
                          onClick={(e) =>
                            showImageModal(
                              item.img_data,
                              true,
                              isMobileScreen
                                ? { width: "100%", height: "fit-content" }
                                : { maxWidth: "100%", maxHeight: "100%" },
                              isMobileScreen
                                ? { width: "100%", height: "fit-content" }
                                : { width: "100%", height: "100%" },
                            )
                          }
                        />
                      ) : item.status === "error" ? (
                        <div className={styles["pre-img"]}>
                          <ErrorIcon />
                        </div>
                      ) : (
                        <div className={styles["pre-img"]}>
                          <LoadingIcon />
                        </div>
                      )}
                      <div
                        style={{ marginLeft: "10px" }}
                        className={styles["sd-img-item-info"]}
                      >
                        <p className={styles["line-1"]}>
                          {Locale.SdPanel.Prompt}:{" "}
                          <span
                            className="clickable"
                            title={item.params.prompt}
                            onClick={() => {
                              showModal({
                                title: Locale.Sd.Detail,
                                children: (
                                  <div style={{ userSelect: "text" }}>
                                    {item.params.prompt}
                                  </div>
                                ),
                              });
                            }}
                          >
                            {item.params.prompt}
                          </span>
                        </p>
                        <p>
                          {Locale.SdPanel.AIModel}: {item.model_name}
                        </p>
                        {getSdTaskStatus(item)}
                        <p>{item.created_at}</p>
                        <div className={chatStyles["chat-message-actions"]}>
                          <div className={chatStyles["chat-input-actions"]}>
                            <ChatAction
                              text={Locale.Sd.Actions.Params}
                              icon={<PromptIcon />}
                              onClick={() => {
                                showModal({
                                  title: Locale.Sd.GenerateParams,
                                  children: (
                                    <div style={{ userSelect: "text" }}>
                                      {Object.keys(item.params).map((key) => {
                                        // Map raw request fields to localized
                                        // labels / display values.
                                        let label = key;
                                        let value = item.params[key];
                                        switch (label) {
                                          case "prompt":
                                            label = Locale.SdPanel.Prompt;
                                            break;
                                          case "negative_prompt":
                                            label =
                                              Locale.SdPanel.NegativePrompt;
                                            break;
                                          case "aspect_ratio":
                                            label = Locale.SdPanel.AspectRatio;
                                            break;
                                          case "seed":
                                            label = "Seed";
                                            value = value || 0;
                                            break;
                                          case "output_format":
                                            label = Locale.SdPanel.OutFormat;
                                            value = value?.toUpperCase();
                                            break;
                                          case "style":
                                            label = Locale.SdPanel.ImageStyle;
                                            value = params
                                              .find(
                                                (item) =>
                                                  item.value === "style",
                                              )
                                              ?.options?.find(
                                                (item) => item.value === value,
                                              )?.name;
                                            break;
                                          default:
                                            break;
                                        }
                                        return (
                                          <div
                                            key={key}
                                            style={{ margin: "10px" }}
                                          >
                                            <strong>{label}: </strong>
                                            {value}
                                          </div>
                                        );
                                      })}
                                    </div>
                                  ),
                                });
                              }}
                            />
                            <ChatAction
                              text={Locale.Sd.Actions.Copy}
                              icon={<CopyIcon />}
                              onClick={() =>
                                copyToClipboard(
                                  getMessageTextContent({
                                    role: "user",
                                    content: item.params.prompt,
                                  }),
                                )
                              }
                            />
                            <ChatAction
                              text={Locale.Sd.Actions.Retry}
                              icon={<ResetIcon />}
                              onClick={() => {
                                // Re-queue the same request as a fresh task.
                                const reqData = {
                                  model: item.model,
                                  model_name: item.model_name,
                                  status: "wait",
                                  params: { ...item.params },
                                  created_at: new Date().toLocaleString(),
                                  img_data: "",
                                };
                                sdStore.sendTask(reqData);
                              }}
                            />
                            <ChatAction
                              text={Locale.Sd.Actions.Delete}
                              icon={<DeleteIcon />}
                              onClick={async () => {
                                if (
                                  await showConfirm(Locale.Sd.Danger.Delete)
                                ) {
                                  // remove img_data + remove item in list
                                  // NOTE(review): assigns sdStore.draw
                                  // directly instead of via a store action —
                                  // presumably getNextId() triggers
                                  // persistence/re-render; confirm.
                                  removeImage(item.img_data).finally(() => {
                                    sdStore.draw = sdImages.filter(
                                      (i: any) => i.id !== item.id,
                                    );
                                    sdStore.getNextId();
                                  });
                                }
                              }}
                            />
                          </div>
                        </div>
                      </div>
                    </div>
                  );
                })
              ) : (
                <div>{Locale.Sd.EmptyRecord}</div>
              )}
            </div>
          </div>
        </div>
      </WindowContent>
    </>
  );
}

View File

@ -0,0 +1,167 @@
import { useState, useEffect, useRef, useCallback } from "react";
import { ErrorBoundary } from "./error";
import styles from "./mask.module.scss";
import { useNavigate } from "react-router-dom";
import { IconButton } from "./button";
import CloseIcon from "../icons/close.svg";
import EyeIcon from "../icons/eye.svg";
import Locale from "../locales";
import { Path } from "../constant";
import { useChatStore } from "../store";
// One search hit: the session index (`id`), its topic (`name`), and the
// concatenated context snippets around each match (`content`).
type Item = {
  id: number;
  name: string;
  content: string;
};
/**
 * Full-text search across all chat sessions.
 *
 * Polls the search input once per second (and also searches on Enter),
 * matches the query case-insensitively against every message, and lists
 * each matching session with ~70-char context snippets around the hits.
 * Clicking a result opens that session.
 */
export function SearchChatPage() {
  const navigate = useNavigate();
  const chatStore = useChatStore();
  const sessions = chatStore.sessions;
  const selectSession = chatStore.selectSession;
  const [searchResults, setSearchResults] = useState<Item[]>([]);
  // Last polled input value; avoids re-running the search when unchanged.
  const previousValueRef = useRef<string>("");
  const searchInputRef = useRef<HTMLInputElement>(null);
  const doSearch = useCallback(
    (text: string) => {
      const lowerCaseText = text.toLowerCase();
      const results: Item[] = [];
      sessions.forEach((session, index) => {
        const fullTextContents: string[] = [];
        session.messages.forEach((message) => {
          const content = message.content as string;
          // Skip non-string (e.g. multimodal) or empty message contents.
          if (!content.toLowerCase || content === "") return;
          const lowerCaseContent = content.toLowerCase();
          // full text search: collect every occurrence with ~35 chars of
          // context on each side.
          let pos = lowerCaseContent.indexOf(lowerCaseText);
          while (pos !== -1) {
            const start = Math.max(0, pos - 35);
            const end = Math.min(content.length, pos + lowerCaseText.length + 35);
            fullTextContents.push(content.substring(start, end));
            pos = lowerCaseContent.indexOf(
              lowerCaseText,
              pos + lowerCaseText.length,
            );
          }
        });
        if (fullTextContents.length > 0) {
          results.push({
            id: index,
            name: session.topic,
            content: fullTextContents.join("... "), // concat content with...
          });
        }
      });
      // sort by length of matching content
      results.sort((a, b) => b.content.length - a.content.length);
      return results;
    },
    // Fix: depend on `sessions` so the search never reads a stale closure
    // after sessions change (the original used an empty dependency list).
    [sessions],
  );
  useEffect(() => {
    const intervalId = setInterval(() => {
      if (searchInputRef.current) {
        const currentValue = searchInputRef.current.value;
        if (currentValue !== previousValueRef.current) {
          if (currentValue.length > 0) {
            const result = doSearch(currentValue);
            setSearchResults(result);
          }
          previousValueRef.current = currentValue;
        }
      }
    }, 1000);
    // Cleanup the interval on component unmount
    return () => clearInterval(intervalId);
  }, [doSearch]);
  return (
    <ErrorBoundary>
      <div className={styles["mask-page"]}>
        {/* header */}
        <div className="window-header">
          <div className="window-header-title">
            <div className="window-header-main-title">
              {Locale.SearchChat.Page.Title}
            </div>
            {/* Fix: was "window-header-submai-title" (typo), which matched
                no stylesheet rule. */}
            <div className="window-header-sub-title">
              {Locale.SearchChat.Page.SubTitle(searchResults.length)}
            </div>
          </div>
          <div className="window-actions">
            <div className="window-action-button">
              <IconButton
                icon={<CloseIcon />}
                bordered
                onClick={() => navigate(-1)}
              />
            </div>
          </div>
        </div>
        <div className={styles["mask-page-body"]}>
          <div className={styles["mask-filter"]}>
            {/* search input */}
            <input
              type="text"
              className={styles["search-bar"]}
              placeholder={Locale.SearchChat.Page.Search}
              autoFocus
              ref={searchInputRef}
              onKeyDown={(e) => {
                if (e.key === "Enter") {
                  e.preventDefault();
                  const searchText = e.currentTarget.value;
                  if (searchText.length > 0) {
                    const result = doSearch(searchText);
                    setSearchResults(result);
                  }
                }
              }}
            />
          </div>
          <div>
            {searchResults.map((item) => (
              <div
                className={styles["mask-item"]}
                key={item.id}
                onClick={() => {
                  navigate(Path.Chat);
                  selectSession(item.id);
                }}
                style={{ cursor: "pointer" }}
              >
                {/* matched snippet preview */}
                <div className={styles["mask-header"]}>
                  <div className={styles["mask-title"]}>
                    <div className={styles["mask-name"]}>{item.name}</div>
                    {item.content.slice(0, 70)}
                  </div>
                </div>
                {/* action buttons */}
                <div className={styles["mask-actions"]}>
                  <IconButton
                    icon={<EyeIcon />}
                    text={Locale.SearchChat.Item.View}
                  />
                </div>
              </div>
            ))}
          </div>
        </div>
      </div>
    </ErrorBoundary>
  );
}

View File

@ -5,6 +5,8 @@
.avatar { .avatar {
cursor: pointer; cursor: pointer;
position: relative;
z-index: 1;
} }
.edit-prompt-modal { .edit-prompt-modal {
@ -70,3 +72,9 @@
} }
} }
} }
.subtitle-button {
button {
overflow:visible ;
}
}

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More