diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..95ed9e268
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,97 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Node.js dependencies
+/node_modules
+/jspm_packages
+
+# TypeScript v1 declaration files
+typings
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variable files
+.env
+.env.test
+
+# local env files
+.env*.local
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+
+
+# Vuepress build output
+.vuepress/dist
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# Temporary folders
+tmp
+temp
+
+# IDE and editor directories
+.idea
+.vscode
+*.swp
+*.swo
+*~
+
+# OS generated files
+.DS_Store
+Thumbs.db
+
+# secret key
+*.key
+*.key.pub
diff --git a/.env.template b/.env.template
index 3e3290369..166cc4ef4 100644
--- a/.env.template
+++ b/.env.template
@@ -8,6 +8,16 @@ CODE=your-password
 # You can start service behind a proxy
 PROXY_URL=http://localhost:7890
 
+# (optional)
+# Default: Empty
+# Google Gemini Pro API key, set if you want to use Google Gemini Pro API.
+GOOGLE_API_KEY=
+
+# (optional)
+# Default: https://generativelanguage.googleapis.com/
+# Google Gemini Pro API url without pathname, set if you want to customize Google Gemini Pro API url.
+GOOGLE_URL=
+
 # Override openai api request base url. (optional)
 # Default: https://api.openai.com
 # Examples: http://your-openai-proxy.com
@@ -36,3 +46,4 @@ ENABLE_BALANCE_QUERY=
 # Default: Empty
 # If you want to disable parse settings from url, set this value to 1.
 DISABLE_FAST_LINK=
+
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 01fa35e82..000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: "[Bug] "
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Deployment**
-- [ ] Docker
-- [ ] Vercel
-- [ ] Server
-
-**Desktop (please complete the following information):**
- - OS: [e.g. iOS]
- - Browser [e.g. chrome, safari]
- - Version [e.g. 22]
-
-**Smartphone (please complete the following information):**
- - Device: [e.g. iPhone6]
- - OS: [e.g. iOS8.1]
- - Browser [e.g. stock browser, safari]
- - Version [e.g. 22]
-
-**Additional Logs**
-Add any logs about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..bdba257d2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,146 @@ +name: Bug report +description: Create a report to help us improve +title: "[Bug] " +labels: ["bug"] + +body: + - type: markdown + attributes: + value: "## Describe the bug" + - type: textarea + id: bug-description + attributes: + label: "Bug Description" + description: "A clear and concise description of what the bug is." + placeholder: "Explain the bug..." + validations: + required: true + + - type: markdown + attributes: + value: "## To Reproduce" + - type: textarea + id: steps-to-reproduce + attributes: + label: "Steps to Reproduce" + description: "Steps to reproduce the behavior:" + placeholder: | + 1. Go to '...' + 2. Click on '....' + 3. Scroll down to '....' + 4. See error + validations: + required: true + + - type: markdown + attributes: + value: "## Expected behavior" + - type: textarea + id: expected-behavior + attributes: + label: "Expected Behavior" + description: "A clear and concise description of what you expected to happen." + placeholder: "Describe what you expected to happen..." + validations: + required: true + + - type: markdown + attributes: + value: "## Screenshots" + - type: textarea + id: screenshots + attributes: + label: "Screenshots" + description: "If applicable, add screenshots to help explain your problem." + placeholder: "Paste your screenshots here or write 'N/A' if not applicable..." + validations: + required: false + + - type: markdown + attributes: + value: "## Deployment" + - type: checkboxes + id: deployment + attributes: + label: "Deployment Method" + description: "Please select the deployment method you are using." + options: + - label: "Docker" + - label: "Vercel" + - label: "Server" + + - type: markdown + attributes: + value: "## Desktop (please complete the following information):" + - type: input + id: desktop-os + attributes: + label: "Desktop OS" + description: "Your desktop operating system." + placeholder: "e.g., Windows 10" + validations: + required: false + - type: input + id: desktop-browser + attributes: + label: "Desktop Browser" + description: "Your desktop browser." + placeholder: "e.g., Chrome, Safari" + validations: + required: false + - type: input + id: desktop-version + attributes: + label: "Desktop Browser Version" + description: "Version of your desktop browser." + placeholder: "e.g., 89.0" + validations: + required: false + + - type: markdown + attributes: + value: "## Smartphone (please complete the following information):" + - type: input + id: smartphone-device + attributes: + label: "Smartphone Device" + description: "Your smartphone device." + placeholder: "e.g., iPhone X" + validations: + required: false + - type: input + id: smartphone-os + attributes: + label: "Smartphone OS" + description: "Your smartphone operating system." + placeholder: "e.g., iOS 14.4" + validations: + required: false + - type: input + id: smartphone-browser + attributes: + label: "Smartphone Browser" + description: "Your smartphone browser." + placeholder: "e.g., Safari" + validations: + required: false + - type: input + id: smartphone-version + attributes: + label: "Smartphone Browser Version" + description: "Version of your smartphone browser." 
+ placeholder: "e.g., 14" + validations: + required: false + + - type: markdown + attributes: + value: "## Additional Logs" + - type: textarea + id: additional-logs + attributes: + label: "Additional Logs" + description: "Add any logs about the problem here." + placeholder: "Paste any relevant logs here..." + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 25c36ab67..000000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "[Feature] " -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 000000000..499781330 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,53 @@ +name: Feature request +description: Suggest an idea for this project +title: "[Feature Request]: " +labels: ["enhancement"] + +body: + - type: markdown + attributes: + value: "## Is your feature request related to a problem? Please describe." + - type: textarea + id: problem-description + attributes: + label: Problem Description + description: "A clear and concise description of what the problem is. Example: I'm always frustrated when [...]" + placeholder: "Explain the problem you are facing..." + validations: + required: true + + - type: markdown + attributes: + value: "## Describe the solution you'd like" + - type: textarea + id: desired-solution + attributes: + label: Solution Description + description: A clear and concise description of what you want to happen. + placeholder: "Describe the solution you'd like..." + validations: + required: true + + - type: markdown + attributes: + value: "## Describe alternatives you've considered" + - type: textarea + id: alternatives-considered + attributes: + label: Alternatives Considered + description: A clear and concise description of any alternative solutions or features you've considered. + placeholder: "Describe any alternative solutions or features you've considered..." + validations: + required: false + + - type: markdown + attributes: + value: "## Additional context" + - type: textarea + id: additional-context + attributes: + label: Additional Context + description: Add any other context or screenshots about the feature request here. + placeholder: "Add any other context or screenshots about the feature request here..." 
+ validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/功能建议.md b/.github/ISSUE_TEMPLATE/功能建议.md deleted file mode 100644 index 3fc3d0769..000000000 --- a/.github/ISSUE_TEMPLATE/功能建议.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: 功能建议 -about: 请告诉我们你的灵光一闪 -title: "[Feature] " -labels: '' -assignees: '' - ---- - -> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。 - -> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) - -**这个功能与现有的问题有关吗?** -如果有关,请在此列出链接或者描述问题。 - -**你想要什么功能或者有什么建议?** -尽管告诉我们。 - -**有没有可以参考的同类竞品?** -可以给出参考产品的链接或者截图。 - -**其他信息** -可以说说你的其他考虑。 diff --git a/.github/ISSUE_TEMPLATE/反馈问题.md b/.github/ISSUE_TEMPLATE/反馈问题.md deleted file mode 100644 index 270263f06..000000000 --- a/.github/ISSUE_TEMPLATE/反馈问题.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -name: 反馈问题 -about: 请告诉我们你遇到的问题 -title: "[Bug] " -labels: '' -assignees: '' - ---- - -> 为了提高交流效率,我们设立了官方 QQ 群和 QQ 频道,如果你在使用或者搭建过程中遇到了任何问题,请先第一时间加群或者频道咨询解决,除非是可以稳定复现的 Bug 或者较为有创意的功能建议,否则请不要随意往 Issue 区发送低质无意义帖子。 - -> [点击加入官方群聊](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) - -**反馈须知** - -⚠️ 注意:不遵循此模板的任何帖子都会被立即关闭,如果没有提供下方的信息,我们无法定位你的问题。 - -请在下方中括号内输入 x 来表示你已经知晓相关内容。 -- [ ] 我确认已经在 [常见问题](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/faq-cn.md) 中搜索了此次反馈的问题,没有找到解答; -- [ ] 我确认已经在 [Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) 列表(包括已经 Close 的)中搜索了此次反馈的问题,没有找到解答。 -- [ ] 我确认已经在 [Vercel 使用教程](https://github.com/Yidadaa/ChatGPT-Next-Web/blob/main/docs/vercel-cn.md) 中搜索了此次反馈的问题,没有找到解答。 - -**描述问题** -请在此描述你遇到了什么问题。 - -**如何复现** -请告诉我们你是通过什么操作触发的该问题。 - -**截图** -请在此提供控制台截图、屏幕截图或者服务端的 log 截图。 - -**一些必要的信息** - - 系统:[比如 windows 10/ macos 12/ linux / android 11 / ios 16] - - 浏览器: [比如 chrome, safari] - - 版本: [填写设置页面的版本号] - - 部署方式:[比如 vercel、docker 或者服务器部署] diff --git a/.github/workflows/app.yml b/.github/workflows/app.yml index aebba28f7..7e74cf045 100644 --- a/.github/workflows/app.yml +++ b/.github/workflows/app.yml @@ -43,12 +43,9 @@ jobs: - os: ubuntu-latest arch: x86_64 rust_target: x86_64-unknown-linux-gnu - - os: macos-latest - arch: x86_64 - rust_target: x86_64-apple-darwin - os: macos-latest arch: aarch64 - rust_target: aarch64-apple-darwin + rust_target: x86_64-apple-darwin,aarch64-apple-darwin - os: windows-latest arch: x86_64 rust_target: x86_64-pc-windows-msvc @@ -60,13 +57,14 @@ jobs: uses: actions/setup-node@v3 with: node-version: 18 + cache: 'yarn' - name: install Rust stable uses: dtolnay/rust-toolchain@stable with: targets: ${{ matrix.config.rust_target }} - uses: Swatinem/rust-cache@v2 with: - key: ${{ matrix.config.rust_target }} + key: ${{ matrix.config.os }} - name: install dependencies (ubuntu only) if: matrix.config.os == 'ubuntu-latest' run: | @@ -79,8 +77,15 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} TAURI_PRIVATE_KEY: ${{ secrets.TAURI_PRIVATE_KEY }} TAURI_KEY_PASSWORD: ${{ secrets.TAURI_KEY_PASSWORD }} + APPLE_CERTIFICATE: ${{ secrets.APPLE_CERTIFICATE }} + APPLE_CERTIFICATE_PASSWORD: ${{ secrets.APPLE_CERTIFICATE_PASSWORD }} + APPLE_SIGNING_IDENTITY: ${{ secrets.APPLE_SIGNING_IDENTITY }} + APPLE_ID: ${{ secrets.APPLE_ID }} + APPLE_PASSWORD: ${{ secrets.APPLE_PASSWORD }} + APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }} with: releaseId: ${{ needs.create-release.outputs.release_id }} + args: ${{ matrix.config.os == 'macos-latest' && '--target universal-apple-darwin' || '' }} publish-release: permissions: diff --git a/.github/workflows/deploy_preview.yml 
b/.github/workflows/deploy_preview.yml new file mode 100644 index 000000000..bdbb78c27 --- /dev/null +++ b/.github/workflows/deploy_preview.yml @@ -0,0 +1,84 @@ +name: VercelPreviewDeployment + +on: + pull_request_target: + types: + - opened + - synchronize + - reopened + +env: + VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }} + VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} + VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} + VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} + VERCEL_PR_DOMAIN_SUFFIX: ${{ secrets.VERCEL_PR_DOMAIN_SUFFIX }} + +permissions: + contents: read + statuses: write + pull-requests: write + +jobs: + deploy-preview: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Extract branch name + shell: bash + run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> "$GITHUB_OUTPUT" + id: extract_branch + + - name: Hash branch name + uses: pplanel/hash-calculator-action@v1.3.1 + id: hash_branch + with: + input: ${{ steps.extract_branch.outputs.branch }} + method: MD5 + + - name: Set Environment Variables + id: set_env + if: github.event_name == 'pull_request_target' + run: | + echo "VERCEL_ALIAS_DOMAIN=${{ github.event.pull_request.number }}-${{ github.workflow }}.${VERCEL_PR_DOMAIN_SUFFIX}" >> $GITHUB_OUTPUT + + - name: Install Vercel CLI + run: npm install --global vercel@latest + + - name: Cache dependencies + uses: actions/cache@v2 + id: cache-npm + with: + path: ~/.npm + key: npm-${{ hashFiles('package-lock.json') }} + restore-keys: npm- + + - name: Pull Vercel Environment Information + run: vercel pull --yes --environment=preview --token=${VERCEL_TOKEN} + + - name: Deploy Project Artifacts to Vercel + id: vercel + env: + META_TAG: ${{ steps.hash_branch.outputs.digest }}-${{ github.run_number }}-${{ github.run_attempt}} + run: | + set -e + vercel pull --yes --environment=preview --token=${VERCEL_TOKEN} + vercel build --token=${VERCEL_TOKEN} + vercel deploy --prebuilt --archive=tgz --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }} + + DEFAULT_URL=$(vercel ls --token=${VERCEL_TOKEN} --meta base_hash=${{ env.META_TAG }}) + ALIAS_URL=$(vercel alias set ${DEFAULT_URL} ${{ steps.set_env.outputs.VERCEL_ALIAS_DOMAIN }} --token=${VERCEL_TOKEN} --scope ${VERCEL_TEAM}| awk '{print $3}') + + echo "New preview URL: ${DEFAULT_URL}" + echo "New alias URL: ${ALIAS_URL}" + echo "VERCEL_URL=${ALIAS_URL}" >> "$GITHUB_OUTPUT" + + - uses: mshick/add-pr-comment@v2 + with: + message: | + Your build has completed! 
+ + [Preview deployment](${{ steps.vercel.outputs.VERCEL_URL }}) diff --git a/.github/workflows/remove_deploy_preview.yml b/.github/workflows/remove_deploy_preview.yml new file mode 100644 index 000000000..4846cda2d --- /dev/null +++ b/.github/workflows/remove_deploy_preview.yml @@ -0,0 +1,40 @@ +name: Removedeploypreview + +permissions: + contents: read + statuses: write + pull-requests: write + +env: + VERCEL_TOKEN: ${{ secrets.VERCEL_TOKEN }} + VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} + VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} + +on: + pull_request_target: + types: + - closed + +jobs: + delete-deployments: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Extract branch name + shell: bash + run: echo "branch=${GITHUB_HEAD_REF:-${GITHUB_REF#refs/heads/}}" >> $GITHUB_OUTPUT + id: extract_branch + + - name: Hash branch name + uses: pplanel/hash-calculator-action@v1.3.1 + id: hash_branch + with: + input: ${{ steps.extract_branch.outputs.branch }} + method: MD5 + + - name: Call the delete-deployment-preview.sh script + env: + META_TAG: ${{ steps.hash_branch.outputs.digest }} + run: | + bash ./scripts/delete-deployment-preview.sh diff --git a/.github/workflows/sync.yml b/.github/workflows/sync.yml index ebf5587d0..e04e30adb 100644 --- a/.github/workflows/sync.yml +++ b/.github/workflows/sync.yml @@ -24,7 +24,7 @@ jobs: id: sync uses: aormsby/Fork-Sync-With-Upstream-action@v3.4 with: - upstream_sync_repo: Yidadaa/ChatGPT-Next-Web + upstream_sync_repo: ChatGPTNextWeb/ChatGPT-Next-Web upstream_sync_branch: main target_sync_branch: main target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set diff --git a/Dockerfile b/Dockerfile index 720a0cfe9..436d39d82 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,6 +16,7 @@ FROM base AS builder RUN apk update && apk add --no-cache git ENV OPENAI_API_KEY="" +ENV GOOGLE_API_KEY="" ENV CODE="" WORKDIR /app @@ -31,6 +32,7 @@ RUN apk add proxychains-ng ENV PROXY_URL="" ENV OPENAI_API_KEY="" +ENV GOOGLE_API_KEY="" ENV CODE="" COPY --from=builder /app/public ./public @@ -41,22 +43,22 @@ COPY --from=builder /app/.next/server ./.next/server EXPOSE 3000 CMD if [ -n "$PROXY_URL" ]; then \ - export HOSTNAME="127.0.0.1"; \ - protocol=$(echo $PROXY_URL | cut -d: -f1); \ - host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ - port=$(echo $PROXY_URL | cut -d: -f3); \ - conf=/etc/proxychains.conf; \ - echo "strict_chain" > $conf; \ - echo "proxy_dns" >> $conf; \ - echo "remote_dns_subnet 224" >> $conf; \ - echo "tcp_read_time_out 15000" >> $conf; \ - echo "tcp_connect_time_out 8000" >> $conf; \ - echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \ - echo "localnet ::1/128" >> $conf; \ - echo "[ProxyList]" >> $conf; \ - echo "$protocol $host $port" >> $conf; \ - cat /etc/proxychains.conf; \ - proxychains -f $conf node server.js; \ + export HOSTNAME="127.0.0.1"; \ + protocol=$(echo $PROXY_URL | cut -d: -f1); \ + host=$(echo $PROXY_URL | cut -d/ -f3 | cut -d: -f1); \ + port=$(echo $PROXY_URL | cut -d: -f3); \ + conf=/etc/proxychains.conf; \ + echo "strict_chain" > $conf; \ + echo "proxy_dns" >> $conf; \ + echo "remote_dns_subnet 224" >> $conf; \ + echo "tcp_read_time_out 15000" >> $conf; \ + echo "tcp_connect_time_out 8000" >> $conf; \ + echo "localnet 127.0.0.0/255.0.0.0" >> $conf; \ + echo "localnet ::1/128" >> $conf; \ + echo "[ProxyList]" >> $conf; \ + echo "$protocol $host $port" >> $conf; \ + cat /etc/proxychains.conf; \ + proxychains -f $conf node server.js; \ else \ - node server.js; \ + node 
server.js; \ fi diff --git a/README.md b/README.md index 7c7a6f243..429a02d63 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,22 @@
 <div align="center">
-<img src="..." alt="icon"/>
+<img src="..." alt="icon"/>
 
-<h1 align="center">ChatGPT Next Web</h1>
+<h1 align="center">NextChat (ChatGPT Next Web)</h1>
 
 English / [简体中文](./README_CN.md)
 
-One-Click to get well-designed cross-platform ChatGPT web UI.
+One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT-3, GPT-4 & Gemini Pro support.
 
-一键免费部署你的跨平台私人 ChatGPT 应用。
+一键免费部署你的跨平台私人 ChatGPT 应用,支持 GPT-3, GPT-4 & Gemini Pro 模型。
 
 [![Web][Web-image]][web-url]
 [![Windows][Windows-image]][download-url]
 [![MacOS][MacOS-image]][download-url]
 [![Linux][Linux-image]][download-url]
 
-[Web App](https://chatgpt.nextweb.fun/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/mortiest_ricky) / [Buy Me a Coffee](https://www.buymeacoffee.com/yidadaa)
+[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Twitter](https://twitter.com/NextChatDev)
 
-[网页版](https://chatgpt.nextweb.fun/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [QQ 群](https://github.com/Yidadaa/ChatGPT-Next-Web/discussions/1724) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg)
+[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues)
 
 [web-url]: https://chatgpt.nextweb.fun
 [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
@@ -25,7 +25,9 @@ One-Click to get well-designed cross-platform ChatGPT web UI.
 [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
 [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
 
-[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
+[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat)
+
+[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA)
 
 [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
 
@@ -37,8 +39,8 @@ One-Click to get well-designed cross-platform ChatGPT web UI.
 
 - **Deploy for free with one-click** on Vercel in under 1 minute
 - Compact client (~5MB) on Linux/Windows/MacOS, [download it now](https://github.com/Yidadaa/ChatGPT-Next-Web/releases)
-- Fully compatible with self-deployed llms, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI)
-- Privacy first, all data stored locally in the browser
+- Fully compatible with self-deployed LLMs, recommended for use with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) or [LocalAI](https://github.com/go-skynet/LocalAI)
+- Privacy first, all data is stored locally in the browser
 - Markdown support: LaTeX, mermaid, code highlight, etc.
 - Responsive design, dark mode and PWA
 - Fast first screen loading speed (~100kb), support streaming response
 
@@ -59,10 +61,11 @@ One-Click to get well-designed cross-platform ChatGPT web UI.
 
 ## What's New
 
-- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
-- 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
-- 🚀 v2.8 now we have a client that runs across all platforms!
+- 🚀 v2.10.1 supports the Google Gemini Pro model.
 - 🚀 v2.9.11 you can use azure endpoint now.
+- 🚀 v2.8 now we have a client that runs across all platforms!
+- 🚀 v2.7 let's share conversations as image, or share to ShareGPT!
+- 🚀 v2.0 is released, now you can create prompt templates, turn your ideas into reality! Read this: [ChatGPT Prompt Engineering Tips: Zero, One and Few Shot Prompting](https://www.allabtai.com/prompt-engineering-tips-zero-one-and-few-shot-prompting/).
 
 ## 主要功能
 
@@ -189,6 +192,14 @@ Azure Api Key.
 
 Azure Api Version, find it at [Azure Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions).
 
+### `GOOGLE_API_KEY` (optional)
+
+Google Gemini Pro API key.
+
+### `GOOGLE_URL` (optional)
+
+Google Gemini Pro API URL.
+
 ### `HIDE_USER_API_KEY` (optional)
 
 > Default: Empty
 
@@ -205,7 +216,7 @@ If you do not want users to use GPT-4, set this value to 1.
 
 > Default: Empty
 
-If you do want users to query balance, set this value to 1, or you should set it to 0.
+If you do want users to query balance, set this value to 1.
 
 ### `DISABLE_FAST_LINK` (optional)
 
@@ -350,9 +361,11 @@ If you want to add a new translation, read this [document](./docs/translation.md).
 
 [@Licoy](https://github.com/Licoy)
 [@shangmin2009](https://github.com/shangmin2009)
 
-### Contributor
+### Contributors
 
-[Contributors](https://github.com/Yidadaa/ChatGPT-Next-Web/graphs/contributors)
+<a href="https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/graphs/contributors">
+  <img src="https://contrib.rocks/image?repo=ChatGPTNextWeb/ChatGPT-Next-Web" />
+</a>
 
 ## LICENSE
 
diff --git a/README_CN.md b/README_CN.md
index d73479658..4acefefa4 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,14 +1,16 @@
 <div align="center">
 <img src="..." alt="预览"/>
 
-<h1 align="center">ChatGPT Next Web</h1>
+<h1 align="center">NextChat</h1>
 
-一键免费部署你的私人 ChatGPT 网页应用。
+一键免费部署你的私人 ChatGPT 网页应用,支持 GPT-3, GPT-4 & Gemini Pro 模型。
 
-[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) / [QQ 群](https://user-images.githubusercontent.com/16968934/228190818-7dd00845-e9b9-4363-97e5-44c507ac76da.jpeg) / [打赏开发者](https://user-images.githubusercontent.com/16968934/227772541-5bcd52d8-61b7-488c-a203-0330d8006e2b.jpg) / [Donate](#捐赠-donate-usdt)
+[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N)
 
 [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web)
 
+[![Deploy on Zeabur](https://zeabur.com/button.svg)](https://zeabur.com/templates/ZBUEFA)
+
 [![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web)
 
 ![主界面](./docs/images/cover.png)
 
@@ -19,7 +21,7 @@
 
 1. 准备好你的 [OpenAI API Key](https://platform.openai.com/account/api-keys);
 2. 点击右侧按钮开始部署:
-   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
+   [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FYidadaa%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&env=GOOGLE_API_KEY&project-name=chatgpt-next-web&repository-name=ChatGPT-Next-Web),直接使用 Github 账号登录即可,记得在环境变量页填入 API Key 和[页面访问密码](#配置页面访问密码) CODE;
 3. 部署完毕后,即可开始使用;
 4. (可选)[绑定自定义域名](https://vercel.com/docs/concepts/projects/domains/add-a-domain):Vercel 分配的域名 DNS 在某些区域被污染了,绑定自定义域名即可直连。
 
@@ -104,6 +106,14 @@ Azure 密钥。
 
 Azure Api 版本,你可以在这里找到:[Azure 文档](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions)。
 
+### `GOOGLE_API_KEY` (可选)
+
+Google Gemini Pro 密钥。
+
+### `GOOGLE_URL` (可选)
+
+Google Gemini Pro API URL。
+
 ### `HIDE_USER_API_KEY` (可选)
 
 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。
 
diff --git a/app/api/auth.ts b/app/api/auth.ts
index b41e34e05..16c8034eb 100644
--- a/app/api/auth.ts
+++ b/app/api/auth.ts
@@ -1,7 +1,7 @@
 import { NextRequest } from "next/server";
 import { getServerSideConfig } from "../config/server";
 import md5 from "spark-md5";
-import { ACCESS_CODE_PREFIX } from "../constant";
+import { ACCESS_CODE_PREFIX, ModelProvider } from "../constant";
 
 function getIP(req: NextRequest) {
   let ip = req.ip ?? req.headers.get("x-real-ip");
@@ -16,15 +16,15 @@ function getIP(req: NextRequest) {
 function parseApiKey(bearToken: string) {
   const token = bearToken.trim().replaceAll("Bearer ", "").trim();
-  const isOpenAiKey = !token.startsWith(ACCESS_CODE_PREFIX);
+  const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
 
   return {
-    accessCode: isOpenAiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
-    apiKey: isOpenAiKey ? token : "",
+    accessCode: isApiKey ? "" : token.slice(ACCESS_CODE_PREFIX.length),
+    apiKey: isApiKey ? token : "",
   };
 }
 
-export function auth(req: NextRequest) {
+export function auth(req: NextRequest, modelProvider: ModelProvider) {
   const authToken = req.headers.get("Authorization") ??
""; // check if it is openai api key or user token @@ -49,22 +49,23 @@ export function auth(req: NextRequest) { if (serverConfig.hideUserApiKey && !!apiKey) { return { error: true, - msg: "you are not allowed to access openai with your own api key", + msg: "you are not allowed to access with your own api key", }; } // if user does not provide an api key, inject system api key if (!apiKey) { - const serverApiKey = serverConfig.isAzure - ? serverConfig.azureApiKey - : serverConfig.apiKey; + const serverConfig = getServerSideConfig(); - if (serverApiKey) { + const systemApiKey = + modelProvider === ModelProvider.GeminiPro + ? serverConfig.googleApiKey + : serverConfig.isAzure + ? serverConfig.azureApiKey + : serverConfig.apiKey; + if (systemApiKey) { console.log("[Auth] use system api key"); - req.headers.set( - "Authorization", - `${serverConfig.isAzure ? "" : "Bearer "}${serverApiKey}`, - ); + req.headers.set("Authorization", `Bearer ${systemApiKey}`); } else { console.log("[Auth] admin did not provide an api key"); } diff --git a/app/api/common.ts b/app/api/common.ts index 48ddfb5f0..a75f2de5c 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,6 +1,6 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; -import { DEFAULT_MODELS, OPENAI_BASE_URL } from "../constant"; +import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant"; import { collectModelTable } from "../utils/model"; import { makeAzurePath } from "../azure"; @@ -9,8 +9,21 @@ const serverConfig = getServerSideConfig(); export async function requestOpenai(req: NextRequest) { const controller = new AbortController(); - const authValue = req.headers.get("Authorization") ?? ""; - const authHeaderName = serverConfig.isAzure ? "api-key" : "Authorization"; + var authValue, + authHeaderName = ""; + if (serverConfig.isAzure) { + authValue = + req.headers + .get("Authorization") + ?.trim() + .replaceAll("Bearer ", "") + .trim() ?? ""; + + authHeaderName = "api-key"; + } else { + authValue = req.headers.get("Authorization") ?? 
""; + authHeaderName = "Authorization"; + } let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( "/api/openai/", @@ -116,12 +129,20 @@ export async function requestOpenai(req: NextRequest) { // to disable nginx buffering newHeaders.set("X-Accel-Buffering", "no"); + // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV) // Also, this is to prevent the header from being sent to the client if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") { newHeaders.delete("OpenAI-Organization"); } + // The latest version of the OpenAI API forced the content-encoding to be "br" in json response + // So if the streaming is disabled, we need to remove the content-encoding header + // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header + // The browser will try to decode the response with brotli and fail + newHeaders.delete("content-encoding"); + + return new Response(res.body, { status: res.status, statusText: res.statusText, diff --git a/app/api/cors/[...path]/route.ts b/app/api/cors/[...path]/route.ts deleted file mode 100644 index 0217b12b0..000000000 --- a/app/api/cors/[...path]/route.ts +++ /dev/null @@ -1,43 +0,0 @@ -import { NextRequest, NextResponse } from "next/server"; - -async function handle( - req: NextRequest, - { params }: { params: { path: string[] } }, -) { - if (req.method === "OPTIONS") { - return NextResponse.json({ body: "OK" }, { status: 200 }); - } - - const [protocol, ...subpath] = params.path; - const targetUrl = `${protocol}://${subpath.join("/")}`; - - const method = req.headers.get("method") ?? undefined; - const shouldNotHaveBody = ["get", "head"].includes( - method?.toLowerCase() ?? "", - ); - - const fetchOptions: RequestInit = { - headers: { - authorization: req.headers.get("authorization") ?? "", - }, - body: shouldNotHaveBody ? 
null : req.body, - method, - // @ts-ignore - duplex: "half", - }; - - const fetchResult = await fetch(targetUrl, fetchOptions); - - console.log("[Any Proxy]", targetUrl, { - status: fetchResult.status, - statusText: fetchResult.statusText, - }); - - return fetchResult; -} - -export const POST = handle; -export const GET = handle; -export const OPTIONS = handle; - -export const runtime = "nodejs"; diff --git a/app/api/google/[...path]/route.ts b/app/api/google/[...path]/route.ts new file mode 100644 index 000000000..ebd192891 --- /dev/null +++ b/app/api/google/[...path]/route.ts @@ -0,0 +1,116 @@ +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "../../auth"; +import { getServerSideConfig } from "@/app/config/server"; +import { GEMINI_BASE_URL, Google, ModelProvider } from "@/app/constant"; + +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Google Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const controller = new AbortController(); + + const serverConfig = getServerSideConfig(); + + let baseUrl = serverConfig.googleUrl || GEMINI_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + let path = `${req.nextUrl.pathname}`.replaceAll("/api/google/", ""); + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const authResult = auth(req, ModelProvider.GeminiPro); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + const bearToken = req.headers.get("Authorization") ?? ""; + const token = bearToken.trim().replaceAll("Bearer ", "").trim(); + + const key = token ? 
token : serverConfig.googleApiKey; + + if (!key) { + return NextResponse.json( + { + error: true, + message: `missing GOOGLE_API_KEY in server env vars`, + }, + { + status: 401, + }, + ); + } + + const fetchUrl = `${baseUrl}/${path}?key=${key}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + "Cache-Control": "no-store", + }, + method: req.method, + body: req.body, + // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + try { + const res = await fetch(fetchUrl, fetchOptions); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} + +export const GET = handle; +export const POST = handle; + +export const runtime = "edge"; +export const preferredRegion = [ + "bom1", + "cle1", + "cpt1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai/[...path]/route.ts index 2addd53a5..77059c151 100644 --- a/app/api/openai/[...path]/route.ts +++ b/app/api/openai/[...path]/route.ts @@ -1,6 +1,6 @@ import { type OpenAIListModelResponse } from "@/app/client/platforms/openai"; import { getServerSideConfig } from "@/app/config/server"; -import { OpenaiPath } from "@/app/constant"; +import { ModelProvider, OpenaiPath } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "../../auth"; @@ -45,7 +45,7 @@ async function handle( ); } - const authResult = auth(req); + const authResult = auth(req, ModelProvider.GPT); if (authResult.error) { return NextResponse.json(authResult, { status: 401, @@ -75,4 +75,22 @@ export const GET = handle; export const POST = handle; export const runtime = "edge"; -export const preferredRegion = ['arn1', 'bom1', 'cdg1', 'cle1', 'cpt1', 'dub1', 'fra1', 'gru1', 'hnd1', 'iad1', 'icn1', 'kix1', 'lhr1', 'pdx1', 'sfo1', 'sin1', 'syd1']; +export const preferredRegion = [ + "arn1", + "bom1", + "cdg1", + "cle1", + "cpt1", + "dub1", + "fra1", + "gru1", + "hnd1", + "iad1", + "icn1", + "kix1", + "lhr1", + "pdx1", + "sfo1", + "sin1", + "syd1", +]; diff --git a/app/api/upstash/[action]/[...key]/route.ts b/app/api/upstash/[action]/[...key]/route.ts new file mode 100644 index 000000000..fcfef4718 --- /dev/null +++ b/app/api/upstash/[action]/[...key]/route.ts @@ -0,0 +1,73 @@ +import { NextRequest, NextResponse } from "next/server"; + +async function handle( + req: NextRequest, + { params }: { params: { action: string; key: string[] } }, +) { + const requestUrl = new URL(req.url); + const endpoint = requestUrl.searchParams.get("endpoint"); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + const [...key] = params.key; + // only allow to request to *.upstash.io + if (!endpoint || !new URL(endpoint).hostname.endsWith(".upstash.io")) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + params.key.join("/"), + }, + { + status: 403, + }, + ); + } + + // only allow upstash get and set method + if (params.action !== 
"get" && params.action !== "set") { + console.log("[Upstash Route] forbidden action ", params.action); + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + params.action, + }, + { + status: 403, + }, + ); + } + + const targetUrl = `${endpoint}/${params.action}/${params.key.join("/")}`; + + const method = req.method; + const shouldNotHaveBody = ["get", "head"].includes( + method?.toLowerCase() ?? "", + ); + + const fetchOptions: RequestInit = { + headers: { + authorization: req.headers.get("authorization") ?? "", + }, + body: shouldNotHaveBody ? null : req.body, + method, + // @ts-ignore + duplex: "half", + }; + + console.log("[Upstash Proxy]", targetUrl, fetchOptions); + const fetchResult = await fetch(targetUrl, fetchOptions); + + console.log("[Any Proxy]", targetUrl, { + status: fetchResult.status, + statusText: fetchResult.statusText, + }); + + return fetchResult; +} + +export const POST = handle; +export const GET = handle; +export const OPTIONS = handle; + +export const runtime = "edge"; diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts new file mode 100644 index 000000000..56c2388ae --- /dev/null +++ b/app/api/webdav/[...path]/route.ts @@ -0,0 +1,124 @@ +import { NextRequest, NextResponse } from "next/server"; +import { STORAGE_KEY } from "../../../constant"; +async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + const folder = STORAGE_KEY; + const fileName = `${folder}/backup.json`; + + const requestUrl = new URL(req.url); + let endpoint = requestUrl.searchParams.get("endpoint"); + + // Validate the endpoint to prevent potential SSRF attacks + if (!endpoint || !endpoint.startsWith("/")) { + return NextResponse.json( + { + error: true, + msg: "Invalid endpoint", + }, + { + status: 400, + }, + ); + } + const endpointPath = params.path.join("/"); + const targetPath = `${endpoint}/${endpointPath}`; + + // only allow MKCOL, GET, PUT + if (req.method !== "MKCOL" && req.method !== "GET" && req.method !== "PUT") { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + // for MKCOL request, only allow request ${folder} + if ( + req.method === "MKCOL" && + !targetPath.endsWith(folder) + ) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + // for GET request, only allow request ending with fileName + if ( + req.method === "GET" && + !targetPath.endsWith(fileName) + ) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + // for PUT request, only allow request ending with fileName + if ( + req.method === "PUT" && + !targetPath.endsWith(fileName) + ) { + return NextResponse.json( + { + error: true, + msg: "you are not allowed to request " + targetPath, + }, + { + status: 403, + }, + ); + } + + const targetUrl = `${endpoint}/${endpointPath}`; + + const method = req.method; + const shouldNotHaveBody = ["get", "head"].includes( + method?.toLowerCase() ?? "", + ); + + const fetchOptions: RequestInit = { + headers: { + authorization: req.headers.get("authorization") ?? "", + }, + body: shouldNotHaveBody ? 
null : req.body, + redirect: 'manual', + method, + // @ts-ignore + duplex: "half", + }; + + const fetchResult = await fetch(targetUrl, fetchOptions); + + console.log("[Any Proxy]", targetUrl, { + status: fetchResult.status, + statusText: fetchResult.statusText, + }); + + return fetchResult; +} + +export const POST = handle; +export const GET = handle; +export const OPTIONS = handle; + +export const runtime = "edge"; diff --git a/app/client/api.ts b/app/client/api.ts index eedd2c9ab..c4d548a41 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -1,17 +1,30 @@ import { getClientConfig } from "../config/client"; -import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant"; -import { ChatMessage, ModelType, useAccessStore } from "../store"; +import { + ACCESS_CODE_PREFIX, + Azure, + ModelProvider, + ServiceProvider, +} from "../constant"; +import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; import { ChatGPTApi } from "./platforms/openai"; - +import { GeminiProApi } from "./platforms/google"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; export type ChatModel = ModelType; +export interface MultimodalContent { + type: "text" | "image_url"; + text?: string; + image_url?: { + url: string; + }; +} + export interface RequestMessage { role: MessageRole; - content: string; + content: string | MultimodalContent[]; } export interface LLMConfig { @@ -41,6 +54,13 @@ export interface LLMUsage { export interface LLMModel { name: string; available: boolean; + provider: LLMModelProvider; +} + +export interface LLMModelProvider { + id: string; + providerName: string; + providerType: string; } export abstract class LLMApi { @@ -73,7 +93,11 @@ interface ChatProvider { export class ClientApi { public llm: LLMApi; - constructor() { + constructor(provider: ModelProvider = ModelProvider.GPT) { + if (provider === ModelProvider.GeminiPro) { + this.llm = new GeminiProApi(); + return; + } this.llm = new ChatGPTApi(); } @@ -93,7 +117,7 @@ export class ClientApi { { from: "human", value: - "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web", + "Share from [NextChat]: https://github.com/Yidadaa/ChatGPT-Next-Web", }, ]); // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用 @@ -123,32 +147,38 @@ export class ClientApi { } } -export const api = new ClientApi(); - export function getHeaders() { const accessStore = useAccessStore.getState(); const headers: Record = { "Content-Type": "application/json", - "x-requested-with": "XMLHttpRequest", + Accept: "application/json", }; - + const modelConfig = useChatStore.getState().currentSession().mask.modelConfig; + const isGoogle = modelConfig.model.startsWith("gemini"); const isAzure = accessStore.provider === ServiceProvider.Azure; const authHeader = isAzure ? "api-key" : "Authorization"; - const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey; - + const apiKey = isGoogle + ? accessStore.googleApiKey + : isAzure + ? accessStore.azureApiKey + : accessStore.openaiApiKey; + const clientConfig = getClientConfig(); const makeBearer = (s: string) => `${isAzure ? 
"" : "Bearer "}${s.trim()}`; const validString = (x: string) => x && x.length > 0; - // use user's api key first - if (validString(apiKey)) { - headers[authHeader] = makeBearer(apiKey); - } else if ( - accessStore.enabledAccessControl() && - validString(accessStore.accessCode) - ) { - headers[authHeader] = makeBearer( - ACCESS_CODE_PREFIX + accessStore.accessCode, - ); + // when using google api in app, not set auth header + if (!(isGoogle && clientConfig?.isApp)) { + // use user's api key first + if (validString(apiKey)) { + headers[authHeader] = makeBearer(apiKey); + } else if ( + accessStore.enabledAccessControl() && + validString(accessStore.accessCode) + ) { + headers[authHeader] = makeBearer( + ACCESS_CODE_PREFIX + accessStore.accessCode, + ); + } } return headers; diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts new file mode 100644 index 000000000..848e5cd3f --- /dev/null +++ b/app/client/platforms/google.ts @@ -0,0 +1,278 @@ +import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; +import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { getClientConfig } from "@/app/config/client"; +import { DEFAULT_API_HOST } from "@/app/constant"; +import { + getMessageTextContent, + getMessageImages, + isVisionModel, +} from "@/app/utils"; + +export class GeminiProApi implements LLMApi { + extractMessage(res: any) { + console.log("[Response] gemini-pro response: ", res); + + return ( + res?.candidates?.at(0)?.content?.parts.at(0)?.text || + res?.error?.message || + "" + ); + } + async chat(options: ChatOptions): Promise { + // const apiClient = this; + const visionModel = isVisionModel(options.config.model); + let multimodal = false; + const messages = options.messages.map((v) => { + let parts: any[] = [{ text: getMessageTextContent(v) }]; + if (visionModel) { + const images = getMessageImages(v); + if (images.length > 0) { + multimodal = true; + parts = parts.concat( + images.map((image) => { + const imageType = image.split(";")[0].split(":")[1]; + const imageData = image.split(",")[1]; + return { + inline_data: { + mime_type: imageType, + data: imageData, + }, + }; + }), + ); + } + } + return { + role: v.role.replace("assistant", "model").replace("system", "user"), + parts: parts, + }; + }); + + // google requires that role in neighboring messages must not be the same + for (let i = 0; i < messages.length - 1; ) { + // Check if current and next item both have the role "model" + if (messages[i].role === messages[i + 1].role) { + // Concatenate the 'parts' of the current and next item + messages[i].parts = messages[i].parts.concat(messages[i + 1].parts); + // Remove the next item + messages.splice(i + 1, 1); + } else { + // Move to the next item + i++; + } + } + // if (visionModel && messages.length > 1) { + // options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision")); + // } + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + }, + }; + const requestPayload = { + contents: messages, + generationConfig: { + // stopSequences: [ + // "Title" + // ], + temperature: modelConfig.temperature, + maxOutputTokens: modelConfig.max_tokens, + topP: modelConfig.top_p, + // "topK": modelConfig.top_k, + }, + safetySettings: [ + { + category: "HARM_CATEGORY_HARASSMENT", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: 
"HARM_CATEGORY_HATE_SPEECH", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_SEXUALLY_EXPLICIT", + threshold: "BLOCK_ONLY_HIGH", + }, + { + category: "HARM_CATEGORY_DANGEROUS_CONTENT", + threshold: "BLOCK_ONLY_HIGH", + }, + ], + }; + + const accessStore = useAccessStore.getState(); + let baseUrl = accessStore.googleUrl; + const isApp = !!getClientConfig()?.isApp; + + let shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + try { + let googleChatPath = visionModel + ? Google.VisionChatPath + : Google.ChatPath; + let chatPath = this.path(googleChatPath); + + // let baseUrl = accessStore.googleUrl; + + if (!baseUrl) { + baseUrl = isApp + ? DEFAULT_API_HOST + "/api/proxy/google/" + googleChatPath + : chatPath; + } + + if (isApp) { + baseUrl += `?key=${accessStore.googleApiKey}`; + } + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + if (shouldStream) { + let responseText = ""; + let remainText = ""; + let finished = false; + + let existingTexts: string[] = []; + const finish = () => { + finished = true; + options.onFinish(existingTexts.join("")); + }; + + // animate response to make it looks smooth + function animateResponseText() { + if (finished || controller.signal.aborted) { + responseText += remainText; + finish(); + return; + } + + if (remainText.length > 0) { + const fetchCount = Math.max(1, Math.round(remainText.length / 60)); + const fetchText = remainText.slice(0, fetchCount); + responseText += fetchText; + remainText = remainText.slice(fetchCount); + options.onUpdate?.(responseText, fetchText); + } + + requestAnimationFrame(animateResponseText); + } + + // start animaion + animateResponseText(); + + fetch( + baseUrl.replace("generateContent", "streamGenerateContent"), + chatPayload, + ) + .then((response) => { + const reader = response?.body?.getReader(); + const decoder = new TextDecoder(); + let partialData = ""; + + return reader?.read().then(function processText({ + done, + value, + }): Promise { + if (done) { + if (response.status !== 200) { + try { + let data = JSON.parse(ensureProperEnding(partialData)); + if (data && data[0].error) { + options.onError?.(new Error(data[0].error.message)); + } else { + options.onError?.(new Error("Request failed")); + } + } catch (_) { + options.onError?.(new Error("Request failed")); + } + } + + console.log("Stream complete"); + // options.onFinish(responseText + remainText); + finished = true; + return Promise.resolve(); + } + + partialData += decoder.decode(value, { stream: true }); + + try { + let data = JSON.parse(ensureProperEnding(partialData)); + + const textArray = data.reduce( + (acc: string[], item: { candidates: any[] }) => { + const texts = item.candidates.map((candidate) => + candidate.content.parts + .map((part: { text: any }) => part.text) + .join(""), + ); + return acc.concat(texts); + }, + [], + ); + + if (textArray.length > existingTexts.length) { + const deltaArray = textArray.slice(existingTexts.length); + existingTexts = textArray; + remainText += deltaArray.join(""); + } + } catch (error) { + // console.log("[Response Animation] error: ", error,partialData); + // skip error message when parsing json + } + + return reader.read().then(processText); + }); + }) + .catch((error) => { + console.error("Error:", error); + }); + } 
else { + const res = await fetch(baseUrl, chatPayload); + clearTimeout(requestTimeoutId); + const resJson = await res.json(); + if (resJson?.promptFeedback?.blockReason) { + // being blocked + options.onError?.( + new Error( + "Message is being blocked for reason: " + + resJson.promptFeedback.blockReason, + ), + ); + } + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + usage(): Promise { + throw new Error("Method not implemented."); + } + async models(): Promise { + return []; + } + path(path: string): string { + return "/api/google/" + path; + } +} + +function ensureProperEnding(str: string) { + if (str.startsWith("[") && !str.endsWith("]")) { + return str + "]"; + } + return str; +} diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 8ea864692..78e628ad7 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -1,3 +1,4 @@ +"use client"; import { ApiPath, DEFAULT_API_HOST, @@ -8,7 +9,14 @@ import { } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + LLMUsage, + MultimodalContent, +} from "../api"; import Locale from "../../locales"; import { EventStreamContentType, @@ -17,6 +25,11 @@ import { import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { makeAzurePath } from "@/app/azure"; +import { + getMessageTextContent, + getMessageImages, + isVisionModel, +} from "@/app/utils"; export interface OpenAIListModelResponse { object: string; @@ -45,7 +58,9 @@ export class ChatGPTApi implements LLMApi { if (baseUrl.length === 0) { const isApp = !!getClientConfig()?.isApp; - baseUrl = isApp ? DEFAULT_API_HOST : ApiPath.OpenAI; + baseUrl = isApp + ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI + : ApiPath.OpenAI; } if (baseUrl.endsWith("/")) { @@ -59,6 +74,8 @@ export class ChatGPTApi implements LLMApi { path = makeAzurePath(path, accessStore.azureApiVersion); } + console.log("[Proxy Endpoint] ", baseUrl, path); + return [baseUrl, path].join("/"); } @@ -67,9 +84,10 @@ export class ChatGPTApi implements LLMApi { } async chat(options: ChatOptions) { + const visionModel = isVisionModel(options.config.model); const messages = options.messages.map((v) => ({ role: v.role, - content: v.content, + content: visionModel ? v.content : getMessageTextContent(v), })); const modelConfig = { @@ -92,6 +110,16 @@ export class ChatGPTApi implements LLMApi { // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. 
}; + // add max_tokens to vision model + if (visionModel) { + Object.defineProperty(requestPayload, "max_tokens", { + enumerable: true, + configurable: true, + writable: true, + value: modelConfig.max_tokens, + }); + } + console.log("[Request] openai payload: ", requestPayload); const shouldStream = !!options.config.stream; @@ -123,6 +151,9 @@ export class ChatGPTApi implements LLMApi { if (finished || controller.signal.aborted) { responseText += remainText; console.log("[Response Animation] finished"); + if (responseText?.length === 0) { + options.onError?.(new Error("empty response from server")); + } return; } @@ -323,6 +354,11 @@ export class ChatGPTApi implements LLMApi { return chatModels.map((m) => ({ name: m.id, available: true, + provider: { + id: "openai", + providerName: "OpenAI", + providerType: "openai", + }, })); } } diff --git a/app/components/auth.tsx b/app/components/auth.tsx index 7962d46be..57118349b 100644 --- a/app/components/auth.tsx +++ b/app/components/auth.tsx @@ -64,6 +64,17 @@ export function AuthPage() { ); }} /> + { + accessStore.update( + (access) => (access.googleApiKey = e.currentTarget.value), + ); + }} + /> ) : null} diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx index 33967717d..7ef6e7b83 100644 --- a/app/components/chat-list.tsx +++ b/app/components/chat-list.tsx @@ -12,7 +12,7 @@ import { import { useChatStore } from "../store"; import Locale from "../locales"; -import { Link, useNavigate } from "react-router-dom"; +import { Link, useLocation, useNavigate } from "react-router-dom"; import { Path } from "../constant"; import { MaskAvatar } from "./mask"; import { Mask } from "../store/mask"; @@ -40,12 +40,16 @@ export function ChatItem(props: { }); } }, [props.selected]); + + const { pathname: currentPath } = useLocation(); return ( {(provided) => (
{ diff --git a/app/components/chat.module.scss b/app/components/chat.module.scss index 16790ccb1..e7619e92b 100644 --- a/app/components/chat.module.scss +++ b/app/components/chat.module.scss @@ -1,5 +1,47 @@ @import "../styles/animation.scss"; +.attach-images { + position: absolute; + left: 30px; + bottom: 32px; + display: flex; +} + +.attach-image { + cursor: default; + width: 64px; + height: 64px; + border: rgba($color: #888, $alpha: 0.2) 1px solid; + border-radius: 5px; + margin-right: 10px; + background-size: cover; + background-position: center; + background-color: var(--white); + + .attach-image-mask { + width: 100%; + height: 100%; + opacity: 0; + transition: all ease 0.2s; + } + + .attach-image-mask:hover { + opacity: 1; + } + + .delete-image { + width: 24px; + height: 24px; + cursor: pointer; + display: flex; + align-items: center; + justify-content: center; + border-radius: 5px; + float: right; + background-color: var(--white); + } +} + .chat-input-actions { display: flex; flex-wrap: wrap; @@ -189,12 +231,10 @@ animation: slide-in ease 0.3s; - $linear: linear-gradient( - to right, - rgba(0, 0, 0, 0), - rgba(0, 0, 0, 1), - rgba(0, 0, 0, 0) - ); + $linear: linear-gradient(to right, + rgba(0, 0, 0, 0), + rgba(0, 0, 0, 1), + rgba(0, 0, 0, 0)); mask-image: $linear; @mixin show { @@ -327,7 +367,7 @@ } } -.chat-message-user > .chat-message-container { +.chat-message-user>.chat-message-container { align-items: flex-end; } @@ -349,6 +389,7 @@ padding: 7px; } } + /* Specific styles for iOS devices */ @media screen and (max-device-width: 812px) and (-webkit-min-device-pixel-ratio: 2) { @supports (-webkit-touch-callout: none) { @@ -381,6 +422,64 @@ transition: all ease 0.3s; } +.chat-message-item-image { + width: 100%; + margin-top: 10px; +} + +.chat-message-item-images { + width: 100%; + display: grid; + justify-content: left; + grid-gap: 10px; + grid-template-columns: repeat(var(--image-count), auto); + margin-top: 10px; +} + +.chat-message-item-image-multi { + object-fit: cover; + background-size: cover; + background-position: center; + background-repeat: no-repeat; +} + +.chat-message-item-image, +.chat-message-item-image-multi { + box-sizing: border-box; + border-radius: 10px; + border: rgba($color: #888, $alpha: 0.2) 1px solid; +} + + +@media only screen and (max-width: 600px) { + $calc-image-width: calc(100vw/3*2/var(--image-count)); + + .chat-message-item-image-multi { + width: $calc-image-width; + height: $calc-image-width; + } + + .chat-message-item-image { + max-width: calc(100vw/3*2); + } +} + +@media screen and (min-width: 600px) { + $max-image-width: calc(calc(1200px - var(--sidebar-width))/3*2/var(--image-count)); + $image-width: calc(calc(var(--window-width) - var(--sidebar-width))/3*2/var(--image-count)); + + .chat-message-item-image-multi { + width: $image-width; + height: $image-width; + max-width: $max-image-width; + max-height: $max-image-width; + } + + .chat-message-item-image { + max-width: calc(calc(1200px - var(--sidebar-width))/3*2); + } +} + .chat-message-action-date { font-size: 12px; opacity: 0.2; @@ -395,7 +494,7 @@ z-index: 1; } -.chat-message-user > .chat-message-container > .chat-message-item { +.chat-message-user>.chat-message-container>.chat-message-item { background-color: var(--second); &:hover { @@ -460,6 +559,7 @@ @include single-line(); } + .hint-content { font-size: 12px; @@ -474,15 +574,26 @@ } .chat-input-panel-inner { + cursor: text; display: flex; flex: 1; + border-radius: 10px; + border: var(--border-in-light); +} + 
+.chat-input-panel-inner-attach {
+  padding-bottom: 80px;
+}
+
+.chat-input-panel-inner:has(.chat-input:focus) {
+  border: 1px solid var(--primary);
 }

 .chat-input {
   height: 100%;
   width: 100%;
   border-radius: 10px;
-  border: var(--border-in-light);
+  border: none;
   box-shadow: 0 -2px 5px rgba(0, 0, 0, 0.03);
   background-color: var(--white);
   color: var(--black);
@@ -494,9 +605,7 @@
   min-height: 68px;
 }

-.chat-input:focus {
-  border: 1px solid var(--primary);
-}
+.chat-input:focus {}

 .chat-input-send {
   background-color: var(--primary);
@@ -515,4 +624,4 @@
   .chat-input-send {
     bottom: 30px;
   }
-}
+}
\ No newline at end of file
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index 39abdd97b..b9750f285 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -6,6 +6,7 @@ import React, {
   useMemo,
   useCallback,
   Fragment,
+  RefObject,
 } from "react";

 import SendWhiteIcon from "../icons/send-white.svg";
@@ -15,6 +16,7 @@ import ExportIcon from "../icons/share.svg";
 import ReturnIcon from "../icons/return.svg";
 import CopyIcon from "../icons/copy.svg";
 import LoadingIcon from "../icons/three-dots.svg";
+import LoadingButtonIcon from "../icons/loading.svg";
 import PromptIcon from "../icons/prompt.svg";
 import MaskIcon from "../icons/mask.svg";
 import MaxIcon from "../icons/max.svg";
@@ -27,6 +29,7 @@ import PinIcon from "../icons/pin.svg";
 import EditIcon from "../icons/rename.svg";
 import ConfirmIcon from "../icons/confirm.svg";
 import CancelIcon from "../icons/cancel.svg";
+import ImageIcon from "../icons/image.svg";

 import LightIcon from "../icons/light.svg";
 import DarkIcon from "../icons/dark.svg";
@@ -53,6 +56,10 @@ import {
   selectOrCopy,
   autoGrowTextArea,
   useMobileScreen,
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+  compressImage,
 } from "../utils";

 import dynamic from "next/dynamic";
@@ -89,6 +96,7 @@ import { prettyObject } from "../utils/format";
 import { ExportMessageModal } from "./exporter";
 import { getClientConfig } from "../config/client";
 import { useAllModels } from "../utils/hooks";
+import { MultimodalContent } from "../client/api";

 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
@@ -211,6 +219,8 @@ function useSubmitHandler() {
   }, []);

   const shouldSubmit = (e: React.KeyboardEvent) => {
+    // Fix Chinese input method "Enter" on Safari
+    if (e.keyCode == 229) return false;
     if (e.key !== "Enter") return false;
     if (e.key === "Enter" && (e.nativeEvent.isComposing || isComposing.current))
       return false;
@@ -375,11 +385,13 @@ function ChatAction(props: {
   );
 }

-function useScrollToBottom() {
+function useScrollToBottom(
+  scrollRef: RefObject<HTMLDivElement>,
+  detach: boolean = false,
+) {
   // for auto-scroll
-  const scrollRef = useRef<HTMLDivElement>(null);
-  const [autoScroll, setAutoScroll] = useState(true);
+  const [autoScroll, setAutoScroll] = useState(true);

   function scrollDomToBottom() {
     const dom = scrollRef.current;
     if (dom) {
@@ -392,7 +404,7 @@

   // auto scroll
   useEffect(() => {
-    if (autoScroll) {
+    if (autoScroll && !detach) {
       scrollDomToBottom();
     }
   });
@@ -406,10 +418,14 @@
 }

 export function ChatActions(props: {
+  uploadImage: () => void;
+  setAttachImages: (images: string[]) => void;
+  setUploading: (uploading: boolean) => void;
   showPromptModal: () => void;
   scrollToBottom: () => void;
   showPromptHints: () => void;
   hitBottom: boolean;
+  uploading: boolean;
 }) {
   const config = useAppConfig();
   const navigate = useNavigate();
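A note on the `shouldSubmit` change above: browsers report `keyCode` 229 for every keydown that an IME (for example, Chinese pinyin input) is still composing, so bailing out on 229 prevents Safari from sending a half-composed message when the user presses Enter to confirm a candidate word. The same guard in isolation, as a sketch with a plain DOM event (the real hook also checks the configured submit key):

    function isPlainEnterSubmit(e: KeyboardEvent): boolean {
      // 229 is the reserved keyCode emitted while an IME composition is
      // in progress; Enter during composition must never send.
      if (e.keyCode === 229) return false;
      if (e.key !== "Enter") return false;
      if (e.isComposing) return false;
      return !e.shiftKey; // Shift+Enter inserts a newline instead
    }

The `useScrollToBottom` refactor in the same file moves ownership of `scrollRef` to the caller and adds a `detach` flag, which `_Chat` later wires to "the user is not at the bottom".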
@@ -437,8 +453,16 @@ export function ChatActions(props: {
     [allModels],
   );

   const [showModelSelector, setShowModelSelector] = useState(false);
+  const [showUploadImage, setShowUploadImage] = useState(false);

   useEffect(() => {
+    const show = isVisionModel(currentModel);
+    setShowUploadImage(show);
+    if (!show) {
+      props.setAttachImages([]);
+      props.setUploading(false);
+    }
+
     // if current model is not available
     // switch to first available model
     const isUnavaliableModel = !models.some((m) => m.name === currentModel);
@@ -475,6 +499,13 @@ export function ChatActions(props: {
         />
       )}

+      {showUploadImage && (
+        <ChatAction
+          onClick={props.uploadImage}
+          text={Locale.Chat.InputActions.UploadImage}
+          icon={props.uploading ? <LoadingButtonIcon /> : <ImageIcon />}
+        />
+      )}
@@ … @@ export function EditMessageModal(props: { onClose: () => void }) {
   );
 }

+export function DeleteImageButton(props: { deleteImage: () => void }) {
+  return (
+    <div className={styles["delete-image"]} onClick={props.deleteImage}>
+      <DeleteIcon />
+    </div>
+  );
+}
+
 function _Chat() {
   type RenderMessage = ChatMessage & { preview?: boolean };

@@ -624,10 +663,22 @@ function _Chat() {
   const [userInput, setUserInput] = useState("");
   const [isLoading, setIsLoading] = useState(false);
   const { submitKey, shouldSubmit } = useSubmitHandler();
-  const { scrollRef, setAutoScroll, scrollDomToBottom } = useScrollToBottom();
+  const scrollRef = useRef<HTMLDivElement>(null);
+  const isScrolledToBottom = scrollRef?.current
+    ? Math.abs(
+        scrollRef.current.scrollHeight -
+          (scrollRef.current.scrollTop + scrollRef.current.clientHeight),
+      ) <= 1
+    : false;
+  const { setAutoScroll, scrollDomToBottom } = useScrollToBottom(
+    scrollRef,
+    isScrolledToBottom,
+  );
   const [hitBottom, setHitBottom] = useState(true);
   const isMobileScreen = useMobileScreen();
   const navigate = useNavigate();
+  const [attachImages, setAttachImages] = useState<string[]>([]);
+  const [uploading, setUploading] = useState(false);

   // prompt hints
   const promptStore = usePromptStore();
@@ -705,7 +756,10 @@ function _Chat() {
       return;
     }
     setIsLoading(true);
-    chatStore.onUserInput(userInput).then(() => setIsLoading(false));
+    chatStore
+      .onUserInput(userInput, attachImages)
+      .then(() => setIsLoading(false));
+    setAttachImages([]);
     localStorage.setItem(LAST_INPUT_KEY, userInput);
     setUserInput("");
     setPromptHints([]);
@@ -783,9 +837,9 @@ function _Chat() {
   };

   const onRightClick = (e: any, message: ChatMessage) => {
     // copy to clipboard
-    if (selectOrCopy(e.currentTarget, message.content)) {
+    if (selectOrCopy(e.currentTarget, getMessageTextContent(message))) {
       if (userInput.length === 0) {
-        setUserInput(message.content);
+        setUserInput(getMessageTextContent(message));
       }

       e.preventDefault();
@@ -853,7 +907,9 @@ function _Chat() {

     // resend the message
     setIsLoading(true);
-    chatStore.onUserInput(userMessage.content).then(() => setIsLoading(false));
+    const textContent = getMessageTextContent(userMessage);
+    const images = getMessageImages(userMessage);
+    chatStore.onUserInput(textContent, images).then(() => setIsLoading(false));
     inputRef.current?.focus();
   };

@@ -962,7 +1018,6 @@ function _Chat() {
     setHitBottom(isHitBottom);
     setAutoScroll(isHitBottom);
   };
-
   function scrollToBottom() {
     setMsgRenderIndex(renderMessages.length - CHAT_PAGE_SIZE);
     scrollDomToBottom();
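`isScrolledToBottom` above compares against a 1px tolerance rather than exact equality because `scrollTop` can be fractional on zoomed or high-DPI displays while `scrollHeight` and `clientHeight` round to integers. The check in isolation (hypothetical helper name):

    // True when a scrollable element sits within 1px of its bottom edge.
    function isAtBottom(el: HTMLElement): boolean {
      const distance = el.scrollHeight - (el.scrollTop + el.clientHeight);
      return Math.abs(distance) <= 1;
    }

Feeding that value into the hook's `detach` flag means auto-scroll pauses while the user reads scrollback and resumes once they return to the bottom on their own.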
@@ -1047,6 +1102,92 @@ function _Chat() {
     };
     // eslint-disable-next-line react-hooks/exhaustive-deps
   }, []);
+
+  const handlePaste = useCallback(
+    async (event: React.ClipboardEvent<HTMLTextAreaElement>) => {
+      const currentModel = chatStore.currentSession().mask.modelConfig.model;
+      if (!isVisionModel(currentModel)) {
+        return;
+      }
+      const items = (event.clipboardData || window.clipboardData).items;
+      for (const item of items) {
+        if (item.kind === "file" && item.type.startsWith("image/")) {
+          event.preventDefault();
+          const file = item.getAsFile();
+          if (file) {
+            const images: string[] = [];
+            images.push(...attachImages);
+            images.push(
+              ...(await new Promise<string[]>((res, rej) => {
+                setUploading(true);
+                const imagesData: string[] = [];
+                compressImage(file, 256 * 1024)
+                  .then((dataUrl) => {
+                    imagesData.push(dataUrl);
+                    setUploading(false);
+                    res(imagesData);
+                  })
+                  .catch((e) => {
+                    setUploading(false);
+                    rej(e);
+                  });
+              })),
+            );
+            const imagesLength = images.length;
+
+            if (imagesLength > 3) {
+              images.splice(3, imagesLength - 3);
+            }
+            setAttachImages(images);
+          }
+        }
+      }
+    },
+    [attachImages, chatStore],
+  );
+
+  async function uploadImage() {
+    const images: string[] = [];
+    images.push(...attachImages);
+
+    images.push(
+      ...(await new Promise<string[]>((res, rej) => {
+        const fileInput = document.createElement("input");
+        fileInput.type = "file";
+        fileInput.accept =
+          "image/png, image/jpeg, image/webp, image/heic, image/heif";
+        fileInput.multiple = true;
+        fileInput.onchange = (event: any) => {
+          setUploading(true);
+          const files = event.target.files;
+          const imagesData: string[] = [];
+          for (let i = 0; i < files.length; i++) {
+            const file = event.target.files[i];
+            compressImage(file, 256 * 1024)
+              .then((dataUrl) => {
+                imagesData.push(dataUrl);
+                if (
+                  imagesData.length === 3 ||
+                  imagesData.length === files.length
+                ) {
+                  setUploading(false);
+                  res(imagesData);
+                }
+              })
+              .catch((e) => {
+                setUploading(false);
+                rej(e);
+              });
+          }
+        };
+        fileInput.click();
+      })),
+    );

+    const imagesLength = images.length;
+    if (imagesLength > 3) {
+      images.splice(3, imagesLength - 3);
+    }
+    setAttachImages(images);
+  }

   return (
     <div className={styles.chat} key={session.id}>
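Both the paste handler and the file picker above funnel files through `compressImage(file, 256 * 1024)` (a utility this patch imports from `../utils`, resolving to a data-URL string) and then clamp the attachment list to three images via `splice`. The same pipeline, condensed into a sketch (`MAX_ATTACH` is an illustrative constant; unlike the real `uploadImage`, this waits for every file instead of resolving at the third):

    const MAX_ATTACH = 3;

    // Compress each picked file to a ~256KB data URL, merge with what is
    // already attached, and drop anything beyond the cap.
    async function mergeAttachments(
      existing: string[],
      files: File[],
      compress: (f: File, maxBytes: number) => Promise<string>,
    ): Promise<string[]> {
      const compressed = await Promise.all(
        files.map((file) => compress(file, 256 * 1024)),
      );
      return [...existing, ...compressed].slice(0, MAX_ATTACH);
    }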
@@ -1154,15 +1295,29 @@ function _Chat() {
                   onClick={async () => {
                     const newMessage = await showPrompt(
                       Locale.Chat.Actions.Edit,
-                      message.content,
+                      getMessageTextContent(message),
                       10,
                     );
+                    let newContent: string | MultimodalContent[] =
+                      newMessage;
+                    const images = getMessageImages(message);
+                    if (images.length > 0) {
+                      newContent = [{ type: "text", text: newMessage }];
+                      for (let i = 0; i < images.length; i++) {
+                        newContent.push({
+                          type: "image_url",
+                          image_url: {
+                            url: images[i],
+                          },
+                        });
+                      }
+                    }
                     chatStore.updateCurrentSession((session) => {
                       const m = session.mask.context
                         .concat(session.messages)
                         .find((m) => m.id === message.id);
                       if (m) {
-                        m.content = newMessage;
+                        m.content = newContent;
                       }
                     });
                   }}
@@ -1217,7 +1372,11 @@ function _Chat() {
                       }
-                      onClick={() => copyToClipboard(message.content)}
+                      onClick={() =>
+                        copyToClipboard(
+                          getMessageTextContent(message),
+                        )
+                      }
                     />
                   )}
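When an edited message carries images, the handler above rebuilds `m.content` as a `MultimodalContent[]`: one `text` part followed by one `image_url` part per attached image, the same parts shape the vision-style chat payload uses. Condensed into a sketch reusing the patch's `MultimodalContent` import (the helper name is illustrative):

    import { MultimodalContent } from "../client/api";

    // Plain text stays a string; text plus images becomes a parts array.
    function buildEditedContent(
      text: string,
      images: string[],
    ): string | MultimodalContent[] {
      if (images.length === 0) return text;
      return [
        { type: "text", text },
        ...images.map(
          (url): MultimodalContent => ({ type: "image_url", image_url: { url } }),
        ),
      ];
    }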
@@ -1232,7 +1391,7 @@ function _Chat() {
                     onContextMenu={(e) => onRightClick(e, message)}
                     onDoubleClickCapture={() => {
                       if (!isMobileScreen) return;
-                      setUserInput(message.content);
+                      setUserInput(getMessageTextContent(message));
                     }}
                     fontSize={fontSize}
                     parentRef={scrollRef}
                     defaultShow={i >= messages.length - 6}
                   />
+                  {getMessageImages(message).length == 1 && (
+                    <img
+                      className={styles["chat-message-item-image"]}
+                      src={getMessageImages(message)[0]}
+                      alt=""
+                    />
+                  )}
+                  {getMessageImages(message).length > 1 && (
+                    <div
+                      className={styles["chat-message-item-images"]}
+                      style={
+                        {
+                          "--image-count": getMessageImages(message).length,
+                        } as React.CSSProperties
+                      }
+                    >
+                      {getMessageImages(message).map((image, index) => {
+                        return (
+                          <img
+                            className={styles["chat-message-item-image-multi"]}
+                            key={index}
+                            src={image}
+                            alt=""
+                          />
+                        );
+                      })}
+                    </div>
+                  )}
                 </div>
               </div>
@@ -1266,9 +1455,13 @@ function _Chat() {
         <ChatActions
+          uploadImage={uploadImage}
+          setAttachImages={setAttachImages}
+          setUploading={setUploading}
           showPromptModal={() => setShowPromptModal(true)}
           scrollToBottom={scrollToBottom}
           hitBottom={hitBottom}
+          uploading={uploading}
           showPromptHints={() => {
             // Click again to close
             if (promptHints.length > 0) {
@@ -1281,8 +1474,16 @@ function _Chat() {
             onSearch("");
           }}
         />
-        <div className={styles["chat-input-panel-inner"]}>
+        <label