mirror of
https://github.com/Yidadaa/ChatGPT-Next-Web.git
synced 2025-08-30 18:46:58 +08:00
fix: update ui
This commit is contained in:
@@ -58,7 +58,7 @@
|
||||
box-shadow: var(--card-shadow);
|
||||
transition: width ease 0.3s;
|
||||
align-items: center;
|
||||
height: 16px;
|
||||
height: 24px;
|
||||
width: var(--icon-width);
|
||||
overflow: hidden;
|
||||
|
||||
@@ -68,7 +68,6 @@
|
||||
|
||||
.text {
|
||||
white-space: nowrap;
|
||||
padding-left: 5px;
|
||||
opacity: 0;
|
||||
transform: translateX(-5px);
|
||||
transition: all ease 0.3s;
|
||||
@@ -610,10 +609,6 @@
|
||||
.chat-input-send {
|
||||
background-color: var(--primary);
|
||||
color: white;
|
||||
|
||||
position: absolute;
|
||||
right: 30px;
|
||||
bottom: 32px;
|
||||
}
|
||||
|
||||
@media only screen and (max-width: 600px) {
|
||||
|
@@ -97,7 +97,7 @@ import { ExportMessageModal } from "./exporter";
|
||||
import { getClientConfig } from "../config/client";
|
||||
import { useAllModels } from "../utils/hooks";
|
||||
import { MultimodalContent } from "../client/api";
|
||||
|
||||
import SpeechRecorder from "./chat/speechRecorder";
|
||||
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
|
||||
loading: () => <LoadingIcon />,
|
||||
});
|
||||
@@ -345,7 +345,7 @@ function ChatAction(props: {
|
||||
full: 16,
|
||||
icon: 16,
|
||||
});
|
||||
|
||||
const [isActive, setIsActive] = useState(false);
|
||||
function updateWidth() {
|
||||
if (!iconRef.current || !textRef.current) return;
|
||||
const getWidth = (dom: HTMLDivElement) => dom.getBoundingClientRect().width;
|
||||
@@ -359,25 +359,22 @@ function ChatAction(props: {
|
||||
|
||||
return (
|
||||
<div
|
||||
className={`${styles["chat-input-action"]} clickable`}
|
||||
className={`${styles["chat-input-action"]} clickable group`}
|
||||
onClick={() => {
|
||||
props.onClick();
|
||||
setTimeout(updateWidth, 1);
|
||||
}}
|
||||
onMouseEnter={updateWidth}
|
||||
onTouchStart={updateWidth}
|
||||
style={
|
||||
{
|
||||
"--icon-width": `${width.icon}px`,
|
||||
"--full-width": `${width.full}px`,
|
||||
} as React.CSSProperties
|
||||
}
|
||||
>
|
||||
<div ref={iconRef} className={styles["icon"]}>
|
||||
{props.icon}
|
||||
</div>
|
||||
<div className={styles["text"]} ref={textRef}>
|
||||
{props.text}
|
||||
<div className="flex">
|
||||
<div ref={iconRef} className={styles["icon"]}>
|
||||
{props.icon}
|
||||
</div>
|
||||
<div
|
||||
className={`${styles["text"]} transition-all duration-1000 w-0 group-hover:w-[60px]`}
|
||||
ref={textRef}
|
||||
>
|
||||
{props.text}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
@@ -454,15 +451,6 @@ export function ChatActions(props: {
|
||||
const [showModelSelector, setShowModelSelector] = useState(false);
|
||||
const [showUploadImage, setShowUploadImage] = useState(false);
|
||||
|
||||
const [speechRecognition, setSpeechRecognition] = useState(null);
|
||||
const [isRecording, setIsRecording] = useState(false);
|
||||
useEffect(() => {
|
||||
if ("SpeechRecognition" in window) {
|
||||
setSpeechRecognition(new window.SpeechRecognition());
|
||||
} else if ("webkitSpeechRecognition" in window) {
|
||||
setSpeechRecognition(new window.webkitSpeechRecognition());
|
||||
}
|
||||
}, []);
|
||||
useEffect(() => {
|
||||
const show = isVisionModel(currentModel);
|
||||
setShowUploadImage(show);
|
||||
@@ -485,30 +473,6 @@ export function ChatActions(props: {
|
||||
|
||||
return (
|
||||
<div className={styles["chat-input-actions"]}>
|
||||
{speechRecognition && (
|
||||
<ChatAction
|
||||
onClick={() => {
|
||||
if (!isRecording) {
|
||||
speechRecognition.continuous = true; // 连续识别
|
||||
speechRecognition.lang = "zh-CN"; // 设置识别的语言为中文
|
||||
speechRecognition.interimResults = true; // 返回临时结果
|
||||
speechRecognition.start();
|
||||
speechRecognition.onresult = function (event) {
|
||||
console.log(event);
|
||||
var transcript = event.results[0][0].transcript; // 获取识别结果
|
||||
console.log(transcript);
|
||||
props.setUserInput(transcript);
|
||||
};
|
||||
setIsRecording(true);
|
||||
} else {
|
||||
speechRecognition.stop();
|
||||
setIsRecording(false);
|
||||
}
|
||||
}}
|
||||
text="Speech"
|
||||
icon={<BrainIcon />}
|
||||
></ChatAction>
|
||||
)}
|
||||
{couldStop && (
|
||||
<ChatAction
|
||||
onClick={stopAll}
|
||||
@@ -1513,13 +1477,16 @@ function _Chat() {
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
<IconButton
|
||||
icon={<SendWhiteIcon />}
|
||||
text={Locale.Chat.Send}
|
||||
className={styles["chat-input-send"]}
|
||||
type="primary"
|
||||
onClick={() => doSubmit(userInput)}
|
||||
/>
|
||||
<div className="flex gap-2 absolute right-[30px] bottom-[32px]">
|
||||
<SpeechRecorder textUpdater={setUserInput}></SpeechRecorder>
|
||||
<IconButton
|
||||
icon={<SendWhiteIcon />}
|
||||
text={Locale.Chat.Send}
|
||||
className={styles["chat-input-send"]}
|
||||
type="primary"
|
||||
onClick={() => doSubmit(userInput)}
|
||||
/>
|
||||
</div>
|
||||
</label>
|
||||
</div>
|
||||
|
||||
|
50
app/components/chat/speechRecorder.tsx
Normal file
50
app/components/chat/speechRecorder.tsx
Normal file
@@ -0,0 +1,50 @@
|
||||
import React, { useState, useEffect } from "react";

// Minimal shape of a SpeechRecognition *instance*. The original code aliased
// `typeof window.SpeechRecognition` — that is the CONSTRUCTOR type, not the
// instance type — so every property access on the stored value was mistyped.
interface SpeechRecognitionLike {
  continuous: boolean;
  lang: string;
  interimResults: boolean;
  start(): void;
  stop(): void;
  onresult: ((event: any) => void) | null;
}

/**
 * Push-to-talk speech-input button backed by the Web Speech API.
 *
 * Clicking toggles recording. While recording, interim recognition results
 * are streamed to `textUpdater`; clicking again stops recognition and fires
 * the optional `onStop` callback.
 *
 * @param textUpdater receives the latest recognized transcript
 * @param onStop      invoked once when the user stops recording
 */
export default function SpeechRecorder({
  textUpdater,
  onStop,
}: {
  textUpdater: (text: string) => void;
  onStop?: () => void;
}) {
  const [speechRecognition, setSpeechRecognition] =
    useState<SpeechRecognitionLike | null>(null);
  const [isRecording, setIsRecording] = useState(false);

  useEffect(() => {
    // Prefer the standard constructor, fall back to the WebKit-prefixed one.
    // The DOM lib does not declare these in all TS configs, hence the cast.
    const Ctor =
      (window as any).SpeechRecognition ??
      (window as any).webkitSpeechRecognition;
    if (Ctor) setSpeechRecognition(new Ctor());
  }, []);

  return (
    <div>
      <button
        onClick={() => {
          // Guard first: the original called speechRecognition.stop() in its
          // else-branch without a null check, crashing on unsupported browsers.
          if (!speechRecognition) return;
          if (!isRecording) {
            speechRecognition.continuous = true; // keep listening across pauses
            speechRecognition.lang = "zh-CN"; // recognize Mandarin Chinese
            speechRecognition.interimResults = true; // stream partial results
            speechRecognition.onresult = (event: any) => {
              // With continuous + interim results the newest utterance is the
              // LAST entry in event.results; the original always read
              // results[0] and so never updated past the first phrase.
              const latest = event.results[event.results.length - 1];
              textUpdater(latest[0].transcript);
            };
            speechRecognition.start();
            setIsRecording(true);
          } else {
            speechRecognition.stop();
            setIsRecording(false);
            onStop?.(); // the original declared onStop but never invoked it
          }
        }}
      >
        {isRecording ? "输入中" : "点击说话"}
      </button>
    </div>
  );
}
|
@@ -30,6 +30,8 @@ declare global {
|
||||
// google only
|
||||
GOOGLE_API_KEY?: string;
|
||||
GOOGLE_URL?: string;
|
||||
|
||||
GTM_ID?: string;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1,3 +1,6 @@
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
@import "./animation.scss";
|
||||
@import "./window.scss";
|
||||
|
||||
|
@@ -50,6 +50,7 @@
|
||||
"@types/react-dom": "^18.2.7",
|
||||
"@types/react-katex": "^3.0.0",
|
||||
"@types/spark-md5": "^3.0.4",
|
||||
"autoprefixer": "^10.4.18",
|
||||
"cross-env": "^7.0.3",
|
||||
"eslint": "^8.49.0",
|
||||
"eslint-config-next": "13.4.19",
|
||||
@@ -57,11 +58,13 @@
|
||||
"eslint-plugin-prettier": "^4.2.1",
|
||||
"husky": "^8.0.0",
|
||||
"lint-staged": "^13.2.2",
|
||||
"postcss": "^8.4.35",
|
||||
"prettier": "^3.0.2",
|
||||
"tailwindcss": "^3.4.1",
|
||||
"typescript": "5.2.2",
|
||||
"webpack": "^5.88.1"
|
||||
},
|
||||
"resolutions": {
|
||||
"lint-staged/yaml": "^2.2.2"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
6
postcss.config.js
Normal file
6
postcss.config.js
Normal file
@@ -0,0 +1,6 @@
|
||||
// PostCSS pipeline for the build: Tailwind runs first to expand utility
// classes, then Autoprefixer adds vendor prefixes. Empty objects mean
// "use each plugin's default options".
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
}
|
15
tailwind.config.js
Normal file
15
tailwind.config.js
Normal file
@@ -0,0 +1,15 @@
|
||||
/** @type {import('tailwindcss').Config} */
module.exports = {
  // Globs scanned for class names at build time; classes not found in any
  // matched file are purged from the generated CSS.
  content: [
    "./app/**/*.{js,ts,jsx,tsx,mdx}",
    "./pages/**/*.{js,ts,jsx,tsx,mdx}",
    "./components/**/*.{js,ts,jsx,tsx,mdx}",

    // Or if using `src` directory:
    "./src/**/*.{js,ts,jsx,tsx,mdx}",
  ],
  theme: {
    // No design-token overrides yet; extend keeps Tailwind defaults intact.
    extend: {},
  },
  plugins: [],
}
|
@@ -23,6 +23,12 @@
|
||||
"@/*": ["./*"]
|
||||
}
|
||||
},
|
||||
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts", "app/calcTextareaHeight.ts"],
|
||||
"include": [
|
||||
"next-env.d.ts",
|
||||
"**/*.ts",
|
||||
"**/*.tsx",
|
||||
".next/types/**/*.ts",
|
||||
"app/calcTextareaHeight.ts"
|
||||
],
|
||||
"exclude": ["node_modules"]
|
||||
}
|
||||
|
Reference in New Issue
Block a user