mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git
add voice config
@@ -23,6 +23,7 @@ import {
   RTInputAudioItem,
   RTResponse,
   TurnDetection,
+  Voice,
 } from "rt-client";
 import { AudioHandler } from "@/app/lib/audio";
 import { uploadImage } from "@/app/utils/chat";
@@ -54,6 +55,7 @@ export function RealtimeChat({
   const [endpoint, setEndpoint] = useState("");
   const [deployment, setDeployment] = useState("");
   const [useVAD, setUseVAD] = useState(true);
+  const [voice, setVoice] = useState<Voice>("alloy");
 
   const clientRef = useRef<RTClient | null>(null);
   const audioHandlerRef = useRef<AudioHandler | null>(null);
@@ -78,6 +80,7 @@ export function RealtimeChat({
         : null;
       clientRef.current.configure({
         instructions: "",
+        voice,
         input_audio_transcription: { model: "whisper-1" },
         turn_detection: turnDetection,
         tools: [],
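
Note: the commit only threads the new voice state into configure(); it does not add a UI control to change it. Below is a minimal sketch of such a control, assuming a hypothetical VoiceSelector component and that voices other than "alloy" may be selectable. None of this is part of the commit; only the Voice type, the setVoice state setter, and the "alloy" default come from the diff.

import { Voice } from "rt-client";

// Hypothetical list of selectable voices; only "alloy" is confirmed by the diff.
const VOICE_OPTIONS: Voice[] = ["alloy"];

// Sketch of a selector that would drive setVoice() in RealtimeChat.
// Disabling it while a session is active mirrors the fact that the voice
// is only passed to configure() when the client is set up.
export function VoiceSelector(props: {
  voice: Voice;
  onChange: (voice: Voice) => void;
  disabled?: boolean;
}) {
  return (
    <select
      value={props.voice}
      disabled={props.disabled}
      onChange={(e) => props.onChange(e.target.value as Voice)}
    >
      {VOICE_OPTIONS.map((v) => (
        <option key={v} value={v}>
          {v}
        </option>
      ))}
    </select>
  );
}

Usage would look roughly like <VoiceSelector voice={voice} onChange={setVoice} disabled={isConnected} />, rendered next to the existing endpoint/deployment inputs (the isConnected flag is an assumption about the surrounding component, not shown in this diff).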