Merge pull request #5459 from DDMeaqua/tts

add tts
Dogtiti 2024-09-18 15:42:16 +08:00 committed by GitHub
commit a8c70d84a9
28 changed files with 1080 additions and 12 deletions

View File

@@ -25,6 +25,7 @@ export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];

 export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
+export const TTSModels = ["tts-1", "tts-1-hd"] as const;
 export type ChatModel = ModelType;

 export interface MultimodalContent {
@@ -53,6 +54,15 @@ export interface LLMConfig {
   style?: DalleRequestPayload["style"];
 }

+export interface SpeechOptions {
+  model: string;
+  input: string;
+  voice: string;
+  response_format?: string;
+  speed?: number;
+  onController?: (controller: AbortController) => void;
+}
+
 export interface ChatOptions {
   messages: RequestMessage[];
   config: LLMConfig;
@@ -87,6 +97,7 @@ export interface LLMModelProvider {
 export abstract class LLMApi {
   abstract chat(options: ChatOptions): Promise<void>;
+  abstract speech(options: SpeechOptions): Promise<ArrayBuffer>;
   abstract usage(): Promise<LLMUsage>;
   abstract models(): Promise<LLMModel[]>;
 }
@@ -205,13 +216,16 @@ export function validString(x: string): boolean {
   return x?.length > 0;
 }

-export function getHeaders() {
+export function getHeaders(ignoreHeaders: boolean = false) {
   const accessStore = useAccessStore.getState();
   const chatStore = useChatStore.getState();
-  const headers: Record<string, string> = {
-    "Content-Type": "application/json",
-    Accept: "application/json",
-  };
+  let headers: Record<string, string> = {};
+  if (!ignoreHeaders) {
+    headers = {
+      "Content-Type": "application/json",
+      Accept: "application/json",
+    };
+  }
   const clientConfig = getClientConfig();
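Taken together, this file now gives every provider a uniform speech entry point: `SpeechOptions` describes the request and `LLMApi.speech()` returns raw audio bytes. A minimal sketch of a caller (the `synthesize` helper and its inline structural type are illustrative, not part of the diff):

```ts
import { SpeechOptions } from "@/app/client/api";

// Illustrative helper: works with anything implementing the new
// speech() contract and returns the encoded audio bytes.
async function synthesize(
  api: { speech(options: SpeechOptions): Promise<ArrayBuffer> },
  text: string,
): Promise<ArrayBuffer> {
  let controller: AbortController | undefined;
  const options: SpeechOptions = {
    model: "tts-1", // one of TTSModels
    input: text, // plain text to read aloud
    voice: "alloy", // an OpenAI voice name
    speed: 1.0, // optional; provider default when omitted
    onController: (c) => (controller = c), // keep a handle for cancellation
  };
  const audio = await api.speech(options);
  return audio; // e.g. hand this ArrayBuffer to the Web Audio API
}
```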

View File

@@ -12,6 +12,7 @@ import {
   getHeaders,
   LLMApi,
   LLMModel,
+  SpeechOptions,
   MultimodalContent,
 } from "../api";
 import Locale from "../../locales";
@@ -83,6 +84,10 @@ export class QwenApi implements LLMApi {
     return res?.output?.choices?.at(0)?.message?.content ?? "";
   }

+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,

View File

@@ -1,5 +1,5 @@
 import { Anthropic, ApiPath } from "@/app/constant";
-import { ChatOptions, getHeaders, LLMApi } from "../api";
+import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api";
 import {
   useAccessStore,
   useAppConfig,
@@ -73,6 +73,10 @@ const ClaudeMapper = {
 const keys = ["claude-2, claude-instant-1"];

 export class ClaudeApi implements LLMApi {
+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   extractMessage(res: any) {
     console.log("[Response] claude response: ", res);

View File

@@ -14,6 +14,7 @@ import {
   LLMApi,
   LLMModel,
   MultimodalContent,
+  SpeechOptions,
 } from "../api";
 import Locale from "../../locales";
 import {
@@ -75,6 +76,10 @@ export class ErnieApi implements LLMApi {
     return [baseUrl, path].join("/");
   }

+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function",

View File

@@ -13,6 +13,7 @@ import {
   LLMApi,
   LLMModel,
   MultimodalContent,
+  SpeechOptions,
 } from "../api";
 import Locale from "../../locales";
 import {
@@ -77,6 +78,10 @@ export class DoubaoApi implements LLMApi {
     return res.choices?.at(0)?.message?.content ?? "";
   }

+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,

View File

@@ -1,5 +1,12 @@
 import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
-import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  LLMUsage,
+  SpeechOptions,
+} from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import { DEFAULT_API_HOST } from "@/app/constant";
@@ -56,6 +63,10 @@ export class GeminiProApi implements LLMApi {
       ""
     );
   }

+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   async chat(options: ChatOptions): Promise<void> {
     const apiClient = this;
     let multimodal = false;

View File

@@ -7,7 +7,13 @@ import {
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
-import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api";
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  SpeechOptions,
+} from "../api";
 import Locale from "../../locales";
 import {
   EventStreamContentType,
@@ -53,6 +59,10 @@ export class SparkApi implements LLMApi {
     return res.choices?.at(0)?.message?.content ?? "";
   }

+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {

View File

@@ -14,7 +14,13 @@ import {
   usePluginStore,
 } from "@/app/store";
 import { stream } from "@/app/utils/chat";
-import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api";
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  SpeechOptions,
+} from "../api";
 import { getClientConfig } from "@/app/config/client";
 import { getMessageTextContent } from "@/app/utils";
 import { RequestPayload } from "./openai";
@@ -53,6 +59,10 @@ export class MoonshotApi implements LLMApi {
     return res.choices?.at(0)?.message?.content ?? "";
   }

+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {

View File

@@ -33,6 +33,7 @@ import {
   LLMModel,
   LLMUsage,
   MultimodalContent,
+  SpeechOptions,
 } from "../api";
 import Locale from "../../locales";
 import { getClientConfig } from "@/app/config/client";
@@ -141,6 +142,44 @@ export class ChatGPTApi implements LLMApi {
     return res.choices?.at(0)?.message?.content ?? res;
   }

+  async speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    const requestPayload = {
+      model: options.model,
+      input: options.input,
+      voice: options.voice,
+      response_format: options.response_format,
+      speed: options.speed,
+    };
+
+    console.log("[Request] openai speech payload: ", requestPayload);
+
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      const speechPath = this.path(OpenaiPath.SpeechPath);
+      const speechPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getHeaders(),
+      };
+
+      // make a fetch request
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS,
+      );
+
+      const res = await fetch(speechPath, speechPayload);
+      clearTimeout(requestTimeoutId);
+      return await res.arrayBuffer();
+    } catch (e) {
+      console.log("[Request] failed to make a speech request", e);
+      throw e;
+    }
+  }
+
   async chat(options: ChatOptions) {
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
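For orientation, the request this method ends up sending is a plain JSON POST against OpenAI's `/v1/audio/speech` route (the `SpeechPath` constant added below in app/constant.ts). A standalone sketch outside the app's header/proxy plumbing; the hard-coded base URL and the `OPENAI_API_KEY` environment variable are assumptions for illustration:

```ts
// Standalone sketch of the same call, bypassing getHeaders()/this.path().
// Base URL and OPENAI_API_KEY are illustrative assumptions.
async function openaiSpeechRaw(text: string): Promise<ArrayBuffer> {
  const res = await fetch("https://api.openai.com/v1/audio/speech", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    },
    body: JSON.stringify({
      model: "tts-1",
      input: text,
      voice: "alloy",
    }),
  });
  if (!res.ok) throw new Error(`speech request failed: ${res.status}`);
  return res.arrayBuffer(); // encoded audio (mp3 unless response_format says otherwise)
}
```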

View File

@@ -8,6 +8,7 @@ import {
   LLMApi,
   LLMModel,
   MultimodalContent,
+  SpeechOptions,
 } from "../api";
 import Locale from "../../locales";
 import {
@@ -89,6 +90,10 @@ export class HunyuanApi implements LLMApi {
     return res.Choices?.at(0)?.Message?.Content ?? "";
   }

+  speech(options: SpeechOptions): Promise<ArrayBuffer> {
+    throw new Error("Method not implemented.");
+  }
+
   async chat(options: ChatOptions) {
     const visionModel = isVisionModel(options.config.model);
     const messages = options.messages.map((v, index) => ({

View File

@@ -15,6 +15,8 @@ import RenameIcon from "../icons/rename.svg";
 import ExportIcon from "../icons/share.svg";
 import ReturnIcon from "../icons/return.svg";
 import CopyIcon from "../icons/copy.svg";
+import SpeakIcon from "../icons/speak.svg";
+import SpeakStopIcon from "../icons/speak-stop.svg";
 import LoadingIcon from "../icons/three-dots.svg";
 import LoadingButtonIcon from "../icons/loading.svg";
 import PromptIcon from "../icons/prompt.svg";
@@ -96,6 +98,8 @@ import {
 import { useNavigate } from "react-router-dom";
 import {
   CHAT_PAGE_SIZE,
+  DEFAULT_TTS_ENGINE,
+  ModelProvider,
   Path,
   REQUEST_TIMEOUT_MS,
   UNFINISHED_INPUT,
@@ -112,6 +116,11 @@ import { useAllModels } from "../utils/hooks";
 import { MultimodalContent } from "../client/api";

 const localStorage = safeLocalStorage();
+import { ClientApi } from "../client/api";
+import { createTTSPlayer } from "../utils/audio";
+import { MsEdgeTTS, OUTPUT_FORMAT } from "../utils/ms_edge_tts";
+
+const ttsPlayer = createTTSPlayer();

 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
@@ -442,6 +451,7 @@ export function ChatActions(props: {
   hitBottom: boolean;
   uploading: boolean;
   setShowShortcutKeyModal: React.Dispatch<React.SetStateAction<boolean>>;
+  setUserInput: (input: string) => void;
 }) {
   const config = useAppConfig();
   const navigate = useNavigate();
@@ -1184,10 +1194,55 @@ function _Chat() {
     });
   };

+  const accessStore = useAccessStore();
+  const [speechStatus, setSpeechStatus] = useState(false);
+  const [speechLoading, setSpeechLoading] = useState(false);
+
+  async function openaiSpeech(text: string) {
+    if (speechStatus) {
+      ttsPlayer.stop();
+      setSpeechStatus(false);
+    } else {
+      var api: ClientApi;
+      api = new ClientApi(ModelProvider.GPT);
+      const config = useAppConfig.getState();
+      setSpeechLoading(true);
+      ttsPlayer.init();
+      let audioBuffer: ArrayBuffer;
+      const { markdownToTxt } = require("markdown-to-txt");
+      const textContent = markdownToTxt(text);
+      if (config.ttsConfig.engine !== DEFAULT_TTS_ENGINE) {
+        const edgeVoiceName = accessStore.edgeVoiceName();
+        const tts = new MsEdgeTTS();
+        await tts.setMetadata(
+          edgeVoiceName,
+          OUTPUT_FORMAT.AUDIO_24KHZ_96KBITRATE_MONO_MP3,
+        );
+        audioBuffer = await tts.toArrayBuffer(textContent);
+      } else {
+        audioBuffer = await api.llm.speech({
+          model: config.ttsConfig.model,
+          input: textContent,
+          voice: config.ttsConfig.voice,
+          speed: config.ttsConfig.speed,
+        });
+      }
+      setSpeechStatus(true);
+      ttsPlayer
+        .play(audioBuffer, () => {
+          setSpeechStatus(false);
+        })
+        .catch((e) => {
+          console.error("[OpenAI Speech]", e);
+          showToast(prettyObject(e));
+          setSpeechStatus(false);
+        })
+        .finally(() => setSpeechLoading(false));
+    }
+  }
+
   const context: RenderMessage[] = useMemo(() => {
     return session.mask.hideContext ? [] : session.mask.context.slice();
   }, [session.mask.context, session.mask.hideContext]);
-  const accessStore = useAccessStore();

   if (
     context.length === 0 &&
@@ -1724,6 +1779,25 @@ function _Chat() {
                       )
                     }
                   />
+                  {config.ttsConfig.enable && (
+                    <ChatAction
+                      text={
+                        speechStatus
+                          ? Locale.Chat.Actions.StopSpeech
+                          : Locale.Chat.Actions.Speech
+                      }
+                      icon={
+                        speechStatus ? (
+                          <SpeakStopIcon />
+                        ) : (
+                          <SpeakIcon />
+                        )
+                      }
+                      onClick={() =>
+                        openaiSpeech(getMessageTextContent(message))
+                      }
+                    />
+                  )}
                 </>
               )}
             </div>
@@ -1842,6 +1916,7 @@ function _Chat() {
             onSearch("");
           }}
           setShowShortcutKeyModal={setShowShortcutKeyModal}
+          setUserInput={setUserInput}
         />
         <label
           className={`${styles["chat-input-panel-inner"]} ${
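One detail in `openaiSpeech` worth calling out: messages are Markdown, so the text is flattened with markdown-to-txt before being handed to either engine, which keeps the synthesizer from reading out syntax. In isolation (the shown output is what the library is expected to produce):

```ts
import { markdownToTxt } from "markdown-to-txt";

// Strip Markdown so the TTS engine reads prose, not syntax.
const spoken = markdownToTxt("**Bold**, `code`, and a [link](https://example.com).");
console.log(spoken); // expected: "Bold, code, and a link."
```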

View File

@@ -80,6 +80,7 @@ import { useSyncStore } from "../store/sync";
 import { nanoid } from "nanoid";
 import { useMaskStore } from "../store/mask";
 import { ProviderType } from "../utils/cloud";
+import { TTSConfigList } from "./tts-config";

 function EditPromptModal(props: { id: string; onClose: () => void }) {
   const promptStore = usePromptStore();
@@ -1646,6 +1647,17 @@ export function Settings() {
         <UserPromptModal onClose={() => setShowPromptModal(false)} />
       )}

+      <List>
+        <TTSConfigList
+          ttsConfig={config.ttsConfig}
+          updateConfig={(updater) => {
+            const ttsConfig = { ...config.ttsConfig };
+            updater(ttsConfig);
+            config.update((config) => (config.ttsConfig = ttsConfig));
+          }}
+        />
+      </List>
+
       <DangerItems />
     </div>
   </ErrorBoundary>

View File

@@ -0,0 +1,133 @@
import { TTSConfig, TTSConfigValidator } from "../store";

import Locale from "../locales";
import { ListItem, Select } from "./ui-lib";
import {
  DEFAULT_TTS_ENGINE,
  DEFAULT_TTS_ENGINES,
  DEFAULT_TTS_MODELS,
  DEFAULT_TTS_VOICES,
} from "../constant";
import { InputRange } from "./input-range";

export function TTSConfigList(props: {
  ttsConfig: TTSConfig;
  updateConfig: (updater: (config: TTSConfig) => void) => void;
}) {
  return (
    <>
      <ListItem
        title={Locale.Settings.TTS.Enable.Title}
        subTitle={Locale.Settings.TTS.Enable.SubTitle}
      >
        <input
          type="checkbox"
          checked={props.ttsConfig.enable}
          onChange={(e) =>
            props.updateConfig(
              (config) => (config.enable = e.currentTarget.checked),
            )
          }
        ></input>
      </ListItem>
      {/* <ListItem
        title={Locale.Settings.TTS.Autoplay.Title}
        subTitle={Locale.Settings.TTS.Autoplay.SubTitle}
      >
        <input
          type="checkbox"
          checked={props.ttsConfig.autoplay}
          onChange={(e) =>
            props.updateConfig(
              (config) => (config.autoplay = e.currentTarget.checked),
            )
          }
        ></input>
      </ListItem> */}
      <ListItem title={Locale.Settings.TTS.Engine}>
        <Select
          value={props.ttsConfig.engine}
          onChange={(e) => {
            props.updateConfig(
              (config) =>
                (config.engine = TTSConfigValidator.engine(
                  e.currentTarget.value,
                )),
            );
          }}
        >
          {DEFAULT_TTS_ENGINES.map((v, i) => (
            <option value={v} key={i}>
              {v}
            </option>
          ))}
        </Select>
      </ListItem>
      {props.ttsConfig.engine === DEFAULT_TTS_ENGINE && (
        <>
          <ListItem title={Locale.Settings.TTS.Model}>
            <Select
              value={props.ttsConfig.model}
              onChange={(e) => {
                props.updateConfig(
                  (config) =>
                    (config.model = TTSConfigValidator.model(
                      e.currentTarget.value,
                    )),
                );
              }}
            >
              {DEFAULT_TTS_MODELS.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.TTS.Voice.Title}
            subTitle={Locale.Settings.TTS.Voice.SubTitle}
          >
            <Select
              value={props.ttsConfig.voice}
              onChange={(e) => {
                props.updateConfig(
                  (config) =>
                    (config.voice = TTSConfigValidator.voice(
                      e.currentTarget.value,
                    )),
                );
              }}
            >
              {DEFAULT_TTS_VOICES.map((v, i) => (
                <option value={v} key={i}>
                  {v}
                </option>
              ))}
            </Select>
          </ListItem>
          <ListItem
            title={Locale.Settings.TTS.Speed.Title}
            subTitle={Locale.Settings.TTS.Speed.SubTitle}
          >
            <InputRange
              aria={Locale.Settings.TTS.Speed.Title}
              value={props.ttsConfig.speed?.toFixed(1)}
              min="0.3"
              max="4.0"
              step="0.1"
              onChange={(e) => {
                props.updateConfig(
                  (config) =>
                    (config.speed = TTSConfigValidator.speed(
                      e.currentTarget.valueAsNumber,
                    )),
                );
              }}
            ></InputRange>
          </ListItem>
        </>
      )}
    </>
  );
}
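`TTSConfigList` is fully controlled through its two props. A minimal sketch of mounting it outside the settings page, with a local `useState` as an illustrative stand-in for the app's persisted config store:

```tsx
import { useState } from "react";
import { TTSConfigList } from "./tts-config";
import type { TTSConfig } from "../store";

// Stand-in harness: copy the config, let the updater mutate the copy, commit.
export function TTSConfigDemo(props: { initial: TTSConfig }) {
  const [ttsConfig, setTTSConfig] = useState(props.initial);
  return (
    <TTSConfigList
      ttsConfig={ttsConfig}
      updateConfig={(updater) => {
        const next = { ...ttsConfig };
        updater(next);
        setTTSConfig(next);
      }}
    />
  );
}
```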

View File

@@ -0,0 +1,119 @@
@import "../styles/animation.scss";

.plugin-page {
  height: 100%;
  display: flex;
  flex-direction: column;

  .plugin-page-body {
    padding: 20px;
    overflow-y: auto;

    .plugin-filter {
      width: 100%;
      max-width: 100%;
      margin-bottom: 20px;
      animation: slide-in ease 0.3s;
      height: 40px;
      display: flex;

      .search-bar {
        flex-grow: 1;
        max-width: 100%;
        min-width: 0;
        outline: none;
      }

      .search-bar:focus {
        border: 1px solid var(--primary);
      }

      .plugin-filter-lang {
        height: 100%;
        margin-left: 10px;
      }

      .plugin-create {
        height: 100%;
        margin-left: 10px;
        box-sizing: border-box;
        min-width: 80px;
      }
    }

    .plugin-item {
      display: flex;
      justify-content: space-between;
      padding: 20px;
      border: var(--border-in-light);
      animation: slide-in ease 0.3s;

      &:not(:last-child) {
        border-bottom: 0;
      }

      &:first-child {
        border-top-left-radius: 10px;
        border-top-right-radius: 10px;
      }

      &:last-child {
        border-bottom-left-radius: 10px;
        border-bottom-right-radius: 10px;
      }

      .plugin-header {
        display: flex;
        align-items: center;

        .plugin-icon {
          display: flex;
          align-items: center;
          justify-content: center;
          margin-right: 10px;
        }

        .plugin-title {
          .plugin-name {
            font-size: 14px;
            font-weight: bold;
          }

          .plugin-info {
            font-size: 12px;
          }

          .plugin-runtime-warning {
            font-size: 12px;
            color: #f86c6c;
          }
        }
      }

      .plugin-actions {
        display: flex;
        flex-wrap: nowrap;
        transition: all ease 0.3s;
        justify-content: center;
        align-items: center;
      }

      @media screen and (max-width: 600px) {
        display: flex;
        flex-direction: column;
        padding-bottom: 10px;
        border-radius: 10px;
        margin-bottom: 20px;
        box-shadow: var(--card-shadow);

        &:not(:last-child) {
          border-bottom: var(--border-in-light);
        }

        .plugin-actions {
          width: 100%;
          justify-content: space-between;
          padding-top: 10px;
        }
      }
    }
  }
}

View File

@@ -150,6 +150,7 @@ export const Anthropic = {
 export const OpenaiPath = {
   ChatPath: "v1/chat/completions",
+  SpeechPath: "v1/audio/speech",
   ImagePath: "v1/images/generations",
   UsagePath: "dashboard/billing/usage",
   SubsPath: "dashboard/billing/subscription",
@@ -256,6 +257,20 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gemini-pro-vision": "2023-12",
 };

+export const DEFAULT_TTS_ENGINE = "OpenAI-TTS";
+export const DEFAULT_TTS_ENGINES = ["OpenAI-TTS", "Edge-TTS"];
+export const DEFAULT_TTS_MODEL = "tts-1";
+export const DEFAULT_TTS_VOICE = "alloy";
+export const DEFAULT_TTS_MODELS = ["tts-1", "tts-1-hd"];
+export const DEFAULT_TTS_VOICES = [
+  "alloy",
+  "echo",
+  "fable",
+  "onyx",
+  "nova",
+  "shimmer",
+];
+
 const openaiModels = [
   "gpt-3.5-turbo",
   "gpt-3.5-turbo-1106",

app/icons/speak-stop.svg Normal file
View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" width="16" height="16" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="w-4 h-4"><path stroke-linecap="round" stroke-linejoin="round" d="M17.25 9.75 19.5 12m0 0 2.25 2.25M19.5 12l2.25-2.25M19.5 12l-2.25 2.25m-10.5-6 4.72-4.72a.75.75 0 0 1 1.28.53v15.88a.75.75 0 0 1-1.28.53l-4.72-4.72H4.51c-.88 0-1.704-.507-1.938-1.354A9.009 9.009 0 0 1 2.25 12c0-.83.112-1.633.322-2.396C2.806 8.756 3.63 8.25 4.51 8.25H6.75Z"></path></svg>


app/icons/speak.svg Normal file
View File

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" fill="none" width="16" height="16" viewBox="0 0 24 24" stroke-width="1.5" stroke="currentColor" class="w-4 h-4"><path stroke-linecap="round" stroke-linejoin="round" d="M19.114 5.636a9 9 0 010 12.728M16.463 8.288a5.25 5.25 0 010 7.424M6.75 8.25l4.72-4.72a.75.75 0 011.28.53v15.88a.75.75 0 01-1.28.53l-4.72-4.72H4.51c-.88 0-1.704-.507-1.938-1.354A9.01 9.01 0 012.25 12c0-.83.112-1.633.322-2.396C2.806 8.756 3.63 8.25 4.51 8.25H6.75z"></path></svg>


app/icons/voice-white.svg Normal file
View File

@@ -0,0 +1,16 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="16" height="16" fill="none" viewBox="0 0 20 20">
<defs>
<rect id="path_0" width="20" height="20" x="0" y="0" />
</defs>
<g opacity="1" transform="translate(0 0) rotate(0 8 8)">
<mask id="bg-mask-0" fill="#fff">
<use xlink:href="#path_0" />
</mask>
<g mask="url(#bg-mask-0)">
<path d="M7 4a3 3 0 016 0v6a3 3 0 11-6 0V4z" fill="#333333">
</path>
<path d="M5.5 9.643a.75.75 0 00-1.5 0V10c0 3.06 2.29 5.585 5.25 5.954V17.5h-1.5a.75.75 0 000 1.5h4.5a.75.75 0 000-1.5h-1.5v-1.546A6.001 6.001 0 0016 10v-.357a.75.75 0 00-1.5 0V10a4.5 4.5 0 01-9 0v-.357z" fill="#333333">
</path>
</g>
</g>
</svg>


View File

@@ -41,7 +41,11 @@ export default function RootLayout({
           name="viewport"
           content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"
         />
-        <link rel="manifest" href="/site.webmanifest" crossOrigin="use-credentials"></link>
+        <link
+          rel="manifest"
+          href="/site.webmanifest"
+          crossOrigin="use-credentials"
+        ></link>
         <script src="/serviceWorkerRegister.js" defer></script>
       </head>
       <body>

View File

@@ -45,6 +45,8 @@ const cn = {
     FullScreen: "全屏",
     RefreshTitle: "刷新标题",
     RefreshToast: "已发送刷新标题请求",
+    Speech: "朗读",
+    StopSpeech: "停止",
   },
   Commands: {
     new: "新建聊天",
@@ -79,6 +81,8 @@ const cn = {
       return inputHints + "/ 触发补全,: 触发命令";
     },
     Send: "发送",
+    StartSpeak: "说话",
+    StopSpeak: "停止",
     Config: {
       Reset: "清除记忆",
       SaveAs: "存为面具",
@@ -496,6 +500,26 @@ const cn = {
       Title: "频率惩罚度 (frequency_penalty)",
       SubTitle: "值越大,越有可能降低重复字词",
     },
+    TTS: {
+      Enable: {
+        Title: "启用文本转语音",
+        SubTitle: "启用文本生成语音服务",
+      },
+      Autoplay: {
+        Title: "启用自动朗读",
+        SubTitle: "自动生成语音并播放,需先开启文本转语音开关",
+      },
+      Model: "模型",
+      Engine: "转换引擎",
+      Voice: {
+        Title: "声音",
+        SubTitle: "生成语音时使用的声音",
+      },
+      Speed: {
+        Title: "速度",
+        SubTitle: "生成语音的速度",
+      },
+    },
   },
   Store: {
     DefaultTopic: "新的聊天",

View File

@@ -47,6 +47,8 @@ const en: LocaleType = {
     FullScreen: "FullScreen",
     RefreshTitle: "Refresh Title",
     RefreshToast: "Title refresh request sent",
+    Speech: "Play",
+    StopSpeech: "Stop",
   },
   Commands: {
     new: "Start a new chat",
@@ -81,6 +83,8 @@ const en: LocaleType = {
       return inputHints + ", / to search prompts, : to use commands";
     },
     Send: "Send",
+    StartSpeak: "Start Speak",
+    StopSpeak: "Stop Speak",
     Config: {
       Reset: "Reset to Default",
       SaveAs: "Save as Mask",
@@ -503,6 +507,27 @@ const en: LocaleType = {
       SubTitle:
         "A larger value decreasing the likelihood to repeat the same line",
     },
+    TTS: {
+      Enable: {
+        Title: "Enable TTS",
+        SubTitle: "Enable text-to-speech service",
+      },
+      Autoplay: {
+        Title: "Enable Autoplay",
+        SubTitle:
+          "Automatically generate speech and play, you need to enable the text-to-speech switch first",
+      },
+      Model: "Model",
+      Voice: {
+        Title: "Voice",
+        SubTitle: "The voice to use when generating the audio",
+      },
+      Speed: {
+        Title: "Speed",
+        SubTitle: "The speed of the generated audio",
+      },
+      Engine: "TTS Engine",
+    },
   },
   Store: {
     DefaultTopic: "New Conversation",

View File

@@ -134,3 +134,34 @@ export function getISOLang() {
   const lang = getLang();
   return isoLangString[lang] ?? lang;
 }
+
+const DEFAULT_STT_LANG = "zh-CN";
+export const STT_LANG_MAP: Record<Lang, string> = {
+  cn: "zh-CN",
+  en: "en-US",
+  pt: "pt-BR",
+  tw: "zh-TW",
+  jp: "ja-JP",
+  ko: "ko-KR",
+  id: "id-ID",
+  fr: "fr-FR",
+  es: "es-ES",
+  it: "it-IT",
+  tr: "tr-TR",
+  de: "de-DE",
+  vi: "vi-VN",
+  ru: "ru-RU",
+  cs: "cs-CZ",
+  no: "no-NO",
+  ar: "ar-SA",
+  bn: "bn-BD",
+  sk: "sk-SK",
+};
+
+export function getSTTLang(): string {
+  try {
+    return STT_LANG_MAP[getLang()];
+  } catch {
+    return DEFAULT_STT_LANG;
+  }
+}

View File

@@ -120,6 +120,9 @@ const DEFAULT_ACCESS_STATE = {
   disableFastLink: false,
   customModels: "",
   defaultModel: "",
+
+  // tts config
+  edgeTTSVoiceName: "zh-CN-YunxiNeural",
 };

 export const useAccessStore = createPersistStore(
@@ -132,6 +135,12 @@ export const useAccessStore = createPersistStore(
       return get().needCode;
     },

+    edgeVoiceName() {
+      this.fetch();
+      return get().edgeTTSVoiceName;
+    },
+
     isValidOpenAI() {
       return ensure(get(), ["openaiApiKey"]);
     },

View File

@@ -5,12 +5,21 @@ import {
   DEFAULT_INPUT_TEMPLATE,
   DEFAULT_MODELS,
   DEFAULT_SIDEBAR_WIDTH,
+  DEFAULT_TTS_ENGINE,
+  DEFAULT_TTS_ENGINES,
+  DEFAULT_TTS_MODEL,
+  DEFAULT_TTS_MODELS,
+  DEFAULT_TTS_VOICE,
+  DEFAULT_TTS_VOICES,
   StoreKey,
   ServiceProvider,
 } from "../constant";
 import { createPersistStore } from "../utils/store";

 export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
+export type TTSModelType = (typeof DEFAULT_TTS_MODELS)[number];
+export type TTSVoiceType = (typeof DEFAULT_TTS_VOICES)[number];
+export type TTSEngineType = (typeof DEFAULT_TTS_ENGINES)[number];

 export enum SubmitKey {
   Enter = "Enter",
@@ -68,11 +77,21 @@ export const DEFAULT_CONFIG = {
     quality: "standard" as DalleQuality,
     style: "vivid" as DalleStyle,
   },
+
+  ttsConfig: {
+    enable: false,
+    autoplay: false,
+    engine: DEFAULT_TTS_ENGINE,
+    model: DEFAULT_TTS_MODEL,
+    voice: DEFAULT_TTS_VOICE,
+    speed: 1.0,
+  },
 };

 export type ChatConfig = typeof DEFAULT_CONFIG;

 export type ModelConfig = ChatConfig["modelConfig"];
+export type TTSConfig = ChatConfig["ttsConfig"];

 export function limitNumber(
   x: number,
@@ -87,6 +106,21 @@ export function limitNumber(
   return Math.min(max, Math.max(min, x));
 }

+export const TTSConfigValidator = {
+  engine(x: string) {
+    return x as TTSEngineType;
+  },
+  model(x: string) {
+    return x as TTSModelType;
+  },
+  voice(x: string) {
+    return x as TTSVoiceType;
+  },
+  speed(x: number) {
+    return limitNumber(x, 0.25, 4.0, 1.0);
+  },
+};
+
 export const ModalConfigValidator = {
   model(x: string) {
     return x as ModelType;
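Of the four validators, only `speed` does real work; the other three are casts (which, per the note after app/constant.ts above, land on aliases that may widen to plain `string`). What the clamp does, as a quick illustration:

```ts
TTSConfigValidator.speed(9.9); // 4.0  — clamped to the maximum
TTSConfigValidator.speed(0.1); // 0.25 — clamped to the minimum
TTSConfigValidator.speed(1.5); // 1.5  — in range, passed through
```

Worth noticing: the validator allows speeds down to 0.25, while the `InputRange` in tts-config.tsx bottoms out at 0.3, so the UI is slightly stricter than the store.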

app/utils/audio.ts Normal file
View File

@@ -0,0 +1,45 @@
type TTSPlayer = {
  init: () => void;
  play: (audioBuffer: ArrayBuffer, onended: () => void | null) => Promise<void>;
  stop: () => void;
};

export function createTTSPlayer(): TTSPlayer {
  let audioContext: AudioContext | null = null;
  let audioBufferSourceNode: AudioBufferSourceNode | null = null;

  const init = () => {
    audioContext = new (window.AudioContext || window.webkitAudioContext)();
    audioContext.suspend();
  };

  const play = async (audioBuffer: ArrayBuffer, onended: () => void | null) => {
    if (audioBufferSourceNode) {
      audioBufferSourceNode.stop();
      audioBufferSourceNode.disconnect();
    }

    const buffer = await audioContext!.decodeAudioData(audioBuffer);
    audioBufferSourceNode = audioContext!.createBufferSource();
    audioBufferSourceNode.buffer = buffer;
    audioBufferSourceNode.connect(audioContext!.destination);
    audioContext!.resume().then(() => {
      audioBufferSourceNode!.start();
    });
    audioBufferSourceNode.onended = onended;
  };

  const stop = () => {
    if (audioBufferSourceNode) {
      audioBufferSourceNode.stop();
      audioBufferSourceNode.disconnect();
      audioBufferSourceNode = null;
    }
    if (audioContext) {
      audioContext.close();
      audioContext = null;
    }
  };

  return { init, play, stop };
}
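A short sketch of the player's life cycle as the chat page uses it. The `audio` buffer would come from `LLMApi.speech()` or `MsEdgeTTS.toArrayBuffer()`; calling `init()` inside a click handler is deliberate, since browsers only let an `AudioContext` start after a user gesture:

```ts
const player = createTTSPlayer();

async function onSpeakClick(audio: ArrayBuffer) {
  player.init(); // create the (suspended) AudioContext inside the user gesture
  await player.play(audio, () => {
    console.log("playback finished"); // mirrors setSpeechStatus(false) in chat.tsx
  });
}

function onStopClick() {
  player.stop(); // tears down both the source node and the context
}
```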

app/utils/ms_edge_tts.ts Normal file
View File

@@ -0,0 +1,391 @@
// import axios from "axios";
import { Buffer } from "buffer";
import { randomBytes } from "crypto";
import { Readable } from "stream";

// Modified according to https://github.com/Migushthe2nd/MsEdgeTTS

/**
 * https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-voice#:~:text=Optional-,volume,-Indicates%20the%20volume
 */
export enum VOLUME {
  SILENT = "silent",
  X_SOFT = "x-soft",
  SOFT = "soft",
  MEDIUM = "medium",
  LOUD = "loud",
  X_LOUD = "x-LOUD",
  DEFAULT = "default",
}

/**
 * https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-voice#:~:text=Optional-,rate,-Indicates%20the%20speaking
 */
export enum RATE {
  X_SLOW = "x-slow",
  SLOW = "slow",
  MEDIUM = "medium",
  FAST = "fast",
  X_FAST = "x-fast",
  DEFAULT = "default",
}

/**
 * https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-voice#:~:text=Optional-,pitch,-Indicates%20the%20baseline
 */
export enum PITCH {
  X_LOW = "x-low",
  LOW = "low",
  MEDIUM = "medium",
  HIGH = "high",
  X_HIGH = "x-high",
  DEFAULT = "default",
}

/**
 * Only a few of the [possible formats](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/rest-text-to-speech#audio-outputs) are accepted.
 */
export enum OUTPUT_FORMAT {
  // Streaming =============================
  // AMR_WB_16000HZ = "amr-wb-16000hz",
  // AUDIO_16KHZ_16BIT_32KBPS_MONO_OPUS = "audio-16khz-16bit-32kbps-mono-opus",
  // AUDIO_16KHZ_32KBITRATE_MONO_MP3 = "audio-16khz-32kbitrate-mono-mp3",
  // AUDIO_16KHZ_64KBITRATE_MONO_MP3 = "audio-16khz-64kbitrate-mono-mp3",
  // AUDIO_16KHZ_128KBITRATE_MONO_MP3 = "audio-16khz-128kbitrate-mono-mp3",
  // AUDIO_24KHZ_16BIT_24KBPS_MONO_OPUS = "audio-24khz-16bit-24kbps-mono-opus",
  // AUDIO_24KHZ_16BIT_48KBPS_MONO_OPUS = "audio-24khz-16bit-48kbps-mono-opus",
  AUDIO_24KHZ_48KBITRATE_MONO_MP3 = "audio-24khz-48kbitrate-mono-mp3",
  AUDIO_24KHZ_96KBITRATE_MONO_MP3 = "audio-24khz-96kbitrate-mono-mp3",
  // AUDIO_24KHZ_160KBITRATE_MONO_MP3 = "audio-24khz-160kbitrate-mono-mp3",
  // AUDIO_48KHZ_96KBITRATE_MONO_MP3 = "audio-48khz-96kbitrate-mono-mp3",
  // AUDIO_48KHZ_192KBITRATE_MONO_MP3 = "audio-48khz-192kbitrate-mono-mp3",
  // OGG_16KHZ_16BIT_MONO_OPUS = "ogg-16khz-16bit-mono-opus",
  // OGG_24KHZ_16BIT_MONO_OPUS = "ogg-24khz-16bit-mono-opus",
  // OGG_48KHZ_16BIT_MONO_OPUS = "ogg-48khz-16bit-mono-opus",
  // RAW_8KHZ_8BIT_MONO_ALAW = "raw-8khz-8bit-mono-alaw",
  // RAW_8KHZ_8BIT_MONO_MULAW = "raw-8khz-8bit-mono-mulaw",
  // RAW_8KHZ_16BIT_MONO_PCM = "raw-8khz-16bit-mono-pcm",
  // RAW_16KHZ_16BIT_MONO_PCM = "raw-16khz-16bit-mono-pcm",
  // RAW_16KHZ_16BIT_MONO_TRUESILK = "raw-16khz-16bit-mono-truesilk",
  // RAW_22050HZ_16BIT_MONO_PCM = "raw-22050hz-16bit-mono-pcm",
  // RAW_24KHZ_16BIT_MONO_PCM = "raw-24khz-16bit-mono-pcm",
  // RAW_24KHZ_16BIT_MONO_TRUESILK = "raw-24khz-16bit-mono-truesilk",
  // RAW_44100HZ_16BIT_MONO_PCM = "raw-44100hz-16bit-mono-pcm",
  // RAW_48KHZ_16BIT_MONO_PCM = "raw-48khz-16bit-mono-pcm",
  // WEBM_16KHZ_16BIT_MONO_OPUS = "webm-16khz-16bit-mono-opus",
  // WEBM_24KHZ_16BIT_24KBPS_MONO_OPUS = "webm-24khz-16bit-24kbps-mono-opus",
  WEBM_24KHZ_16BIT_MONO_OPUS = "webm-24khz-16bit-mono-opus",
  // Non-streaming =============================
  // RIFF_8KHZ_8BIT_MONO_ALAW = "riff-8khz-8bit-mono-alaw",
  // RIFF_8KHZ_8BIT_MONO_MULAW = "riff-8khz-8bit-mono-mulaw",
  // RIFF_8KHZ_16BIT_MONO_PCM = "riff-8khz-16bit-mono-pcm",
  // RIFF_22050HZ_16BIT_MONO_PCM = "riff-22050hz-16bit-mono-pcm",
  // RIFF_24KHZ_16BIT_MONO_PCM = "riff-24khz-16bit-mono-pcm",
  // RIFF_44100HZ_16BIT_MONO_PCM = "riff-44100hz-16bit-mono-pcm",
  // RIFF_48KHZ_16BIT_MONO_PCM = "riff-48khz-16bit-mono-pcm",
}

export type Voice = {
  Name: string;
  ShortName: string;
  Gender: string;
  Locale: string;
  SuggestedCodec: string;
  FriendlyName: string;
  Status: string;
};

export class ProsodyOptions {
  /**
   * The pitch to use.
   * Can be any {@link PITCH}, or a relative frequency in Hz (+50Hz), a relative semitone (+2st), or a relative percentage (+50%).
   * [SSML documentation](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-voice#:~:text=Optional-,pitch,-Indicates%20the%20baseline)
   */
  pitch?: PITCH | string = "+0Hz";
  /**
   * The rate to use.
   * Can be any {@link RATE}, or a relative number (0.5), or string with a relative percentage (+50%).
   * [SSML documentation](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-voice#:~:text=Optional-,rate,-Indicates%20the%20speaking)
   */
  rate?: RATE | string | number = 1.0;
  /**
   * The volume to use.
   * Can be any {@link VOLUME}, or an absolute number (0, 100), a string with a relative number (+50), or a relative percentage (+50%).
   * [SSML documentation](https://learn.microsoft.com/en-us/azure/ai-services/speech-service/speech-synthesis-markup-voice#:~:text=Optional-,volume,-Indicates%20the%20volume)
   */
  volume?: VOLUME | string | number = 100.0;
}

export class MsEdgeTTS {
  static OUTPUT_FORMAT = OUTPUT_FORMAT;
  private static TRUSTED_CLIENT_TOKEN = "6A5AA1D4EAFF4E9FB37E23D68491D6F4";
  private static VOICES_URL = `https://speech.platform.bing.com/consumer/speech/synthesize/readaloud/voices/list?trustedclienttoken=${MsEdgeTTS.TRUSTED_CLIENT_TOKEN}`;
  private static SYNTH_URL = `wss://speech.platform.bing.com/consumer/speech/synthesize/readaloud/edge/v1?TrustedClientToken=${MsEdgeTTS.TRUSTED_CLIENT_TOKEN}`;
  private static BINARY_DELIM = "Path:audio\r\n";
  private static VOICE_LANG_REGEX = /\w{2}-\w{2}/;
  private readonly _enableLogger;
  private _ws: WebSocket | undefined;
  private _voice: any;
  private _voiceLocale: any;
  private _outputFormat: any;
  private _streams: { [key: string]: Readable } = {};
  private _startTime = 0;

  private _log(...o: any[]) {
    if (this._enableLogger) {
      console.log(...o);
    }
  }

  /**
   * Create a new `MsEdgeTTS` instance.
   *
   * @param agent (optional, **NOT SUPPORTED IN BROWSER**) Use a custom http.Agent implementation like [https-proxy-agent](https://github.com/TooTallNate/proxy-agents) or [socks-proxy-agent](https://github.com/TooTallNate/proxy-agents/tree/main/packages/socks-proxy-agent).
   * @param enableLogger=false whether to enable the built-in logger. This logs connections inits, disconnects, and incoming data to the console
   */
  public constructor(enableLogger: boolean = false) {
    this._enableLogger = enableLogger;
  }

  private async _send(message: any) {
    for (let i = 1; i <= 3 && this._ws!.readyState !== this._ws!.OPEN; i++) {
      if (i == 1) {
        this._startTime = Date.now();
      }
      this._log("connecting: ", i);
      await this._initClient();
    }
    this._ws!.send(message);
  }

  private _initClient() {
    this._ws = new WebSocket(MsEdgeTTS.SYNTH_URL);
    this._ws.binaryType = "arraybuffer";
    return new Promise((resolve, reject) => {
      this._ws!.onopen = () => {
        this._log(
          "Connected in",
          (Date.now() - this._startTime) / 1000,
          "seconds",
        );
        this._send(
          `Content-Type:application/json; charset=utf-8\r\nPath:speech.config\r\n\r\n
          {
            "context": {
              "synthesis": {
                "audio": {
                  "metadataoptions": {
                    "sentenceBoundaryEnabled": "false",
                    "wordBoundaryEnabled": "false"
                  },
                  "outputFormat": "${this._outputFormat}"
                }
              }
            }
          }
        `,
        ).then(resolve);
      };
      this._ws!.onmessage = (m: any) => {
        const buffer = Buffer.from(m.data as ArrayBuffer);
        const message = buffer.toString();
        const requestId = /X-RequestId:(.*?)\r\n/gm.exec(message)![1];
        if (message.includes("Path:turn.start")) {
          // start of turn, ignore
        } else if (message.includes("Path:turn.end")) {
          // end of turn, close stream
          this._streams[requestId].push(null);
        } else if (message.includes("Path:response")) {
          // context response, ignore
        } else if (
          message.includes("Path:audio") &&
          m.data instanceof ArrayBuffer
        ) {
          this._pushAudioData(buffer, requestId);
        } else {
          this._log("UNKNOWN MESSAGE", message);
        }
      };
      this._ws!.onclose = () => {
        this._log(
          "disconnected after:",
          (Date.now() - this._startTime) / 1000,
          "seconds",
        );
        for (const requestId in this._streams) {
          this._streams[requestId].push(null);
        }
      };
      this._ws!.onerror = function (error: any) {
        reject("Connect Error: " + error);
      };
    });
  }

  private _pushAudioData(audioBuffer: Buffer, requestId: string) {
    const audioStartIndex =
      audioBuffer.indexOf(MsEdgeTTS.BINARY_DELIM) +
      MsEdgeTTS.BINARY_DELIM.length;
    const audioData = audioBuffer.subarray(audioStartIndex);
    this._streams[requestId].push(audioData);
    this._log("received audio chunk, size: ", audioData?.length);
  }

  private _SSMLTemplate(input: string, options: ProsodyOptions = {}): string {
    // in case future updates to the edge API block these elements, we'll be concatenating strings.
    options = { ...new ProsodyOptions(), ...options };
    return `<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xmlns:mstts="https://www.w3.org/2001/mstts" xml:lang="${this._voiceLocale}">
      <voice name="${this._voice}">
        <prosody pitch="${options.pitch}" rate="${options.rate}" volume="${options.volume}">
          ${input}
        </prosody>
      </voice>
    </speak>`;
  }

  /**
   * Fetch the list of voices available in Microsoft Edge.
   * These, however, are not all. The complete list of voices supported by this module [can be found here](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/language-support) (neural, standard, and preview).
   */
  // getVoices(): Promise<Voice[]> {
  //   return new Promise((resolve, reject) => {
  //     axios
  //       .get(MsEdgeTTS.VOICES_URL)
  //       .then((res) => resolve(res.data))
  //       .catch(reject);
  //   });
  // }
  getVoices(): Promise<Voice[]> {
    return fetch(MsEdgeTTS.VOICES_URL)
      .then((response) => {
        if (!response.ok) {
          throw new Error("Network response was not ok");
        }
        return response.json();
      })
      .then((data) => data as Voice[])
      .catch((error) => {
        throw error;
      });
  }

  /**
   * Sets the required information for the speech to be synthesised and inits a new WebSocket connection.
   * Must be called at least once before text can be synthesised.
   * Saved in this instance. Can be called at any time times to update the metadata.
   *
   * @param voiceName a string with any `ShortName`. A list of all available neural voices can be found [here](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/language-support#neural-voices). However, it is not limited to neural voices: standard voices can also be used. A list of standard voices can be found [here](https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/language-support#standard-voices)
   * @param outputFormat any {@link OUTPUT_FORMAT}
   * @param voiceLocale (optional) any voice locale that is supported by the voice. See the list of all voices for compatibility. If not provided, the locale will be inferred from the `voiceName`
   */
  async setMetadata(
    voiceName: string,
    outputFormat: OUTPUT_FORMAT,
    voiceLocale?: string,
  ) {
    const oldVoice = this._voice;
    const oldVoiceLocale = this._voiceLocale;
    const oldOutputFormat = this._outputFormat;

    this._voice = voiceName;
    this._voiceLocale = voiceLocale;
    if (!this._voiceLocale) {
      const voiceLangMatch = MsEdgeTTS.VOICE_LANG_REGEX.exec(this._voice);
      if (!voiceLangMatch)
        throw new Error("Could not infer voiceLocale from voiceName!");
      this._voiceLocale = voiceLangMatch[0];
    }
    this._outputFormat = outputFormat;

    const changed =
      oldVoice !== this._voice ||
      oldVoiceLocale !== this._voiceLocale ||
      oldOutputFormat !== this._outputFormat;

    // create new client
    if (changed || this._ws!.readyState !== this._ws!.OPEN) {
      this._startTime = Date.now();
      await this._initClient();
    }
  }

  private _metadataCheck() {
    if (!this._ws)
      throw new Error(
        "Speech synthesis not configured yet. Run setMetadata before calling toStream or toFile.",
      );
  }

  /**
   * Close the WebSocket connection.
   */
  close() {
    this._ws!.close();
  }

  /**
   * Writes raw audio synthesised from text in real-time to a {@link Readable}. Uses a basic {@link _SSMLTemplate SML template}.
   *
   * @param input the text to synthesise. Can include SSML elements.
   * @param options (optional) {@link ProsodyOptions}
   * @returns {Readable} - a `stream.Readable` with the audio data
   */
  toStream(input: string, options?: ProsodyOptions): Readable {
    const { stream } = this._rawSSMLRequest(this._SSMLTemplate(input, options));
    return stream;
  }

  toArrayBuffer(input: string, options?: ProsodyOptions): Promise<ArrayBuffer> {
    return new Promise((resolve, reject) => {
      let data: Uint8Array[] = [];
      const readable = this.toStream(input, options);
      readable.on("data", (chunk) => {
        data.push(chunk);
      });
      readable.on("end", () => {
        resolve(Buffer.concat(data).buffer);
      });
      readable.on("error", (err) => {
        reject(err);
      });
    });
  }

  /**
   * Writes raw audio synthesised from a request in real-time to a {@link Readable}. Has no SSML template. Basic SSML should be provided in the request.
   *
   * @param requestSSML the SSML to send. SSML elements required in order to work.
   * @returns {Readable} - a `stream.Readable` with the audio data
   */
  rawToStream(requestSSML: string): Readable {
    const { stream } = this._rawSSMLRequest(requestSSML);
    return stream;
  }

  private _rawSSMLRequest(requestSSML: string): {
    stream: Readable;
    requestId: string;
  } {
    this._metadataCheck();

    const requestId = randomBytes(16).toString("hex");
    const request =
      `X-RequestId:${requestId}\r\nContent-Type:application/ssml+xml\r\nPath:ssml\r\n\r\n
    ` + requestSSML.trim();
    // https://docs.microsoft.com/en-us/azure/cognitive-services/speech-service/speech-synthesis-markup
    const self = this;
    const stream = new Readable({
      read() {},
      destroy(error: Error | null, callback: (error: Error | null) => void) {
        delete self._streams[requestId];
        callback(error);
      },
    });
    this._streams[requestId] = stream;
    this._send(request).then();
    return { stream, requestId };
  }
}
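End to end, the class is used in exactly one place in this PR (`openaiSpeech` in chat.tsx): set the metadata once, then synthesize. A condensed sketch; the voice name mirrors the `edgeTTSVoiceName` default added in app/store/access.ts below, and the import path assumes the repo's `@/` alias:

```ts
import { MsEdgeTTS, OUTPUT_FORMAT } from "@/app/utils/ms_edge_tts";

async function edgeSpeak(text: string): Promise<ArrayBuffer> {
  const tts = new MsEdgeTTS();
  // Opens the WebSocket; the locale ("zh-CN") is inferred from the voice name.
  await tts.setMetadata(
    "zh-CN-YunxiNeural",
    OUTPUT_FORMAT.AUDIO_24KHZ_96KBITRATE_MONO_MP3,
  );
  // Collects the streamed audio chunks into a single ArrayBuffer.
  return tts.toArrayBuffer(text);
}
```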

View File

@@ -32,6 +32,7 @@
     "idb-keyval": "^6.2.1",
     "lodash-es": "^4.17.21",
     "mermaid": "^10.6.1",
+    "markdown-to-txt": "^2.0.1",
     "nanoid": "^5.0.3",
     "next": "^14.1.1",
     "node-fetch": "^3.3.1",
@@ -79,4 +80,4 @@
     "lint-staged/yaml": "^2.2.2"
   },
   "packageManager": "yarn@1.22.19"
 }

View File

@@ -4455,11 +4455,21 @@ lodash.debounce@^4.0.8:
   resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af"
   integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==

+lodash.escape@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/lodash.escape/-/lodash.escape-4.0.1.tgz#c9044690c21e04294beaa517712fded1fa88de98"
+  integrity sha512-nXEOnb/jK9g0DYMr1/Xvq6l5xMD7GDG55+GSYIYmS0G4tBk/hURD4JR9WCavs04t33WmJx9kCyp9vJ+mr4BOUw==
+
 lodash.merge@^4.6.2:
   version "4.6.2"
   resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a"
   integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==

+lodash.unescape@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/lodash.unescape/-/lodash.unescape-4.0.1.tgz#bf2249886ce514cda112fae9218cdc065211fc9c"
+  integrity sha512-DhhGRshNS1aX6s5YdBE3njCCouPgnG29ebyHvImlZzXZf2SHgt+J08DHgytTPnpywNbO1Y8mNUFyQuIDBq2JZg==
+
 lodash@^4.17.21:
   version "4.17.21"
   resolved "https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
@@ -4515,6 +4525,20 @@ markdown-table@^3.0.0:
   resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-3.0.3.tgz#e6331d30e493127e031dd385488b5bd326e4a6bd"
   integrity sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==

+markdown-to-txt@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/markdown-to-txt/-/markdown-to-txt-2.0.1.tgz#bfd6233a2635443cc24900a158b60c6af36ce9c5"
+  integrity sha512-Hsj7KTN8k1gutlLum3vosHwVZGnv8/cbYKWVkUyo/D1rzOYddbDesILebRfOsaVfjIBJank/AVOySBlHAYqfZw==
+  dependencies:
+    lodash.escape "^4.0.1"
+    lodash.unescape "^4.0.1"
+    marked "^4.0.14"
+
+marked@^4.0.14:
+  version "4.3.0"
+  resolved "https://registry.yarnpkg.com/marked/-/marked-4.3.0.tgz#796362821b019f734054582038b116481b456cf3"
+  integrity sha512-PRsaiG84bK+AMvxziE/lCFss8juXjNaWzVbN5tXAm4XjeaS9NAHhop+PjQxz2A9h8Q4M/xGmzP8vqNwy6JeK0A==
+
 mdast-util-definitions@^5.0.0:
   version "5.1.2"
   resolved "https://registry.yarnpkg.com/mdast-util-definitions/-/mdast-util-definitions-5.1.2.tgz#9910abb60ac5d7115d6819b57ae0bcef07a3f7a7"