mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git
synced 2025-08-08 21:17:24 +08:00

commit: merge main
@@ -13,6 +13,7 @@ import { ErnieApi } from "./platforms/baidu";
import { DoubaoApi } from "./platforms/bytedance";
import { QwenApi } from "./platforms/alibaba";
import { HunyuanApi } from "./platforms/tencent";
import { MoonshotApi } from "./platforms/moonshot";

export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];
@@ -120,6 +121,9 @@ export class ClientApi {
      case ModelProvider.Hunyuan:
        this.llm = new HunyuanApi();
        break;
      case ModelProvider.Moonshot:
        this.llm = new MoonshotApi();
        break;
      default:
        this.llm = new ChatGPTApi();
    }
@@ -202,6 +206,7 @@ export function getHeaders() {
  const isBaidu = modelConfig.providerName === ServiceProvider.Baidu;
  const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance;
  const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
  const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot;
  const isEnabledAccessControl = accessStore.enabledAccessControl();
  const apiKey = isGoogle
    ? accessStore.googleApiKey
@@ -213,6 +218,8 @@ export function getHeaders() {
        ? accessStore.bytedanceApiKey
        : isAlibaba
          ? accessStore.alibabaApiKey
          : isMoonshot
            ? accessStore.moonshotApiKey
            : accessStore.openaiApiKey;
  return {
    isGoogle,
@@ -221,6 +228,7 @@ export function getHeaders() {
    isBaidu,
    isByteDance,
    isAlibaba,
    isMoonshot,
    apiKey,
    isEnabledAccessControl,
  };
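The getHeaders() selection above adds one boolean flag and one ternary branch per provider. As a hedged alternative sketch (not the upstream implementation; the field names simply mirror the accessStore fields shown in the diff), the same key lookup could be table-driven:

// A minimal sketch, assuming ServiceProvider is a string enum and that the
// accessStore fields match those referenced in the diff above.
const keyByProvider: Partial<Record<ServiceProvider, string>> = {
  [ServiceProvider.Google]: accessStore.googleApiKey,
  [ServiceProvider.ByteDance]: accessStore.bytedanceApiKey,
  [ServiceProvider.Alibaba]: accessStore.alibabaApiKey,
  [ServiceProvider.Moonshot]: accessStore.moonshotApiKey,
};
const apiKey =
  keyByProvider[modelConfig.providerName] ?? accessStore.openaiApiKey;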
@@ -272,6 +280,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi {
      return new ClientApi(ModelProvider.Qwen);
    case ServiceProvider.Tencent:
      return new ClientApi(ModelProvider.Hunyuan);
    case ServiceProvider.Moonshot:
      return new ClientApi(ModelProvider.Moonshot);
    default:
      return new ClientApi(ModelProvider.GPT);
  }
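With the new cases in place, provider selection flows end to end: getClientApi maps ServiceProvider.Moonshot to ModelProvider.Moonshot, whose constructor case instantiates MoonshotApi. A short usage sketch (assuming ClientApi exposes its llm field publicly, as the constructor assignments above suggest):

const api = getClientApi(ServiceProvider.Moonshot);
// api.llm is a MoonshotApi instance; chat requests go through api.llm.chat(...)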
app/client/platforms/moonshot.ts (new file, 251 lines)
@@ -0,0 +1,251 @@
"use client";
// Azure and OpenAI use the same models, so they share the same LLMApi.
import {
  ApiPath,
  DEFAULT_API_HOST,
  DEFAULT_MODELS,
  Moonshot,
  REQUEST_TIMEOUT_MS,
  ServiceProvider,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { collectModelsWithDefaultModel } from "@/app/utils/model";
import { preProcessImageContent } from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";

import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  LLMUsage,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";

import { OpenAIListModelResponse, RequestPayload } from "./openai";
export class MoonshotApi implements LLMApi {
  private disableListModels = true;

  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.moonshotUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Moonshot;
      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Moonshot)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }
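  // A hedged sketch of what path() resolves to, assuming
  // ApiPath.Moonshot === "/api/moonshot" (an assumption; see app/constant.ts):
  //   web build, no custom config -> "/api/moonshot/<path>"
  //   desktop app (isApp)         -> DEFAULT_API_HOST + "/proxy/api/moonshot/<path>"
  //   custom "api.moonshot.cn"    -> "https://api.moonshot.cn/<path>"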
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }
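  // Moonshot's API is OpenAI-compatible, so extractMessage reads the familiar
  // "choices" shape. A hypothetical response body (illustrative only):
  //   { choices: [{ message: { role: "assistant", content: "Hello!" } }] }
  // would yield "Hello!".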
  async chat(options: ChatOptions) {
    const messages: ChatOptions["messages"] = [];
    for (const v of options.messages) {
      // Moonshot takes plain text; flatten any multimodal content to a string.
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    // Later spreads win: the session mask overrides app defaults, and the
    // per-request model/providerName override both.
    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };
    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is intentionally not sent; the author found the param
      // more trouble than it is worth.
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };

    console.log("[Request] moonshot payload: ", requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);
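    // For reference, a hypothetical wire payload this produces (values are
    // illustrative, not from the source):
    //   {
    //     "messages": [{ "role": "user", "content": "Hi" }],
    //     "stream": true,
    //     "model": "moonshot-v1-8k",
    //     "temperature": 0.5,
    //     "presence_penalty": 0,
    //     "frequency_penalty": 0,
    //     "top_p": 1
    //   }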
    try {
      const chatPath = this.path(Moonshot.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // abort the request if it takes longer than REQUEST_TIMEOUT_MS
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start the animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;
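        // Pacing note: each frame emits max(1, round(backlog / 60)) chars,
        // so the backlog decays by roughly 1/60 per frame. Worked example:
        // with 300 chars buffered, frame 1 emits round(300/60) = 5, leaving
        // 295; frame 2 emits 5 again, and so on, tapering toward 1 char/frame.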
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Moonshot] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              // carried over from the OpenAI client; unused for Moonshot
              const textmoderation = json?.prompt_filter_results;

              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
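Taken together, a hedged end-to-end sketch of driving this client; the ChatOptions field names are taken from their use in chat() above, and the model id and message content are illustrative:

// A minimal usage sketch, assuming the ChatOptions callbacks used above.
const api = new MoonshotApi();
await api.chat({
  messages: [{ role: "user", content: "Hello, Kimi!" }],
  config: {
    model: "moonshot-v1-8k", // illustrative model id
    providerName: "Moonshot",
    stream: true,
  },
  onUpdate(full, delta) {
    console.log("partial:", delta); // fired per animation frame while streaming
  },
  onFinish(full) {
    console.log("done:", full);
  },
  onError(err) {
    console.error(err);
  },
});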