Mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git, synced 2025-09-09 11:43:06 +08:00
feat: 1) present 'max_tokens' as a property tied to a single model; 2) remove the original author's send-time verification logic and replace it with a user-input validator (pre-validation); 3) provide the ability to pull the user-visible model list exposed by the 'provider'; 4) pass provider-related parameters in the constructor of 'providerClient' rather than in the 'chat' method.
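The intended shape of the refactor, under the reading above, looks roughly like the following sketch. Names such as ProviderClient, ProviderConfig, and validateInput are illustrative assumptions, not the actual types introduced by this commit, and the OpenAI-style request paths are placeholders only.

// Sketch of a provider client matching the four points above (names are assumed).
interface ProviderConfig {
  apiKey: string;
  baseUrl: string;
}

interface ProviderModelInfo {
  name: string;
  // point 1: max_tokens modeled as a per-model property, not a global setting
  max_tokens?: number;
}

class ProviderClient {
  // point 4: provider-related parameters arrive via the constructor, not chat()
  constructor(private readonly config: ProviderConfig) {}

  // point 2: validate user input before a request is ever sent
  validateInput(text: string): string | undefined {
    if (!text.trim()) return "empty message";
    return undefined;
  }

  async chat(messages: { role: string; content: string }[]): Promise<string> {
    const res = await fetch(`${this.config.baseUrl}/v1/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${this.config.apiKey}`,
      },
      body: JSON.stringify({ messages }),
    });
    const json = await res.json();
    return json.choices?.[0]?.message?.content ?? "";
  }

  // point 3: pull the user-visible model list from the provider
  async getAvailableModels(): Promise<ProviderModelInfo[]> {
    const res = await fetch(`${this.config.baseUrl}/v1/models`, {
      headers: { Authorization: `Bearer ${this.config.apiKey}` },
    });
    const json = await res.json();
    return (json.data ?? []).map((m: { id: string }) => ({ name: m.id }));
  }
}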
@@ -1,19 +1,26 @@
import { modelConfigs, settingItems, SettingKeys, OpenaiMetas } from "./config";
import { getMessageTextContent } from "@/app/utils";
import {
  ChatHandlers,
  InternalChatRequestPayload,
  IProviderTemplate,
} from "../../core/types";
  ModelInfo,
  getMessageTextContent,
} from "../../common";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import Locale from "@/app/locales";
import { makeBearer, validString, prettyObject } from "./utils";
import {
  modelConfigs,
  settingItems,
  SettingKeys,
  OpenaiMetas,
  ROLES,
} from "./config";

export type OpenAIProviderSettingKeys = SettingKeys;

export const ROLES = ["system", "user", "assistant"] as const;
export type MessageRole = (typeof ROLES)[number];

export interface MultimodalContent {
@@ -28,7 +35,6 @@ export interface RequestMessage {
  role: MessageRole;
  content: string | MultimodalContent[];
}

interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
@@ -43,6 +49,16 @@ interface RequestPayload {
  max_tokens?: number;
}

interface ModelList {
  object: "list";
  data: Array<{
    id: string;
    object: "model";
    created: number;
    owned_by: "system" | "openai-internal";
  }>;
}

class OpenAIProvider
  implements IProviderTemplate<SettingKeys, "openai", typeof OpenaiMetas>
{
@@ -51,7 +67,7 @@ class OpenAIProvider

  readonly REQUEST_TIMEOUT_MS = 60000;

  models = modelConfigs.map((c) => ({ ...c, providerTemplateName: this.name }));
  defaultModels = modelConfigs;

  providerMeta = {
    displayName: "OpenAI",
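Point 1 of the commit message, 'max_tokens' as a property tied to a single model, suggests that entries in modelConfigs carry their own token cap. The shape below is a hypothetical illustration; the field names are assumptions and are not taken from ./config.

// Hypothetical shape of one modelConfigs entry (illustrative field names only).
const exampleModelConfig = {
  name: "gpt-3.5-turbo",
  displayName: "GPT-3.5 Turbo",
  config: {
    temperature: 0.5,
    max_tokens: 4000, // cap tied to this model rather than a global setting
  },
};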
@@ -62,25 +78,11 @@ class OpenAIProvider
    const {
      providerConfig: { openaiUrl },
    } = payload;

    const path = OpenaiMetas.ChatPath;

    let baseUrl = openaiUrl;
    console.log("[Proxy Endpoint] ", openaiUrl, path);

    if (!baseUrl) {
      baseUrl = "/api/openai";
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api/openai")) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
    return [openaiUrl, path].join("/");
  }

  private getHeaders(payload: InternalChatRequestPayload<SettingKeys>) {
@@ -90,14 +92,9 @@ class OpenAIProvider
      "Content-Type": "application/json",
      Accept: "application/json",
    };
    const authHeader = "Authorization";

    const makeBearer = (s: string) => `Bearer ${s.trim()}`;
    const validString = (x?: string): x is string => Boolean(x && x.length > 0);

    // only attach the Authorization header when a usable API key is present
    if (validString(openaiApiKey)) {
      headers[authHeader] = makeBearer(openaiApiKey);
      headers["Authorization"] = makeBearer(openaiApiKey);
    }

    return headers;
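The inline makeBearer and validString helpers shown above are dropped in favor of imports from "./utils" (see the new import at the top of the diff). A minimal sketch of that module, assuming it simply hosts the same one-liners plus a re-exported prettyObject:

// ./utils (sketch): the helpers previously defined inline in getHeaders.
export const makeBearer = (s: string) => `Bearer ${s.trim()}`;

export const validString = (x?: string): x is string =>
  Boolean(x && x.length > 0);

// prettyObject is assumed to be re-exported from "@/app/utils/format",
// which pretty-prints an object for error messages.
export { prettyObject } from "@/app/utils/format";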
@@ -143,9 +140,11 @@ class OpenAIProvider
    };
  }

  private readWholeMessageResponseBody(res: any) {
  private readWholeMessageResponseBody(res: {
    choices: { message: { content: any } }[];
  }) {
    return {
      message: res.choices?.at(0)?.message?.content ?? "",
      message: res.choices?.[0]?.message?.content ?? "",
    };
  }

@@ -190,52 +189,12 @@ class OpenAIProvider

  streamChat(
    payload: InternalChatRequestPayload<SettingKeys>,
    onProgress: (message: string, chunk: string) => void,
    onFinish: (message: string) => void,
    onError: (err: Error) => void,
    handlers: ChatHandlers,
  ) {
    const requestPayload = this.formatChatPayload(payload);

    const timer = this.getTimer();

    let responseText = "";
    let remainText = "";
    let finished = false;

    // animate the response to make it look smooth
    const animateResponseText = () => {
      if (finished || timer.signal.aborted) {
        responseText += remainText;
        console.log("[Response Animation] finished");
        if (responseText?.length === 0) {
          onError(new Error("empty response from server"));
        }
        return;
      }

      if (remainText.length > 0) {
        const fetchCount = Math.max(1, Math.round(remainText.length / 60));
        const fetchText = remainText.slice(0, fetchCount);
        responseText += fetchText;
        remainText = remainText.slice(fetchCount);
        onProgress(responseText, fetchText);
      }

      requestAnimationFrame(animateResponseText);
    };

    // start animation
    animateResponseText();

    const finish = () => {
      if (!finished) {
        finished = true;
        onFinish(responseText + remainText);
      }
    };

    timer.signal.onabort = finish;

    fetchEventSource(requestPayload.url, {
      ...requestPayload,
      async onopen(res) {
@@ -244,8 +203,8 @@ class OpenAIProvider
        console.log("[OpenAI] request response content type: ", contentType);

        if (contentType?.startsWith("text/plain")) {
          responseText = await res.clone().text();
          return finish();
          const responseText = await res.clone().text();
          return handlers.onFlash(responseText);
        }

        if (
@@ -255,29 +214,29 @@ class OpenAIProvider
            ?.startsWith(EventStreamContentType) ||
          res.status !== 200
        ) {
          const responseTexts = [responseText];
          const responseTexts = [];
          if (res.status === 401) {
            responseTexts.push(Locale.Error.Unauthorized);
          }

          let extraInfo = await res.clone().text();
          try {
            const resJson = await res.clone().json();
            extraInfo = prettyObject(resJson);
          } catch {}

          if (res.status === 401) {
            responseTexts.push(Locale.Error.Unauthorized);
          }

          if (extraInfo) {
            responseTexts.push(extraInfo);
          }

          responseText = responseTexts.join("\n\n");
          const responseText = responseTexts.join("\n\n");

          return finish();
          return handlers.onFlash(responseText);
        }
      },
      onmessage(msg) {
        if (msg.data === "[DONE]" || finished) {
          return finish();
        if (msg.data === "[DONE]") {
          return;
        }
        const text = msg.data;
        try {
@@ -286,20 +245,19 @@ class OpenAIProvider
            delta: { content: string };
          }>;
          const delta = choices[0]?.delta?.content;
          const textmoderation = json?.prompt_filter_results;

          if (delta) {
            remainText += delta;
            handlers.onProgress(delta);
          }
        } catch (e) {
          console.error("[Request] parse error", text, msg);
        }
      },
      onclose() {
        finish();
        handlers.onFinish();
      },
      onerror(e) {
        onError(e);
        handlers.onError(e);
        throw e;
      },
      openWhenHidden: true,
@@ -307,6 +265,23 @@ class OpenAIProvider

    return timer;
  }

  async getAvailableModels(
    providerConfig: Record<SettingKeys, string>,
  ): Promise<ModelInfo[]> {
    const { openaiApiKey, openaiUrl } = providerConfig;
    const res = await fetch(`${openaiUrl}/v1/models`, {
      headers: {
        Authorization: `Bearer ${openaiApiKey}`,
      },
      method: "GET",
    });
    const data: ModelList = await res.json();

    return data.data.map((o) => ({
      name: o.id,
    }));
  }
}

export default OpenAIProvider;
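For reference, calling the new getAvailableModels looks roughly like this. The config values are placeholders, the cast assumes no other SettingKeys are required, and the parameterless constructor is an assumption based on the class body shown in this diff.

// Sketch: pulling the user-visible model list with explicit provider config.
const provider = new OpenAIProvider();

provider
  .getAvailableModels({
    openaiApiKey: "sk-...",
    openaiUrl: "https://api.openai.com",
  } as Record<OpenAIProviderSettingKeys, string>)
  .then((models) => console.log(models.map((m) => m.name)));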