Mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git (synced 2025-08-08 21:17:24 +08:00)
Merge remote-tracking branch 'source/main'
@@ -21,7 +21,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import { getMessageTextContent } from "@/app/utils";
 
 export interface OpenAIListModelResponse {
   object: string;
@@ -3,7 +3,6 @@ import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import { DEFAULT_API_HOST } from "@/app/constant";
-import { RequestMessage } from "@/app/typing";
 import {
   EventStreamContentType,
   fetchEventSource,
@@ -12,6 +11,7 @@ import {
 import Locale from "../../locales";
 import { prettyObject } from "@/app/utils/format";
 import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 
 export type MultiBlockContent = {
@@ -93,7 +93,12 @@ export class ClaudeApi implements LLMApi {
       },
     };
 
-    const messages = [...options.messages];
+    // try get base64image from local cache image_url
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }
 
     const keys = ["system", "user"];
 
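
Note: the same "preprocess, then send" pattern is applied in each client below. preProcessImageContent's implementation is not part of this diff; a minimal sketch of the contract the loop above assumes (helper names marked hypothetical are not from the source):

type MultimodalPart =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

// hypothetical stand-in for the cache lookup inside @/app/utils/chat
declare function resolveCachedImageToDataUrl(url: string): Promise<string>;

// Sketch only: plain string content passes through untouched; image_url
// parts that point at the local cache are rewritten to base64 data URLs
// so the provider receives inline image data.
async function preProcessImageContentSketch(
  content: string | MultimodalPart[],
): Promise<string | MultimodalPart[]> {
  if (typeof content === "string") return content;
  const parts: MultimodalPart[] = [];
  for (const part of content) {
    if (part.type === "image_url") {
      const url = await resolveCachedImageToDataUrl(part.image_url.url);
      parts.push({ type: "image_url", image_url: { url } });
    } else {
      parts.push(part);
    }
  }
  return parts;
}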
@@ -1,4 +1,4 @@
-import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
 import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
@@ -14,8 +14,37 @@ import {
   getMessageImages,
   isVisionModel,
 } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 
 export class GeminiProApi implements LLMApi {
+  path(path: string): string {
+    const accessStore = useAccessStore.getState();
+
+    let baseUrl = "";
+    if (accessStore.useCustomConfig) {
+      baseUrl = accessStore.googleUrl;
+    }
+
+    if (baseUrl.length === 0) {
+      const isApp = !!getClientConfig()?.isApp;
+      baseUrl = isApp
+        ? DEFAULT_API_HOST + `/api/proxy/google?key=${accessStore.googleApiKey}`
+        : ApiPath.Google;
+    }
+    if (baseUrl.endsWith("/")) {
+      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
+    }
+    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Google)) {
+      baseUrl = "https://" + baseUrl;
+    }
+
+    console.log("[Proxy Endpoint] ", baseUrl, path);
+
+    let chatPath = [baseUrl, path].join("/");
+
+    chatPath += chatPath.includes("?") ? "&alt=sse" : "?alt=sse";
+    return chatPath;
+  }
   extractMessage(res: any) {
     console.log("[Response] gemini-pro response: ", res);
 
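
Note: path() now owns endpoint construction for GeminiProApi, including the alt=sse query parameter that switches the Gemini REST route to server-sent events. A usage sketch, assuming ApiPath.Google resolves to a web proxy prefix such as "/api/google" (the constant's value is not shown in this diff):

const api = new GeminiProApi();

// Web build, no custom endpoint configured: baseUrl falls back to ApiPath.Google.
api.path("v1beta/models/gemini-pro:streamGenerateContent");
// -> "/api/google/v1beta/models/gemini-pro:streamGenerateContent?alt=sse"

// App build: the API key already sits in the query string, so the existing "?"
// makes path() append "&alt=sse" instead of starting a new query.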
@@ -28,7 +57,14 @@ export class GeminiProApi implements LLMApi {
   async chat(options: ChatOptions): Promise<void> {
     const apiClient = this;
     let multimodal = false;
-    const messages = options.messages.map((v) => {
+
+    // try get base64image from local cache image_url
+    const _messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      _messages.push({ role: v.role, content });
+    }
+    const messages = _messages.map((v) => {
       let parts: any[] = [{ text: getMessageTextContent(v) }];
       if (isVisionModel(options.config.model)) {
         const images = getMessageImages(v);
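
Note: map() alone cannot absorb this change, because an async callback would make it collect pending promises rather than resolved contents; hence the awaited for-loop fills _messages first, and the synchronous map() that builds the Gemini parts runs afterwards. The pitfall, for reference:

// Wrong: an async callback makes map() return an array of pending
// promises, so the request body would serialize Promise objects:
//
//   const messages = options.messages.map(async (v) => ({
//     role: v.role,
//     content: await preProcessImageContent(v.content),
//   }));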
@@ -111,30 +147,13 @@ export class GeminiProApi implements LLMApi {
       ],
     };
 
-    let baseUrl = "";
-
-    if (accessStore.useCustomConfig) {
-      baseUrl = accessStore.googleUrl;
-    }
-
-    const isApp = !!getClientConfig()?.isApp;
-
     let shouldStream = !!options.config.stream;
     const controller = new AbortController();
     options.onController?.(controller);
     try {
-      if (!baseUrl) {
-        baseUrl = isApp
-          ? DEFAULT_API_HOST + "/api/proxy/google/"
-          : this.path("");
-      }
-      baseUrl = `${baseUrl}/${Google.ChatPath(modelConfig.model)}`.replaceAll(
-        "//",
-        "/",
-      );
-      if (isApp) {
-        baseUrl += `?key=${accessStore.googleApiKey}`;
-      }
+      // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
+      const chatPath = this.path(Google.ChatPath(modelConfig.model));
 
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
@@ -184,10 +203,6 @@ export class GeminiProApi implements LLMApi {
 
         controller.signal.onabort = finish;
 
-        // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
-        const chatPath =
-          baseUrl.replace("generateContent", "streamGenerateContent") +
-          (baseUrl.indexOf("?") > -1 ? "&alt=sse" : "?alt=sse");
         fetchEventSource(chatPath, {
           ...chatPayload,
           async onopen(res) {
@@ -262,7 +277,7 @@ export class GeminiProApi implements LLMApi {
           openWhenHidden: true,
         });
       } else {
-        const res = await fetch(baseUrl, chatPayload);
+        const res = await fetch(chatPath, chatPayload);
         clearTimeout(requestTimeoutId);
         const resJson = await res.json();
         if (resJson?.promptFeedback?.blockReason) {
@@ -288,14 +303,4 @@ export class GeminiProApi implements LLMApi {
   async models(): Promise<LLMModel[]> {
     return [];
   }
-  path(path: string): string {
-    return "/api/google/" + path;
-  }
 }
-
-function ensureProperEnding(str: string) {
-  if (str.startsWith("[") && !str.endsWith("]")) {
-    return str + "]";
-  }
-  return str;
-}
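
Note: with URL assembly consolidated into path(), the streaming branch no longer rewrites a generateContent URL at the call site, the non-streaming branch fetches the same chatPath, and the now-dead path() stub and unused ensureProperEnding() helper are dropped. Before and after for the stream URL (this assumes Google.ChatPath now names the streamGenerateContent route directly):

// Before: derive the SSE URL from a non-stream URL on demand.
//   const chatPath =
//     baseUrl.replace("generateContent", "streamGenerateContent") +
//     (baseUrl.indexOf("?") > -1 ? "&alt=sse" : "?alt=sse");
//
// After: build it once, alt=sse included, shared by both branches.
//   const chatPath = this.path(Google.ChatPath(modelConfig.model));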
@@ -11,6 +11,7 @@ import {
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { collectModelsWithDefaultModel } from "@/app/utils/model";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 
 import {
@@ -105,10 +106,13 @@ export class ChatGPTApi implements LLMApi {
 
   async chat(options: ChatOptions) {
     const visionModel = isVisionModel(options.config.model);
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: visionModel ? v.content : getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = visionModel
+        ? await preProcessImageContent(v.content)
+        : getMessageTextContent(v);
+      messages.push({ role: v.role, content });
+    }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
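
Note: for vision models the content now stays multimodal and only its cached images are inlined as base64; for text models it still collapses to a plain string. The resulting message shapes, sketched with OpenAI-style parts (illustrative values only):

// Text model: content is a plain string.
const textMessage = { role: "user", content: "hello" };

// Vision model: content remains an array of parts, with cached image_url
// entries already resolved to data URLs by preProcessImageContent.
const visionMessage = {
  role: "user",
  content: [
    { type: "text", text: "what is in this image?" },
    { type: "image_url", image_url: { url: "data:image/png;base64,..." } },
  ],
};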