commit 4f1dac4d29
GH Action - Upstream Sync, 2024-01-01 01:15:54 +00:00
13 changed files with 46 additions and 11 deletions


@@ -14,8 +14,8 @@ PROXY_URL=http://localhost:7890
 GOOGLE_API_KEY=

 # (optional)
-# Default: https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent
-# Google Gemini Pro API url, set if you want to customize the Google Gemini Pro API url.
+# Default: https://generativelanguage.googleapis.com/
+# Google Gemini Pro API url without pathname, set if you want to customize the Google Gemini Pro API url.
 GOOGLE_URL=

 # Override openai api request base url. (optional)
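In other words, GOOGLE_URL is now expected to hold only the base URL; the old full generateContent URL is no longer the documented format. A minimal sketch of how such a setting might be resolved follows; DEFAULT_GOOGLE_BASE and resolveGoogleBaseUrl are illustrative names, not the project's actual config code.

// Illustrative only: resolve the Gemini base URL from GOOGLE_URL.
// New-style value: an origin such as "https://my-gemini-proxy.example.com/".
// Old-style value (no longer expected): the full generateContent URL.
const DEFAULT_GOOGLE_BASE = "https://generativelanguage.googleapis.com/";

function resolveGoogleBaseUrl(env: Record<string, string | undefined>): string {
  const raw = env.GOOGLE_URL?.trim();
  return raw && raw.length > 0 ? raw : DEFAULT_GOOGLE_BASE;
}

// Usage (e.g. in a Node environment): resolveGoogleBaseUrl(process.env)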


@@ -1,5 +1,5 @@
 <div align="center">

-<img src="./docs/images/icon.svg" alt="icon"/>
+<img src="./docs/images/head-cover.png" alt="icon"/>

 <h1 align="center">NextChat (ChatGPT Next Web)</h1>


@@ -9,6 +9,7 @@ import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import Locale from "../../locales";
 import { getServerSideConfig } from "@/app/config/server";
+import de from "@/app/locales/de";
 export class GeminiProApi implements LLMApi {
   extractMessage(res: any) {
     console.log("[Response] gemini-pro response: ", res);
@@ -58,6 +59,24 @@ export class GeminiProApi implements LLMApi {
         topP: modelConfig.top_p,
         // "topK": modelConfig.top_k,
       },
+      safetySettings: [
+        {
+          category: "HARM_CATEGORY_HARASSMENT",
+          threshold: "BLOCK_ONLY_HIGH",
+        },
+        {
+          category: "HARM_CATEGORY_HATE_SPEECH",
+          threshold: "BLOCK_ONLY_HIGH",
+        },
+        {
+          category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+          threshold: "BLOCK_ONLY_HIGH",
+        },
+        {
+          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+          threshold: "BLOCK_ONLY_HIGH",
+        },
+      ],
     };

     console.log("[Request] google payload: ", requestPayload);
@@ -87,9 +106,11 @@
       "streamGenerateContent",
     );
     let finished = false;
+
+    let existingTexts: string[] = [];
     const finish = () => {
       finished = true;
-      options.onFinish(responseText + remainText);
+      options.onFinish(existingTexts.join(""));
     };

     // animate response to make it looks smooth
@@ -134,11 +155,26 @@
           try {
             let data = JSON.parse(ensureProperEnding(partialData));
-            console.log(data);
-            let fetchText = apiClient.extractMessage(data[data.length - 1]);
-            console.log("[Response Animation] fetchText: ", fetchText);
-            remainText += fetchText;
+            const textArray = data.reduce(
+              (acc: string[], item: { candidates: any[] }) => {
+                const texts = item.candidates.map((candidate) =>
+                  candidate.content.parts
+                    .map((part: { text: any }) => part.text)
+                    .join(""),
+                );
+                return acc.concat(texts);
+              },
+              [],
+            );
+
+            if (textArray.length > existingTexts.length) {
+              const deltaArray = textArray.slice(existingTexts.length);
+              existingTexts = textArray;
+              remainText += deltaArray.join("");
+            }
           } catch (error) {
+            // console.log("[Response Animation] error: ", error,partialData);
             // skip error message when parsing json
           }
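The hunk above replaces the old "take the last chunk" extraction with a cumulative approach: every parsed chunk's candidates are flattened into one string per candidate, and only the entries beyond what has already been emitted are appended to remainText. A standalone sketch of that delta logic follows; the GeminiChunk type and the function names are assumptions, and flatMap stands in for the diff's reduce/concat.

// Standalone sketch of the streaming delta logic; shapes and names are assumed.
type GeminiChunk = {
  candidates: Array<{ content: { parts: Array<{ text: string }> } }>;
};

// One concatenated string per candidate, across all parsed chunks so far.
function collectTexts(chunks: GeminiChunk[]): string[] {
  return chunks.flatMap((chunk) =>
    chunk.candidates.map((candidate) =>
      candidate.content.parts.map((part) => part.text).join(""),
    ),
  );
}

// Returns only the text that has not been emitted yet, plus the updated snapshot.
function takeDelta(
  chunks: GeminiChunk[],
  existingTexts: string[],
): { delta: string; nextTexts: string[] } {
  const textArray = collectTexts(chunks);
  if (textArray.length <= existingTexts.length) {
    return { delta: "", nextTexts: existingTexts };
  }
  return {
    delta: textArray.slice(existingTexts.length).join(""),
    nextTexts: textArray,
  };
}

// Usage: const { delta, nextTexts } = takeDelta(parsedChunks, existingTexts);
//        remainText += delta; existingTexts = nextTexts;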


@@ -87,8 +87,7 @@ export const Azure = {
 };

 export const Google = {
-  ExampleEndpoint:
-    "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
+  ExampleEndpoint: "https://generativelanguage.googleapis.com/",
   ChatPath: "v1beta/models/gemini-pro:generateContent",

   // /api/openai/v1/chat/completions
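ExampleEndpoint is now only the base URL, while ChatPath keeps the model-specific path. A hedged sketch of how the two might be combined into a full request URL; it restates the two constants from the hunk, and joinUrl is an illustrative helper rather than the project's actual code.

// Illustrative only: combining the base endpoint with the relative chat path.
const Google = {
  ExampleEndpoint: "https://generativelanguage.googleapis.com/",
  ChatPath: "v1beta/models/gemini-pro:generateContent",
};

// Normalize slashes so either "base/" or "base" joins cleanly with the path.
function joinUrl(base: string, path: string): string {
  return base.replace(/\/+$/, "") + "/" + path.replace(/^\/+/, "");
}

// -> "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent"
const chatUrl = joinUrl(Google.ExampleEndpoint, Google.ChatPath);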


@@ -321,7 +321,7 @@ const cn = {
       Endpoint: {
         Title: "接口地址",
-        SubTitle: "样例:",
+        SubTitle: "不包含请求路径,样例:",
       },
       ApiVerion: {

(In the Chinese locale, "接口地址" means "Endpoint address"; the subtitle changes from "Example:" to "Does not include the request path; example:".)

Binary files (contents not shown):
- docs/images/head-cover.png: new file, 1.7 MiB
- Seven existing binary image files changed: 9.6 KiB → 20 KiB, 8.6 KiB → 95 KiB, 8.5 KiB → 19 KiB, 633 B → 719 B, 1.2 KiB → 1.7 KiB, 15 KiB → 15 KiB, 26 KiB → 74 KiB