Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web
commit 93252a34e6

@@ -63,7 +63,7 @@ dist
 
 # Gatsby files
 .cache/
-public
+
 
 # Vuepress build output
 .vuepress/dist

@@ -2,7 +2,7 @@
 # Your openai api key. (required)
 OPENAI_API_KEY=sk-xxxx
 
-# Access passsword, separated by comma. (optional)
+# Access password, separated by comma. (optional)
 CODE=your-password
 
 # You can start service behind a proxy
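
Note: as the corrected comment says, CODE accepts one or more access passwords separated by commas, any one of which should grant access to the UI. For example (placeholder values):

CODE=password-one,password-two
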

@@ -43,10 +43,6 @@ export async function requestOpenai(req: NextRequest) {
 
   console.log("[Proxy] ", path);
   console.log("[Base Url]", baseUrl);
-  // this fix [Org ID] undefined in server side if not using custom point
-  if (serverConfig.openaiOrgId !== undefined) {
-    console.log("[Org ID]", serverConfig.openaiOrgId);
-  }
 
   const timeoutId = setTimeout(
     () => {

@@ -116,18 +112,37 @@ export async function requestOpenai(req: NextRequest) {
   try {
     const res = await fetch(fetchUrl, fetchOptions);
 
+    // Extract the OpenAI-Organization header from the response
+    const openaiOrganizationHeader = res.headers.get("OpenAI-Organization");
+
+    // Check if serverConfig.openaiOrgId is defined and not an empty string
+    if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") {
+      // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present
+      console.log("[Org ID]", openaiOrganizationHeader);
+    } else {
+      console.log("[Org ID] is not set up.");
+    }
+
     // to prevent browser prompt for credentials
     const newHeaders = new Headers(res.headers);
     newHeaders.delete("www-authenticate");
     // to disable nginx buffering
     newHeaders.set("X-Accel-Buffering", "no");
 
+
+    // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV)
+    // Also, this is to prevent the header from being sent to the client
+    if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") {
+      newHeaders.delete("OpenAI-Organization");
+    }
+
     // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
     // So if the streaming is disabled, we need to remove the content-encoding header
     // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header
     // The browser will try to decode the response with brotli and fail
     newHeaders.delete("content-encoding");
 
+
     return new Response(res.body, {
       status: res.status,
       statusText: res.statusText,
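
Note: the block above follows a common proxy pattern: clone the upstream headers (a fetch Response's own headers are immutable), strip what the client must not see or cannot handle, then re-wrap the original body. A minimal standalone sketch of that pattern (the sanitizeHeaders helper is hypothetical, not part of this patch):

// Hypothetical helper illustrating the header-rewrite pattern above.
function sanitizeHeaders(upstream: Response, hideOrgId: boolean): Headers {
  const headers = new Headers(upstream.headers); // clone; upstream.headers is immutable
  headers.delete("www-authenticate"); // avoid the browser credential prompt
  headers.set("X-Accel-Buffering", "no"); // keep nginx from buffering the stream
  if (hideOrgId) headers.delete("OpenAI-Organization"); // hide the org when none is configured
  headers.delete("content-encoding"); // let the platform (e.g. Vercel gzip) re-encode the body
  return headers;
}

// Usage sketch:
// return new Response(res.body, { status: res.status, headers: sanitizeHeaders(res, true) });
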

@@ -228,19 +228,21 @@ export class ChatGPTApi implements LLMApi {
           }
           const text = msg.data;
           try {
-            const json = JSON.parse(text) as {
-              choices: Array<{
-                delta: {
-                  content: string;
-                };
-              }>;
-            };
-            const delta = json.choices[0]?.delta?.content;
+            const json = JSON.parse(text);
+            const choices = json.choices as Array<{ delta: { content: string } }>;
+            const delta = choices[0]?.delta?.content;
+            const textmoderation = json?.prompt_filter_results;
+
             if (delta) {
               remainText += delta;
             }
+
+            if (textmoderation && textmoderation.length > 0 && ServiceProvider.Azure) {
+              const contentFilterResults = textmoderation[0]?.content_filter_results;
+              console.log(`[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`, contentFilterResults);
+            }
           } catch (e) {
-            console.error("[Request] parse error", text);
+            console.error("[Request] parse error", text, msg);
           }
         },
         onclose() {
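
Note: each msg.data handled here is one chunk of the chat-completions SSE stream. An abbreviated sketch of the shape this parser assumes (prompt_filter_results is Azure OpenAI's content-filter annotation and does not appear on plain OpenAI responses; field values here are illustrative):

// Abbreviated sample chunk; shapes are assumptions based on the parser above.
const sample = JSON.parse(`{
  "choices": [{ "delta": { "content": "Hello" } }],
  "prompt_filter_results": [
    { "content_filter_results": { "hate": { "filtered": false, "severity": "safe" } } }
  ]
}`);

console.log(sample.choices[0]?.delta?.content); // "Hello"
console.log(sample.prompt_filter_results?.[0]?.content_filter_results); // flagged categories
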

@@ -227,7 +227,7 @@ export function MessageSelector(props: {
           </div>
 
           <div className={styles["checkbox"]}>
-            <input type="checkbox" checked={isSelected}></input>
+            <input type="checkbox" checked={isSelected} readOnly></input>
           </div>
         </div>
       );
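
Note: without readOnly, React warns when a checkbox receives a checked prop but no onChange handler; here the toggle is presumably handled by a click handler on an enclosing element, so readOnly just marks the input as display-only and silences the warning. A minimal sketch of the same controlled-checkbox pattern (names are illustrative, not from this patch):

// Click handling lives on the wrapper; readOnly marks the input as display-only.
function SelectableRow(props: { selected: boolean; onToggle: () => void }) {
  return (
    <div onClick={props.onToggle}>
      <input type="checkbox" checked={props.selected} readOnly />
    </div>
  );
}
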

@@ -557,6 +557,7 @@ export const useChatStore = createPersistStore(
         messages: topicMessages,
         config: {
           model: getSummarizeModel(session.mask.modelConfig.model),
+          stream: false,
         },
         onFinish(message) {
           get().updateCurrentSession(
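
Note: the topic title produced here is only consumed whole in onFinish, so there is nothing to gain from streaming it; stream: false presumably asks for the completion in a single non-streamed response.
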

@@ -600,6 +601,10 @@ export const useChatStore = createPersistStore(
       historyMsgLength > modelConfig.compressMessageLengthThreshold &&
       modelConfig.sendMemory
     ) {
+      /** Destruct max_tokens while summarizing
+       * this param is just shit
+       **/
+      const { max_tokens, ...modelcfg } = modelConfig;
       api.llm.chat({
         messages: toBeSummarizedMsgs.concat(
           createMessage({
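
Note: rest destructuring is the idiomatic way to drop a single key from an object; max_tokens is pulled out and modelcfg keeps everything else, which the next hunk then spreads into the request config in place of modelConfig. A standalone sketch (values are illustrative):

// Rest destructuring: split one property off, keep the rest.
const cfg = { model: "gpt-3.5-turbo", temperature: 0.5, max_tokens: 4000 };
const { max_tokens, ...modelcfg } = cfg;
console.log(max_tokens); // 4000
console.log(modelcfg); // { model: "gpt-3.5-turbo", temperature: 0.5 }
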

@@ -609,7 +614,7 @@ export const useChatStore = createPersistStore(
           }),
         ),
         config: {
-          ...modelConfig,
+          ...modelcfg,
           stream: true,
           model: getSummarizeModel(session.mask.modelConfig.model),
         },