Compare commits


1 commit

8 changed files with 303 additions and 73 deletions

View File: README.md

@@ -22,12 +22,12 @@ English / [简体中文](./README_CN.md)
[![MacOS][MacOS-image]][download-url]
[![Linux][Linux-image]][download-url]
[NextChatAI](https://nextchat.club?utm_source=readme) / [iOS APP](https://apps.apple.com/us/app/nextchat-ai/id6743085599) / [Web App Demo](https://app.nextchat.club) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Enterprise Edition](#enterprise-edition)
[NextChatAI](https://nextchat.club?utm_source=readme) / [Web App Demo](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev)
[saas-url]: https://nextchat.club?utm_source=readme
[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge
[web-url]: https://app.nextchat.club/
[web-url]: https://app.nextchat.dev/
[download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases
[Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge
[Windows-image]: https://img.shields.io/badge/-Windows-blue?logo=windows
@@ -40,14 +40,13 @@ English / [简体中文](./README_CN.md)
</div>
## 🥳 Cheer for NextChat iOS Version Online!
> [👉 Click Here to Install Now](https://apps.apple.com/us/app/nextchat-ai/id6743085599)
> [❤️ Source Code Coming Soon](https://github.com/ChatGPTNextWeb/NextChat-iOS)
![Github iOS Image](https://github.com/user-attachments/assets/e0aa334f-4c13-4dc9-8310-e3b09fa4b9f3)
## 🥳 Cheer for DeepSeek, China's AI star!
> Purpose-Built UI for DeepSeek Reasoner Model
<img src="https://github.com/user-attachments/assets/f3952210-3af1-4dc0-9b81-40eaa4847d9a"/>
## 🫣 NextChat Support MCP !
> Before build, please set env ENABLE_MCP=true
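The chat.tsx changes further down import `isMcpEnabled` from `../mcp/actions`. Assuming `ENABLE_MCP` is read straight from the build environment (the repo's actual gate may do more), the check boils down to something like:

```ts
// Hypothetical sketch of the MCP gate; the real isMcpEnabled in
// app/mcp/actions.ts may differ (e.g. also checking server-side config).
export async function isMcpEnabled(): Promise<boolean> {
  // ENABLE_MCP must be set at build time, e.g. ENABLE_MCP=true yarn build
  return process.env.ENABLE_MCP === "true";
}
```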

View File: app/client/platforms/openai.ts

@@ -56,7 +56,7 @@ export interface OpenAIListModelResponse {
export interface RequestPayload {
messages: {
role: "developer" | "system" | "user" | "assistant";
role: "system" | "user" | "assistant";
content: string | MultimodalContent[];
}[];
stream?: boolean;
@@ -198,8 +198,7 @@ export class ChatGPTApi implements LLMApi {
const isDalle3 = _isDalle3(options.config.model);
const isO1OrO3 =
options.config.model.startsWith("o1") ||
options.config.model.startsWith("o3") ||
options.config.model.startsWith("o4-mini");
options.config.model.startsWith("o3");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
@@ -238,21 +237,13 @@ export class ChatGPTApi implements LLMApi {
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};
// O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
if (isO1OrO3) {
// by default the o1/o3 models will not attempt to produce output that includes markdown formatting
// manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
// (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
requestPayload["messages"].unshift({
role: "developer",
content: "Formatting re-enabled",
});
// o1/o3 uses max_completion_tokens to control the number of tokens (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
}
// add max_tokens to vision model
if (visionModel && !isO1OrO3) {
if (visionModel) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}
}
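For reference, the o1/o3 special-casing removed by this hunk implemented the pattern below — a condensed, self-contained sketch of the deleted logic (payload type abbreviated):

```ts
// Condensed sketch of the removed logic: reasoning models (o1/o3/o4-mini)
// take max_completion_tokens instead of max_tokens, and a "Formatting
// re-enabled" developer message is prepended so responses include markdown.
type Message = {
  role: "developer" | "system" | "user" | "assistant";
  content: string;
};

function applyReasoningRules(
  payload: { messages: Message[]; max_completion_tokens?: number },
  model: string,
  maxTokens: number,
): void {
  const isReasoningModel =
    model.startsWith("o1") ||
    model.startsWith("o3") ||
    model.startsWith("o4-mini");
  if (isReasoningModel) {
    payload.messages.unshift({
      role: "developer",
      content: "Formatting re-enabled",
    });
    payload.max_completion_tokens = maxTokens;
  }
}
```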

View File: app/components/chat.tsx

@@ -18,7 +18,6 @@ import ReturnIcon from "../icons/return.svg";
import CopyIcon from "../icons/copy.svg";
import SpeakIcon from "../icons/speak.svg";
import SpeakStopIcon from "../icons/speak-stop.svg";
import LoadingIcon from "../icons/three-dots.svg";
import LoadingButtonIcon from "../icons/loading.svg";
import PromptIcon from "../icons/prompt.svg";
import MaskIcon from "../icons/mask.svg";
@@ -79,8 +78,6 @@ import {
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller";
import { DalleQuality, DalleStyle, ModelSize } from "../typing";
import { Prompt, usePromptStore } from "../store/prompt";
@@ -125,14 +122,15 @@ import { getModelProvider } from "../utils/model";
import { RealtimeChat } from "@/app/components/realtime-chat";
import clsx from "clsx";
import { getAvailableClientsCount, isMcpEnabled } from "../mcp/actions";
import { Markdown } from "./markdown";
const localStorage = safeLocalStorage();
const ttsPlayer = createTTSPlayer();
const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
loading: () => <LoadingIcon />,
});
// const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
// loading: () => <LoadingIcon />,
// });
const MCPAction = () => {
const navigate = useNavigate();
@@ -1984,6 +1982,8 @@ function _Chat() {
fontFamily={fontFamily}
parentRef={scrollRef}
defaultShow={i >= messages.length - 6}
immediatelyRender={i >= messages.length - 3}
streaming={message.streaming}
/>
{getMessageImages(message).length == 1 && (
<img
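Net effect in chat.tsx: Markdown is now imported statically (the next/dynamic wrapper and its LoadingIcon fallback are retired, since lazy behavior moves inside the component itself), and each message drives the renderer through two new props. A condensed recap of the wiring above, where `messageText` stands in for however the message body is derived:

```tsx
// Recap of the props driving the new render strategies (i = message index):
<Markdown
  content={messageText}                        // the message body, as before
  defaultShow={i >= messages.length - 6}       // unchanged
  immediatelyRender={i >= messages.length - 3} // last 3 messages: render all at once
  streaming={message.streaming}                // in-flight response: streaming renderer
/>
```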

View File: app/components/markdown.tsx

@@ -267,6 +267,136 @@ function tryWrapHtmlCode(text: string) {
);
}
// Split content into paragraphs while preserving code blocks
function splitContentIntoParagraphs(content: string) {
// Check for unclosed code blocks
const codeBlockStartCount = (content.match(/```/g) || []).length;
let processedContent = content;
// Add closing tag if there's an odd number of code block markers
if (codeBlockStartCount % 2 !== 0) {
processedContent = content + "\n```";
}
// Extract code blocks
const codeBlockRegex = /```[\s\S]*?```/g;
const codeBlocks: string[] = [];
let codeBlockCounter = 0;
// Replace code blocks with placeholders
const contentWithPlaceholders = processedContent.replace(
codeBlockRegex,
(match) => {
codeBlocks.push(match);
const placeholder = `__CODE_BLOCK_${codeBlockCounter++}__`;
return placeholder;
},
);
// Split by double newlines
const paragraphs = contentWithPlaceholders
.split(/\n\n+/)
.filter((p) => p.trim());
// Restore code blocks
return paragraphs.map((p) => {
if (p.match(/__CODE_BLOCK_\d+__/)) {
return p.replace(/__CODE_BLOCK_\d+__/g, (match) => {
const index = parseInt(match.match(/\d+/)?.[0] || "0");
return codeBlocks[index] || match;
});
}
return p;
});
}
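A worked example of the splitter, using the function exactly as defined above. Prose splits on blank lines, while a fenced block — including blank lines inside the fence — stays whole; an unclosed fence is closed first, so a half-streamed code block still arrives as a single paragraph:

```ts
const input =
  "Intro paragraph.\n\n```js\nconst x = 1;\n\nconst y = 2;\n```\n\nClosing paragraph.";

splitContentIntoParagraphs(input);
// => [
//   "Intro paragraph.",
//   "```js\nconst x = 1;\n\nconst y = 2;\n```",
//   "Closing paragraph.",
// ]
```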
// Lazy-loaded paragraph component
function MarkdownParagraph({
content,
onLoad,
}: {
content: string;
onLoad?: () => void;
}) {
const [isLoaded, setIsLoaded] = useState(false);
const placeholderRef = useRef<HTMLDivElement>(null);
const [isVisible, setIsVisible] = useState(false);
useEffect(() => {
let observer: IntersectionObserver;
if (placeholderRef.current) {
observer = new IntersectionObserver(
(entries) => {
if (entries[0].isIntersecting) {
setIsVisible(true);
}
},
{ threshold: 0.1, rootMargin: "200px 0px" },
);
observer.observe(placeholderRef.current);
}
return () => observer?.disconnect();
}, []);
useEffect(() => {
if (isVisible && !isLoaded) {
setIsLoaded(true);
onLoad?.();
}
}, [isVisible, isLoaded, onLoad]);
// Generate preview content
const previewContent = useMemo(() => {
if (content.startsWith("```")) {
return "```" + (content.split("\n")[0] || "").slice(3) + "...```";
}
return content.length > 60 ? content.slice(0, 60) + "..." : content;
}, [content]);
return (
<div className="markdown-paragraph" ref={placeholderRef}>
{!isLoaded ? (
<div className="markdown-paragraph-placeholder">{previewContent}</div>
) : (
<_MarkDownContent content={content} />
)}
</div>
);
}
// Memoized paragraph component to prevent unnecessary re-renders
const MemoizedMarkdownParagraph = React.memo(
({ content }: { content: string }) => {
return <_MarkDownContent content={content} />;
},
(prevProps, nextProps) => prevProps.content === nextProps.content,
);
MemoizedMarkdownParagraph.displayName = "MemoizedMarkdownParagraph";
// Specialized component for streaming content
function StreamingMarkdownContent({ content }: { content: string }) {
const paragraphs = useMemo(
() => splitContentIntoParagraphs(content),
[content],
);
const lastParagraphRef = useRef<HTMLDivElement>(null);
return (
<div className="markdown-streaming-content">
{paragraphs.map((paragraph, index) => (
<div
key={`p-${index}-${paragraph.substring(0, 20)}`}
className="markdown-paragraph markdown-streaming-paragraph"
ref={index === paragraphs.length - 1 ? lastParagraphRef : null}
>
<MemoizedMarkdownParagraph content={paragraph} />
</div>
))}
</div>
);
}
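The memo comparator is what makes the streaming path cheap: between streaming ticks, every finished paragraph's string is identical, so only the growing last paragraph re-parses. A tiny illustration:

```ts
// Successive streaming ticks: only the last paragraph's content changes.
const tickA = ["Intro.", "```js\nconst x = 1;\n```", "The answer i"];
const tickB = ["Intro.", "```js\nconst x = 1;\n```", "The answer is 42."];
tickA.map((p, i) => p === tickB[i]); // [true, true, false]
// MemoizedMarkdownParagraph re-renders only where the comparison is false.
```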
function _MarkDownContent(props: { content: string }) {
const escapedContent = useMemo(() => {
return tryWrapHtmlCode(escapeBrackets(props.content));
@@ -326,9 +456,27 @@ export function Markdown(
fontFamily?: string;
parentRef?: RefObject<HTMLDivElement>;
defaultShow?: boolean;
immediatelyRender?: boolean;
streaming?: boolean; // Whether this is a streaming response
} & React.DOMAttributes<HTMLDivElement>,
) {
const mdRef = useRef<HTMLDivElement>(null);
const paragraphs = useMemo(
() => splitContentIntoParagraphs(props.content),
[props.content],
);
const [loadedCount, setLoadedCount] = useState(0);
// Determine rendering strategy based on props
const shouldAsyncRender =
!props.immediatelyRender && !props.streaming && paragraphs.length > 1;
useEffect(() => {
// Immediately render all paragraphs if specified
if (props.immediatelyRender) {
setLoadedCount(paragraphs.length);
}
}, [props.immediatelyRender, paragraphs.length]);
return (
<div
@@ -344,6 +492,24 @@ export function Markdown(
>
{props.loading ? (
<LoadingIcon />
) : props.streaming ? (
// Use specialized component for streaming content
<StreamingMarkdownContent content={props.content} />
) : shouldAsyncRender ? (
<div className="markdown-content">
{paragraphs.map((paragraph, index) => (
<MarkdownParagraph
key={index}
content={paragraph}
onLoad={() => setLoadedCount((prev) => prev + 1)}
/>
))}
{loadedCount < paragraphs.length && loadedCount > 0 && (
<div className="markdown-paragraph-loading">
<LoadingIcon />
</div>
)}
</div>
) : (
<MarkdownContent content={props.content} />
)}
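To summarize the branch logic above, the exported Markdown component now chooses between three render paths; a condensed restatement (names mirror the components defined earlier):

```ts
// Condensed restatement of the strategy implemented above.
function pickRenderPath(opts: {
  streaming?: boolean;
  immediatelyRender?: boolean;
  paragraphCount: number;
}): "streaming" | "lazy" | "direct" {
  if (opts.streaming) return "streaming"; // StreamingMarkdownContent
  if (!opts.immediatelyRender && opts.paragraphCount > 1) {
    return "lazy"; // MarkdownParagraph list with IntersectionObserver
  }
  return "direct"; // MarkdownContent renders everything in one pass
}
```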

View File: app/constant.ts

@@ -417,14 +417,6 @@ export const KnowledgeCutOffDate: Record<string, string> = {
"gpt-4-turbo": "2023-12",
"gpt-4-turbo-2024-04-09": "2023-12",
"gpt-4-turbo-preview": "2023-12",
"gpt-4.1": "2024-06",
"gpt-4.1-2025-04-14": "2024-06",
"gpt-4.1-mini": "2024-06",
"gpt-4.1-mini-2025-04-14": "2024-06",
"gpt-4.1-nano": "2024-06",
"gpt-4.1-nano-2025-04-14": "2024-06",
"gpt-4.5-preview": "2023-10",
"gpt-4.5-preview-2025-02-27": "2023-10",
"gpt-4o": "2023-10",
"gpt-4o-2024-05-13": "2023-10",
"gpt-4o-2024-08-06": "2023-10",
@@ -466,7 +458,6 @@ export const DEFAULT_TTS_VOICES = [
export const VISION_MODEL_REGEXES = [
/vision/,
/gpt-4o/,
/gpt-4\.1/,
/claude-3/,
/gemini-1\.5/,
/gemini-exp/,
@@ -478,8 +469,6 @@ export const VISION_MODEL_REGEXES = [
/^dall-e-3$/, // Matches exactly "dall-e-3"
/glm-4v/,
/vl/i,
/o3/,
/o4-mini/,
];
export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
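These regex lists are consumed by a vision-capability check elsewhere in the codebase; a sketch of the typical shape, assuming a helper along these lines (the repo's actual isVisionModel may differ):

```ts
// Sketch: a model counts as vision-capable when it matches one of the
// include regexes and none of the exclude regexes. Hypothetical helper shape.
function isVisionModel(model: string): boolean {
  return (
    !EXCLUDE_VISION_MODEL_REGEXES.some((re) => re.test(model)) &&
    VISION_MODEL_REGEXES.some((re) => re.test(model))
  );
}

isVisionModel("gpt-4o-mini");               // true  (matches /gpt-4o/)
isVisionModel("claude-3-5-haiku-20241022"); // false (explicitly excluded)
```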
@@ -496,14 +485,6 @@ const openaiModels = [
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
@@ -518,20 +499,23 @@ const openaiModels = [
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
];
const googleModels = [
"gemini-1.0-pro", // Deprecated on 2/15/2025
"gemini-1.5-pro-latest",
"gemini-1.5-pro",
"gemini-1.5-pro-002",
"gemini-1.5-pro-exp-0827",
"gemini-1.5-flash-latest",
"gemini-1.5-flash-8b-latest",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-1.5-flash-002",
"gemini-1.5-flash-exp-0827",
"learnlm-1.5-pro-experimental",
"gemini-exp-1114",
"gemini-exp-1121",
"gemini-exp-1206",
"gemini-2.0-flash",
"gemini-2.0-flash-exp",
@@ -541,7 +525,6 @@ const googleModels = [
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-2.5-pro-preview-06-05",
];
const anthropicModels = [
@@ -628,18 +611,6 @@ const xAIModes = [
"grok-2-vision-1212",
"grok-2-vision",
"grok-2-vision-latest",
"grok-3-mini-fast-beta",
"grok-3-mini-fast",
"grok-3-mini-fast-latest",
"grok-3-mini-beta",
"grok-3-mini",
"grok-3-mini-latest",
"grok-3-fast-beta",
"grok-3-fast",
"grok-3-fast-latest",
"grok-3-beta",
"grok-3",
"grok-3-latest",
];
const chatglmModels = [

View File: app/styles/markdown.scss

@@ -99,6 +99,7 @@
font-size: 14px;
line-height: 1.5;
word-wrap: break-word;
margin-bottom: 0;
}
.light {
@@ -358,8 +359,14 @@
.markdown-body kbd {
display: inline-block;
padding: 3px 5px;
font: 11px ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font:
11px ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
line-height: 10px;
color: var(--color-fg-default);
vertical-align: middle;
@@ -448,16 +455,28 @@
.markdown-body tt,
.markdown-body code,
.markdown-body samp {
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font-family:
ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
font-size: 12px;
}
.markdown-body pre {
margin-top: 0;
margin-bottom: 0;
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace;
font-family:
ui-monospace,
SFMono-Regular,
SF Mono,
Menlo,
Consolas,
Liberation Mono,
monospace;
font-size: 12px;
word-wrap: normal;
}
@@ -1130,3 +1149,87 @@
#dmermaid {
display: none;
}
.markdown-content {
width: 100%;
}
.markdown-paragraph {
transition: opacity 0.3s ease;
margin-bottom: 0.5em;
&.markdown-paragraph-visible {
opacity: 1;
}
&.markdown-paragraph-hidden {
opacity: 0.7;
}
}
.markdown-paragraph-placeholder {
padding: 8px;
color: var(--color-fg-subtle);
background-color: var(--color-canvas-subtle);
border-radius: 6px;
border-left: 3px solid var(--color-border-muted);
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
font-family: var(--font-family-sans);
font-size: 14px;
min-height: 1.2em;
}
.markdown-paragraph-loading {
height: 20px;
background-color: var(--color-canvas-subtle);
border-radius: 6px;
margin-bottom: 8px;
position: relative;
overflow: hidden;
&::after {
content: "";
position: absolute;
top: 0;
left: 0;
width: 30%;
height: 100%;
background: linear-gradient(
90deg,
transparent,
rgba(255, 255, 255, 0.1),
transparent
);
animation: shimmer 1.5s infinite;
}
}
@keyframes shimmer {
0% {
transform: translateX(-100%);
}
100% {
transform: translateX(200%);
}
}
.markdown-streaming-content {
width: 100%;
}
.markdown-streaming-paragraph {
opacity: 1;
animation: fadeIn 0.3s ease-in-out;
margin-bottom: 0.5em;
}
@keyframes fadeIn {
from {
opacity: 0.5;
}
to {
opacity: 1;
}
}

View File: package.json

@@ -83,7 +83,7 @@
"jest": "^29.7.0",
"jest-environment-jsdom": "^29.7.0",
"lint-staged": "^13.2.2",
"prettier": "^3.6.2",
"prettier": "^3.0.2",
"ts-node": "^10.9.2",
"tsx": "^4.16.0",
"typescript": "5.2.2",

View File: yarn.lock

@@ -7076,10 +7076,10 @@ prettier-linter-helpers@^1.0.0:
dependencies:
fast-diff "^1.1.2"
prettier@^3.6.2:
version "3.6.2"
resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.6.2.tgz#ccda02a1003ebbb2bfda6f83a074978f608b9393"
integrity sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==
prettier@^3.0.2:
version "3.0.2"
resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.2.tgz#78fcecd6d870551aa5547437cdae39d4701dca5b"
integrity sha512-o2YR9qtniXvwEZlOKbveKfDQVyqxbEIWn48Z8m3ZJjBjcCmUy3xZGIv+7AkaeuaTr6yPXJjwv07ZWlsWbEy1rQ==
pretty-format@^27.0.2:
version "27.5.1"