Compare commits

..

1 Commit

Author: dependabot[bot]
SHA1: 21e032492c
Date: 2025-07-28 17:43:27 +00:00

chore(deps): bump remark-gfm from 3.0.1 to 4.0.1

Bumps [remark-gfm](https://github.com/remarkjs/remark-gfm) from 3.0.1 to 4.0.1.
- [Release notes](https://github.com/remarkjs/remark-gfm/releases)
- [Commits](https://github.com/remarkjs/remark-gfm/compare/3.0.1...4.0.1)

---
updated-dependencies:
- dependency-name: remark-gfm
  dependency-version: 4.0.1
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
5 changed files with 552 additions and 224 deletions
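
The update-type above marks this as a semver-major bump: the remark-gfm plugin call site is the same on 3.x and 4.x, but the 4.x major targets the newer unified toolchain, so the packages around it generally have to move in step. A minimal sketch of how the plugin is wired into a pipeline, assuming a generic unified consumer rather than this repo's actual markdown component:

// Minimal sketch, assuming a generic unified pipeline (not this repo's code).
// The .use(remarkGfm) call site is identical across the 3.x -> 4.x major;
// what changes underneath is the unified/mdast plumbing it expects.
import { unified } from "unified";
import remarkParse from "remark-parse";
import remarkGfm from "remark-gfm";
import remarkRehype from "remark-rehype";
import rehypeStringify from "rehype-stringify";

// top-level await, so this sketch assumes an ES module context
const file = await unified()
  .use(remarkParse)
  .use(remarkGfm) // GFM tables, strikethrough, task lists, autolinks, footnotes
  .use(remarkRehype)
  .use(rehypeStringify)
  .process("| a | b |\n| - | - |\n| 1 | 2 |");

console.log(String(file)); // emits the GFM table as an HTML <table>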

app/client/platforms/openai.ts

@@ -200,7 +200,6 @@ export class ChatGPTApi implements LLMApi {
       options.config.model.startsWith("o1") ||
       options.config.model.startsWith("o3") ||
       options.config.model.startsWith("o4-mini");
-    const isGpt5 = options.config.model.startsWith("gpt-5");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -231,7 +230,7 @@ export class ChatGPTApi implements LLMApi {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
-      temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1,
+      temperature: !isO1OrO3 ? modelConfig.temperature : 1,
       presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
       frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
       top_p: !isO1OrO3 ? modelConfig.top_p : 1,
@@ -239,13 +238,7 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };

-    if (isGpt5) {
-      // Remove max_tokens if present
-      delete requestPayload.max_tokens;
-      // Add max_completion_tokens (or max_completion_tokens if that's what you meant)
-      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
-    } else if (isO1OrO3) {
+    if (isO1OrO3) {
       // by default the o1/o3 models will not attempt to produce output that includes markdown formatting
       // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses
       // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output)
@@ -258,9 +251,8 @@ export class ChatGPTApi implements LLMApi {
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }

     // add max_tokens to vision model
-    if (visionModel && !isO1OrO3 && ! isGpt5) {
+    if (visionModel && !isO1OrO3) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
   }
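
With the gpt-5 branches removed, the request shaping in this file reduces to the o1/o3/o4-mini special-casing: sampling knobs are pinned to neutral values for those reasoning models, which also budget output via max_completion_tokens instead of max_tokens. A compact sketch of the logic the hunks converge on (identifiers follow the diff; the wrapper function and config type are illustrative, not code from the repo):

// Illustrative wrapper (assumed), condensing the post-change behavior above.
interface ModelConfig {
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens: number;
}

function shapePayload(cfg: ModelConfig, visionModel: boolean) {
  const isO1OrO3 =
    cfg.model.startsWith("o1") ||
    cfg.model.startsWith("o3") ||
    cfg.model.startsWith("o4-mini");

  const payload: Record<string, unknown> = {
    model: cfg.model,
    // reasoning models ignore sampling parameters, so pin them to neutral values
    temperature: !isO1OrO3 ? cfg.temperature : 1,
    presence_penalty: !isO1OrO3 ? cfg.presence_penalty : 0,
    frequency_penalty: !isO1OrO3 ? cfg.frequency_penalty : 0,
    top_p: !isO1OrO3 ? cfg.top_p : 1,
  };

  if (isO1OrO3) {
    // reasoning models take max_completion_tokens rather than max_tokens
    payload["max_completion_tokens"] = cfg.max_tokens;
  } else if (visionModel) {
    // vision models get a max_tokens floor so image-heavy replies are not truncated
    payload["max_tokens"] = Math.max(cfg.max_tokens, 4000);
  }
  return payload;
}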

app/constant.ts

@@ -493,7 +493,6 @@ export const VISION_MODEL_REGEXES = [
   /o3/,
   /o4-mini/,
   /grok-4/i,
-  /gpt-5/
 ];

 export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/];
@@ -518,11 +517,6 @@ const openaiModels = [
   "gpt-4.1-nano-2025-04-14",
   "gpt-4.5-preview",
   "gpt-4.5-preview-2025-02-27",
-  "gpt-5-chat",
-  "gpt-5-mini",
-  "gpt-5-nano",
-  "gpt-5",
-  "gpt-5-chat-2025-01-01-preview",
   "gpt-4o",
   "gpt-4o-2024-05-13",
   "gpt-4o-2024-08-06",

package.json

@@ -39,7 +39,7 @@
     "markdown-to-txt": "^2.0.1",
     "mermaid": "^10.6.1",
     "nanoid": "^5.0.3",
-    "next": "^14.2.32",
+    "next": "^14.1.1",
     "node-fetch": "^3.3.1",
     "openapi-client-axios": "^7.5.5",
     "react": "^18.2.0",
@@ -49,7 +49,7 @@
     "rehype-highlight": "^6.0.0",
     "rehype-katex": "^6.0.3",
     "remark-breaks": "^3.0.2",
-    "remark-gfm": "^3.0.1",
+    "remark-gfm": "^4.0.1",
     "remark-math": "^5.1.1",
     "rt-client": "https://github.com/Azure-Samples/aoai-realtime-audio-sdk/releases/download/js/v0.5.0/rt-client-0.5.0.tgz",
     "sass": "^1.59.2",

src-tauri/tauri.conf.json

@@ -9,7 +9,7 @@
   },
   "package": {
     "productName": "NextChat",
-    "version": "2.16.1"
+    "version": "2.15.8"
   },
   "tauri": {
     "allowlist": {

yarn.lock (750 changed lines)

File diff suppressed because it is too large