From c0f2ab6de30d058cf3e5e8b820d0ccadcdd6579f Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 8 Aug 2025 15:01:50 +0800 Subject: [PATCH 1/5] add gpt-5 --- app/constant.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/app/constant.ts b/app/constant.ts index 6bececb55..c2a633471 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -493,6 +493,7 @@ export const VISION_MODEL_REGEXES = [ /o3/, /o4-mini/, /grok-4/i, + /gpt-5/ ]; export const EXCLUDE_VISION_MODEL_REGEXES = [/claude-3-5-haiku-20241022/]; @@ -517,6 +518,11 @@ const openaiModels = [ "gpt-4.1-nano-2025-04-14", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", + "gpt-5-chat", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5" + "gpt-5-chat-2025-01-01-preview" "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", From 8ae6883784eb15aa6fbc28c445172ce2413e9fb1 Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 8 Aug 2025 15:05:44 +0800 Subject: [PATCH 2/5] add gpt-5 --- app/constant.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/constant.ts b/app/constant.ts index c2a633471..5f36ba25c 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -521,8 +521,8 @@ const openaiModels = [ "gpt-5-chat", "gpt-5-mini", "gpt-5-nano", - "gpt-5" - "gpt-5-chat-2025-01-01-preview" + "gpt-5", + "gpt-5-chat-2025-01-01-preview", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", From 42eff644b4a169c8b812f6a2b6b493f9f9a655eb Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 8 Aug 2025 15:28:54 +0800 Subject: [PATCH 3/5] use max_completion_tokens --- app/client/platforms/openai.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 51ae71ea6..715637692 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -200,6 +200,7 @@ export class ChatGPTApi implements LLMApi { options.config.model.startsWith("o1") || options.config.model.startsWith("o3") || options.config.model.startsWith("o4-mini"); + const isGpt5 = 
options.config.model.startsWith("gpt-5"); if (isDalle3) { const prompt = getMessageTextContent( options.messages.slice(-1)?.pop() as any, @@ -251,6 +252,10 @@ export class ChatGPTApi implements LLMApi { requestPayload["max_completion_tokens"] = modelConfig.max_tokens; } + if (isGpt5) { + requestPayload["max_completion_tokens"] = modelConfig.max_tokens; + } + // add max_tokens to vision model if (visionModel && !isO1OrO3) { requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); From 05118089004c462491a174e3fbd3a0e2816a0e5c Mon Sep 17 00:00:00 2001 From: Sam Date: Fri, 8 Aug 2025 16:13:12 +0800 Subject: [PATCH 4/5] use max_completion_tokens --- app/client/platforms/openai.ts | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 715637692..ec3c5fbd2 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -239,7 +239,13 @@ export class ChatGPTApi implements LLMApi { // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. 
}; - if (isO1OrO3) { + if (isGpt5) { + // Remove max_tokens if present + delete requestPayload.max_tokens; + // GPT-5 models only accept max_completion_tokens; max_tokens is rejected + requestPayload["max_completion_tokens"] = modelConfig.max_tokens; + + } else if (isO1OrO3) { // by default the o1/o3 models will not attempt to produce output that includes markdown formatting // manually add "Formatting re-enabled" developer message to encourage markdown inclusion in model responses // (https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#markdown-output) @@ -252,12 +258,9 @@ export class ChatGPTApi implements LLMApi { requestPayload["max_completion_tokens"] = modelConfig.max_tokens; } - if (isGpt5) { - requestPayload["max_completion_tokens"] = modelConfig.max_tokens; - } // add max_tokens to vision model - if (visionModel && !isO1OrO3) { + if (visionModel && !isO1OrO3 && !isGpt5) { requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); } } From 38ac502d8008002735166ecbe7fffa150bbe922b Mon Sep 17 00:00:00 2001 From: Sam Date: Sat, 9 Aug 2025 16:57:31 +0800 Subject: [PATCH 5/5] Add support for GPT5 --- app/client/platforms/openai.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index ec3c5fbd2..cfbff99e9 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -231,7 +231,7 @@ export class ChatGPTApi implements LLMApi { messages, stream: options.config.stream, model: modelConfig.model, - temperature: !isO1OrO3 ? modelConfig.temperature : 1, + temperature: (!isO1OrO3 && !isGpt5) ? modelConfig.temperature : 1, presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0, frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0, top_p: !isO1OrO3 ? modelConfig.top_p : 1,