diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index c6f3fc425..3ff1b5c7a 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -225,6 +225,23 @@ export class ChatGPTApi implements LLMApi {
       }
 
       // O1 does not support image, tools (plugin in ChatGPTNextWeb), system, stream, logprobs, temperature, top_p, n, presence_penalty, or frequency_penalty yet.
+
+      const isNewModel = options.config.model.endsWith("-search-preview");
+
+      requestPayload = {
+        messages,
+        stream: options.config.stream,
+        model: modelConfig.model,
+        ...(isNewModel
+          ? {} // search-preview models do not accept sampling parameters
+          : {
+              temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+              presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
+              frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
+              top_p: !isO1OrO3 ? modelConfig.top_p : 1,
+            }),
+      };
+      /*
       requestPayload = {
         messages,
         stream: options.config.stream,
@@ -236,7 +253,7 @@ export class ChatGPTApi implements LLMApi {
         // max_tokens: Math.max(modelConfig.max_tokens, 1024),
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
-
+      */
       // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
       if (isO1OrO3) {
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
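
The change relies on TypeScript's conditional object spread: spreading `{}` contributes no keys, so for `-search-preview` models the sampling parameters are omitted from the serialized request body entirely rather than sent as `null` or default values. A minimal standalone sketch of the pattern, assuming a trimmed-down config shape (`SamplingConfig` and `buildPayload` are illustrative names, not part of the PR):

```ts
// Illustrative sketch, not part of the PR: shows why the conditional
// spread omits keys instead of sending them with placeholder values.
interface SamplingConfig {
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
}

function buildPayload(config: SamplingConfig, messages: unknown[], stream: boolean) {
  const isSearchPreview = config.model.endsWith("-search-preview");
  return {
    messages,
    stream,
    model: config.model,
    // Spreading {} adds no keys, so the JSON body for a -search-preview
    // model contains no sampling parameters at all.
    ...(isSearchPreview
      ? {}
      : {
          temperature: config.temperature,
          presence_penalty: config.presence_penalty,
          frequency_penalty: config.frequency_penalty,
          top_p: config.top_p,
        }),
  };
}

// For "gpt-4o-search-preview" the result is { messages, stream, model } only;
// JSON.stringify(...) therefore emits no temperature/top_p/penalty fields.
```

This matters because sending an explicit `temperature: null` (or any sampling field) is not equivalent to omitting the key: an API that rejects the parameter will typically error on its mere presence, which is presumably why the PR excludes the keys rather than nulling them.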