fix: remove inappropriate parameters for o3 from the request

AndrewS 2025-02-02 22:36:34 +01:00
parent 1e20b64048
commit 566406f62d
1 changed file with 7 additions and 6 deletions

@@ -196,6 +196,7 @@ export class ChatGPTApi implements LLMApi {
     const isDalle3 = _isDalle3(options.config.model);
     const isO1 = options.config.model.startsWith("o1");
+    const isO3 = options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -217,7 +218,7 @@ export class ChatGPTApi implements LLMApi {
       const content = visionModel
         ? await preProcessImageContent(v.content)
         : getMessageTextContent(v);
-      if (!(isO1 && v.role === "system"))
+      if (!((isO1 || isO3) && v.role === "system"))
         messages.push({ role: v.role, content });
     }
@@ -226,16 +227,16 @@ export class ChatGPTApi implements LLMApi {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
-      temperature: !isO1 ? modelConfig.temperature : 1,
-      presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
-      frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
-      top_p: !isO1 ? modelConfig.top_p : 1,
+      temperature: !(isO1 || isO3) ? modelConfig.temperature : 1,
+      presence_penalty: !(isO1 || isO3) ? modelConfig.presence_penalty : 0,
+      frequency_penalty: !(isO1 || isO3) ? modelConfig.frequency_penalty : 0,
+      top_p: !(isO1 || isO3) ? modelConfig.top_p : 1,
       // max_tokens: Math.max(modelConfig.max_tokens, 1024),
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
     // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
-    if (isO1) {
+    if (isO1 || isO3) {
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }
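
For context, the pattern this commit extends can be seen in isolation in the sketch below: one model-prefix check gates both the system-message filtering and the request parameters, which is why the change touches three hunks. The ModelConfig and ChatMessage shapes and the buildPayload helper are simplified stand-ins for illustration, not the project's actual types.

// A minimal standalone sketch of the o1/o3 gating pattern, under the
// assumptions above; buildPayload and both interfaces are hypothetical.

interface ModelConfig {
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens: number;
}

interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

function buildPayload(modelConfig: ModelConfig, history: ChatMessage[]) {
  // o1/o3 reasoning models reject the usual sampling parameters and
  // do not accept system messages, so both are gated on the model prefix.
  const isReasoning =
    modelConfig.model.startsWith("o1") || modelConfig.model.startsWith("o3");

  // Drop system messages for reasoning models, keep them for everything else.
  const messages = history.filter(
    (m) => !(isReasoning && m.role === "system"),
  );

  const payload: Record<string, unknown> = {
    model: modelConfig.model,
    messages,
    // For reasoning models, pin the sampling knobs to their API defaults
    // instead of sending the user-configured values.
    temperature: isReasoning ? 1 : modelConfig.temperature,
    presence_penalty: isReasoning ? 0 : modelConfig.presence_penalty,
    frequency_penalty: isReasoning ? 0 : modelConfig.frequency_penalty,
    top_p: isReasoning ? 1 : modelConfig.top_p,
  };

  // Reasoning models take max_completion_tokens rather than max_tokens
  // (https://platform.openai.com/docs/guides/reasoning#controlling-costs).
  if (isReasoning) {
    payload["max_completion_tokens"] = modelConfig.max_tokens;
  }

  return payload;
}

Deriving a single boolean from the model name keeps the filter and the payload in agreement; a future model family with the same constraints would only need one more startsWith clause rather than edits in three places.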