fix max_completions_tokens

This commit is contained in:
lyf 2024-10-16 16:24:02 +08:00
parent c139038e01
commit ebe617b733
2 changed files with 10 additions and 7 deletions

View File

@@ -63,7 +63,7 @@ export interface RequestPayload {
presence_penalty: number;
frequency_penalty: number;
top_p: number;
max_tokens?: number;
max_completions_tokens?: number;
}
export interface DalleRequestPayload {
@@ -228,13 +228,16 @@ export class ChatGPTApi implements LLMApi {
presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
top_p: !isO1 ? modelConfig.top_p : 1,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
// max_completions_tokens: Math.max(modelConfig.max_completions_tokens, 1024),
// Please do not ask me why we do not send max_completions_tokens; no reason, this param is just shit, I don't want to explain anymore.
};
// add max_tokens to vision model
// add max_completions_tokens to vision model
if (visionModel) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
requestPayload["max_completions_tokens"] = Math.max(
modelConfig.max_completions_tokens,
4000,
);
}
}

View File

@@ -65,7 +65,7 @@ export const DEFAULT_CONFIG = {
providerName: "OpenAI" as ServiceProvider,
temperature: 0.5,
top_p: 1,
max_tokens: 4000,
max_completions_tokens: 4000,
presence_penalty: 0,
frequency_penalty: 0,
sendMemory: true,
@@ -127,7 +127,7 @@ export const ModalConfigValidator = {
model(x: string) {
return x as ModelType;
},
max_tokens(x: number) {
max_completions_tokens(x: number) {
return limitNumber(x, 0, 512000, 1024);
},
presence_penalty(x: number) {