Compare commits

...

1 Commit

Author SHA1 Message Date
lyf
ebe617b733 fix max_completions_tokens 2024-10-16 16:24:02 +08:00
2 changed files with 10 additions and 7 deletions

View File

@@ -63,7 +63,7 @@ export interface RequestPayload {
   presence_penalty: number;
   frequency_penalty: number;
   top_p: number;
-  max_tokens?: number;
+  max_completions_tokens?: number;
 }
export interface DalleRequestPayload { export interface DalleRequestPayload {
@@ -228,13 +228,16 @@ export class ChatGPTApi implements LLMApi {
       presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
       frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
       top_p: !isO1 ? modelConfig.top_p : 1,
-      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
-      // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
+      // max_completions_tokens: Math.max(modelConfig.max_completions_tokens, 1024),
+      // Please do not ask me why not send max_completions_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
-    // add max_tokens to vision model
+    // add max_completions_tokens to vision model
     if (visionModel) {
-      requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
+      requestPayload["max_completions_tokens"] = Math.max(
+        modelConfig.max_completions_tokens,
+        4000,
+      );
     }
   }

View File

@@ -65,7 +65,7 @@ export const DEFAULT_CONFIG = {
   providerName: "OpenAI" as ServiceProvider,
   temperature: 0.5,
   top_p: 1,
-  max_tokens: 4000,
+  max_completions_tokens: 4000,
   presence_penalty: 0,
   frequency_penalty: 0,
   sendMemory: true,
@@ -127,7 +127,7 @@ export const ModalConfigValidator = {
   model(x: string) {
     return x as ModelType;
   },
-  max_tokens(x: number) {
+  max_completions_tokens(x: number) {
     return limitNumber(x, 0, 512000, 1024);
   },
   presence_penalty(x: number) {