Fix: `max_tokens` was missing from the request payload when the vision model's name does not contain "vision" (previously it was only added for models whose name includes "preview"); now it is added for all vision models.
This commit is contained in:
parent
ffe32694b0
commit
56eb9d1430
|
@ -190,7 +190,7 @@ export class ChatGPTApi implements LLMApi {
|
||||||
};
|
};
|
||||||
|
|
||||||
// add max_tokens to vision model
|
// add max_tokens to vision model
|
||||||
if (visionModel && modelConfig.model.includes("preview")) {
|
if (visionModel) {
|
||||||
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
|
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue