support glm-4v-flash
parent 0a056a7c5c
commit 1d8029301b
@@ -240,7 +240,7 @@ export class ChatGPTApi implements LLMApi {
     }

     // add max_tokens to vision model
-    if (visionModel) {
+    if (visionModel && modelConfig.model !== "glm-4v-flash") {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
   }
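The effect of this hunk, as a standalone sketch: the model stays on the vision path, but its request payload is sent without a max_tokens field, presumably because the GLM-4V-Flash endpoint rejects that parameter. The helper name applyVisionMaxTokens and the ModelConfig shape below are illustrative assumptions, not the app's actual types.

// Sketch of the patched condition; applyVisionMaxTokens and ModelConfig
// are illustrative names, not part of the codebase.
interface ModelConfig {
  model: string;
  max_tokens: number;
}

function applyVisionMaxTokens(
  requestPayload: Record<string, unknown>,
  modelConfig: ModelConfig,
  visionModel: boolean,
): void {
  // glm-4v-flash is excluded: its payload goes out without max_tokens
  // (assumption: the GLM-4V-Flash API does not accept the field).
  if (visionModel && modelConfig.model !== "glm-4v-flash") {
    requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
  }
}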
@@ -265,6 +265,7 @@ export function isVisionModel(model: string) {
     "learnlm",
     "qwen-vl",
     "qwen2-vl",
+    "glm-4v",
   ];
   const isGpt4Turbo =
     model.includes("gpt-4-turbo") && !model.includes("preview");
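The two hunks work together through substring matching. A simplified sketch of the detection logic (the keyword list is abbreviated to the entries visible in the hunk above; the real list in the file is longer) shows why adding "glm-4v" is enough to also cover "glm-4v-flash":

// Simplified sketch of isVisionModel; keyword list abbreviated to the
// entries shown in the diff context above.
const visionKeywords = ["learnlm", "qwen-vl", "qwen2-vl", "glm-4v"];

function isVisionModel(model: string): boolean {
  const isGpt4Turbo =
    model.includes("gpt-4-turbo") && !model.includes("preview");
  return isGpt4Turbo || visionKeywords.some((k) => model.includes(k));
}

// "glm-4v-flash" contains the substring "glm-4v", so it is detected as a
// vision model, while the first hunk keeps max_tokens out of its payload.
console.log(isVisionModel("glm-4v-flash")); // true
console.log(isVisionModel("glm-4-plus")); // false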