support glm-4v-flash

dupl 2024-12-21 08:17:58 +08:00
parent 0a056a7c5c
commit 1d8029301b
2 changed files with 2 additions and 1 deletion


@@ -240,7 +240,7 @@ export class ChatGPTApi implements LLMApi {
       }

       // add max_tokens to vision model
-      if (visionModel) {
+      if (visionModel && modelConfig.model !== "glm-4v-flash") {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }
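
The net effect of this hunk: vision models still receive a max_tokens floor of 4000, but glm-4v-flash is now skipped, presumably because that endpoint does not accept the parameter. A minimal TypeScript sketch of the resulting payload logic (the RequestPayload shape and the buildPayload helper are illustrative stand-ins, not the project's actual types):

// Illustrative sketch; RequestPayload and buildPayload are hypothetical
// stand-ins for the structures used inside ChatGPTApi.
interface RequestPayload {
  model: string;
  messages: unknown[];
  max_tokens?: number;
}

function buildPayload(
  modelConfig: { model: string; max_tokens: number },
  visionModel: boolean,
): RequestPayload {
  const requestPayload: RequestPayload = {
    model: modelConfig.model,
    messages: [],
  };
  // Vision models get a floor of 4000 tokens; glm-4v-flash is excluded
  // from this floor by the commit above.
  if (visionModel && modelConfig.model !== "glm-4v-flash") {
    requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
  }
  return requestPayload;
}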


@@ -265,6 +265,7 @@ export function isVisionModel(model: string) {
     "learnlm",
     "qwen-vl",
     "qwen2-vl",
+    "glm-4v",
   ];
   const isGpt4Turbo =
     model.includes("gpt-4-turbo") && !model.includes("preview");
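
Adding "glm-4v" to this keyword list makes every model id containing that substring count as a vision model, including "glm-4v-flash"; that is exactly why the first hunk has to exclude glm-4v-flash from the max_tokens floor explicitly. A sketch of the check, assuming substring matching as the visible gpt-4-turbo special case suggests (the keyword list is abbreviated to the entries shown in the diff):

// Sketch of isVisionModel, inferred from the visible hunk; the real
// function may carry more keywords and cases than shown here.
const visionKeywords = ["learnlm", "qwen-vl", "qwen2-vl", "glm-4v"];

export function isVisionModel(model: string): boolean {
  const isGpt4Turbo =
    model.includes("gpt-4-turbo") && !model.includes("preview");
  return (
    visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo
  );
}

// Substring matching means "glm-4v-flash" matches the "glm-4v" keyword:
isVisionModel("glm-4v-flash"); // true
isVisionModel("glm-4v"); // true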