Mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git, synced 2025-08-08 23:56:33 +08:00
Add vision support (#4076)
@@ -9,7 +9,14 @@ import {
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

-import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  LLMUsage,
+  MultimodalContent,
+} from "../api";
 import Locale from "../../locales";
 import {
   EventStreamContentType,
@@ -18,6 +25,11 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { makeAzurePath } from "@/app/azure";
+import {
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+} from "@/app/utils";

 export interface OpenAIListModelResponse {
   object: string;
@@ -72,9 +84,10 @@ export class ChatGPTApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: v.content,
+      content: visionModel ? v.content : getMessageTextContent(v),
     }));

     const modelConfig = {
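The gist of the change to the ChatGPTApi client: chat() now checks whether the selected model is a vision model, and only then passes the multimodal content through unchanged; for every other model the message is flattened to plain text via getMessageTextContent. The helpers themselves (getMessageTextContent, getMessageImages, isVisionModel) are imported from "@/app/utils" and their bodies are not part of the hunks shown. The sketch below illustrates, under assumed types and names, how such helpers could behave; it is not the repository's actual implementation.

// Sketch only: illustrative stand-ins for the helpers imported from
// "@/app/utils" and "../api"; the repository's real code may differ.

// Assumed shape of one multimodal message part (text or image),
// mirroring the MultimodalContent import added in this diff.
interface MultimodalContent {
  type: "text" | "image_url";
  text?: string;
  image_url?: { url: string };
}

// Assumed message shape: content is either a plain string or an array of parts.
interface RequestMessage {
  role: "system" | "user" | "assistant";
  content: string | MultimodalContent[];
}

// Flatten a possibly multimodal message down to plain text, so that
// non-vision models never receive image parts.
function getMessageTextContent(message: RequestMessage): string {
  if (typeof message.content === "string") return message.content;
  return message.content
    .filter((part) => part.type === "text")
    .map((part) => part.text ?? "")
    .join("");
}

// Collect image URLs from a multimodal message (imported in this diff but
// not used in the hunks shown; presumably used elsewhere in the commit).
function getMessageImages(message: RequestMessage): string[] {
  if (typeof message.content === "string") return [];
  return message.content
    .filter((part) => part.type === "image_url")
    .map((part) => part.image_url?.url ?? "");
}

// Naive name-based check; the real predicate may consult a model allow list.
function isVisionModel(model: string): boolean {
  return model.toLowerCase().includes("vision");
}

// Example: a user message carrying both text and an image.
const msg: RequestMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this picture?" },
    { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
  ],
};

console.log(getMessageTextContent(msg)); // "What is in this picture?"
console.log(getMessageImages(msg)); // ["https://example.com/cat.png"]
console.log(isVisionModel("gpt-4-vision-preview")); // true
console.log(isVisionModel("gpt-3.5-turbo")); // false

With behaviour along these lines, a text-only model such as gpt-3.5-turbo never receives image parts, while a vision-capable model gets the full multimodal content array untouched.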