Mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git, synced 2025-08-09 12:57:25 +08:00

Commit: merge main

app/client/api.ts
@@ -9,6 +9,10 @@ import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
 import { ChatGPTApi } from "./platforms/openai";
 import { GeminiProApi } from "./platforms/google";
 import { ClaudeApi } from "./platforms/anthropic";
+import { ErnieApi } from "./platforms/baidu";
+import { DoubaoApi } from "./platforms/bytedance";
+import { QwenApi } from "./platforms/alibaba";

 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
@@ -30,6 +34,7 @@ export interface RequestMessage {
 export interface LLMConfig {
   model: string;
+  providerName?: string;
   temperature?: number;
   top_p?: number;
   stream?: boolean;
@@ -54,6 +59,7 @@ export interface LLMUsage {
 export interface LLMModel {
   name: string;
   displayName?: string;
   available: boolean;
+  provider: LLMModelProvider;
 }
@@ -102,6 +108,15 @@ export class ClientApi {
       case ModelProvider.Claude:
         this.llm = new ClaudeApi();
         break;
+      case ModelProvider.Ernie:
+        this.llm = new ErnieApi();
+        break;
+      case ModelProvider.Doubao:
+        this.llm = new DoubaoApi();
+        break;
+      case ModelProvider.Qwen:
+        this.llm = new QwenApi();
+        break;
       default:
         this.llm = new ChatGPTApi();
     }
@@ -155,37 +170,100 @@ export class ClientApi {
 export function getHeaders() {
   const accessStore = useAccessStore.getState();
+  const chatStore = useChatStore.getState();
   const headers: Record<string, string> = {
     "Content-Type": "application/json",
     Accept: "application/json",
   };
-  const modelConfig = useChatStore.getState().currentSession().mask.modelConfig;
-  const isGoogle = modelConfig.model.startsWith("gemini");
-  const isAzure = accessStore.provider === ServiceProvider.Azure;
-  const authHeader = isAzure ? "api-key" : "Authorization";
-  const apiKey = isGoogle
-    ? accessStore.googleApiKey
-    : isAzure
-    ? accessStore.azureApiKey
-    : accessStore.openaiApiKey;
-  const clientConfig = getClientConfig();
-  const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
-  const validString = (x: string) => x && x.length > 0;
+
+  const clientConfig = getClientConfig();
+
+  function getConfig() {
+    const modelConfig = chatStore.currentSession().mask.modelConfig;
+    const isGoogle = modelConfig.providerName == ServiceProvider.Google;
+    const isAzure = modelConfig.providerName === ServiceProvider.Azure;
+    const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic;
+    const isBaidu = modelConfig.providerName == ServiceProvider.Baidu;
+    const isByteDance = modelConfig.providerName === ServiceProvider.ByteDance;
+    const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba;
+    const isEnabledAccessControl = accessStore.enabledAccessControl();
+    const apiKey = isGoogle
+      ? accessStore.googleApiKey
+      : isAzure
+      ? accessStore.azureApiKey
+      : isAnthropic
+      ? accessStore.anthropicApiKey
+      : isByteDance
+      ? accessStore.bytedanceApiKey
+      : isAlibaba
+      ? accessStore.alibabaApiKey
+      : accessStore.openaiApiKey;
+    return {
+      isGoogle,
+      isAzure,
+      isAnthropic,
+      isBaidu,
+      isByteDance,
+      isAlibaba,
+      apiKey,
+      isEnabledAccessControl,
+    };
+  }
+
+  function getAuthHeader(): string {
+    return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization";
+  }
+
+  function getBearerToken(apiKey: string, noBearer: boolean = false): string {
+    return validString(apiKey)
+      ? `${noBearer ? "" : "Bearer "}${apiKey.trim()}`
+      : "";
+  }
+
+  function validString(x: string): boolean {
+    return x?.length > 0;
+  }
+
+  const {
+    isGoogle,
+    isAzure,
+    isAnthropic,
+    isBaidu,
+    apiKey,
+    isEnabledAccessControl,
+  } = getConfig();
   // when using google api in app, not set auth header
-  if (!(isGoogle && clientConfig?.isApp)) {
-    // use user's api key first
-    if (validString(apiKey)) {
-      headers[authHeader] = makeBearer(apiKey);
-    } else if (
-      accessStore.enabledAccessControl() &&
-      validString(accessStore.accessCode)
-    ) {
-      headers[authHeader] = makeBearer(
-        ACCESS_CODE_PREFIX + accessStore.accessCode,
-      );
-    }
-  }
+  if (isGoogle && clientConfig?.isApp) return headers;
+  // when using baidu api in app, not set auth header
+  if (isBaidu && clientConfig?.isApp) return headers;
+
+  const authHeader = getAuthHeader();
+
+  const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic);
+
+  if (bearerToken) {
+    headers[authHeader] = bearerToken;
+  } else if (isEnabledAccessControl && validString(accessStore.accessCode)) {
+    headers["Authorization"] = getBearerToken(
+      ACCESS_CODE_PREFIX + accessStore.accessCode,
+    );
+  }

   return headers;
 }
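The refactored getHeaders() reduces credential handling to two decisions: which header name carries the key (api-key for Azure, x-api-key for Anthropic, Authorization otherwise) and whether a "Bearer " prefix applies (Azure and Anthropic omit it). A minimal standalone sketch of that decision table, with illustrative keys and no store access:

// Standalone sketch of the header-selection rules above (keys are illustrative).
type Provider = "OpenAI" | "Azure" | "Anthropic" | "Google";

function authHeaderFor(p: Provider): string {
  return p === "Azure" ? "api-key" : p === "Anthropic" ? "x-api-key" : "Authorization";
}

function tokenFor(p: Provider, key: string): string {
  const noBearer = p === "Azure" || p === "Anthropic"; // these send the raw key
  return key.length > 0 ? `${noBearer ? "" : "Bearer "}${key.trim()}` : "";
}

console.log(authHeaderFor("OpenAI"), tokenFor("OpenAI", "sk-example"));  // Authorization Bearer sk-example
console.log(authHeaderFor("Azure"), tokenFor("Azure", "azure-example")); // api-key azure-example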
+
+export function getClientApi(provider: ServiceProvider): ClientApi {
+  switch (provider) {
+    case ServiceProvider.Google:
+      return new ClientApi(ModelProvider.GeminiPro);
+    case ServiceProvider.Anthropic:
+      return new ClientApi(ModelProvider.Claude);
+    case ServiceProvider.Baidu:
+      return new ClientApi(ModelProvider.Ernie);
+    case ServiceProvider.ByteDance:
+      return new ClientApi(ModelProvider.Doubao);
+    case ServiceProvider.Alibaba:
+      return new ClientApi(ModelProvider.Qwen);
+    default:
+      return new ClientApi(ModelProvider.GPT);
+  }
+}
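getClientApi() becomes the single entry point that maps a ServiceProvider onto the matching client. A hypothetical call site, assuming the imports from app/client/api.ts (the ChatOptions literal and model name are illustrative):

// Hypothetical call site for the factory above.
const api = getClientApi(ServiceProvider.Alibaba); // wraps ModelProvider.Qwen -> QwenApi

api.llm.chat({
  messages: [{ role: "user", content: "Hello" }],
  config: { model: "qwen-turbo", stream: true }, // illustrative model name
  onUpdate: (message) => console.log("partial:", message),
  onFinish: (message) => console.log("done:", message),
  onError: (e) => console.error(e),
});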
app/client/platforms/alibaba.ts (new file, 268 lines)
@@ -0,0 +1,268 @@
"use client";
import {
  ApiPath,
  Alibaba,
  ALIBABA_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent, isVisionModel } from "@/app/utils";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestInput {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
}
interface RequestParam {
  result_format: string;
  incremental_output?: boolean;
  temperature: number;
  repetition_penalty?: number;
  top_p: number;
  max_tokens?: number;
}
interface RequestPayload {
  model: string;
  input: RequestInput;
  parameters: RequestParam;
}

export class QwenApi implements LLMApi {
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.alibabaUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? ALIBABA_BASE_URL : ApiPath.Alibaba;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Alibaba)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res?.output?.choices?.at(0)?.message?.content ?? "";
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      model: modelConfig.model,
      input: {
        messages,
      },
      parameters: {
        result_format: "message",
        incremental_output: shouldStream,
        temperature: modelConfig.temperature,
        // max_tokens: modelConfig.max_tokens,
        top_p: modelConfig.top_p === 1 ? 0.99 : modelConfig.top_p, // Qwen requires top_p < 1
      },
    };

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Alibaba.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: {
          ...getHeaders(),
          "X-DashScope-SSE": shouldStream ? "enable" : "disable",
        },
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[Alibaba] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.output.choices as Array<{
                message: { content: string };
              }>;
              const delta = choices[0]?.message?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
export { Alibaba };
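QwenApi speaks DashScope's native schema rather than an OpenAI-compatible one: messages sit under input, sampling options under parameters, and streaming is toggled by the X-DashScope-SSE header together with incremental_output. A sketch of the wire format this produces; the key is a placeholder and the endpoint assumes ALIBABA_BASE_URL plus Alibaba.ChatPath (verify against app/constant.ts):

// Illustrative request matching the RequestPayload shape above; key and URL are placeholders.
const payload = {
  model: "qwen-turbo",
  input: { messages: [{ role: "user", content: "Hello" }] },
  parameters: {
    result_format: "message",
    incremental_output: true, // deltas instead of cumulative text when streaming
    temperature: 0.7,
    top_p: 0.99, // clamped below 1, as chat() does when top_p === 1
  },
};

await fetch(
  "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation",
  {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer <DASHSCOPE_API_KEY>",
      "X-DashScope-SSE": "enable", // "disable" yields a single JSON response
    },
    body: JSON.stringify(payload),
  },
);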
app/client/platforms/anthropic.ts
@@ -1,5 +1,5 @@
 import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant";
-import { ChatOptions, LLMApi, MultimodalContent } from "../api";
+import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import { DEFAULT_API_HOST } from "@/app/constant";
@@ -190,11 +190,10 @@ export class ClaudeApi implements LLMApi {
       body: JSON.stringify(requestBody),
       signal: controller.signal,
       headers: {
-        "Content-Type": "application/json",
-        Accept: "application/json",
-        "x-api-key": accessStore.anthropicApiKey,
+        ...getHeaders(), // get common headers
         "anthropic-version": accessStore.anthropicApiVersion,
-        Authorization: getAuthKey(accessStore.anthropicApiKey),
+        // do not send `anthropicApiKey` in browser!!!
+        // Authorization: getAuthKey(accessStore.anthropicApiKey),
       },
     };
@@ -389,27 +388,3 @@ function trimEnd(s: string, end = " ") {

   return s;
 }
-
-function bearer(value: string) {
-  return `Bearer ${value.trim()}`;
-}
-
-function getAuthKey(apiKey = "") {
-  const accessStore = useAccessStore.getState();
-  const isApp = !!getClientConfig()?.isApp;
-  let authKey = "";
-
-  if (apiKey) {
-    // use user's api key first
-    authKey = bearer(apiKey);
-  } else if (
-    accessStore.enabledAccessControl() &&
-    !isApp &&
-    !!accessStore.accessCode
-  ) {
-    // or use access code
-    authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode);
-  }
-
-  return authKey;
-}
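The Claude request now inherits its credentials from the shared getHeaders(), which emits x-api-key without a "Bearer " prefix for Anthropic; the removed getAuthKey()/Authorization pair is what used to leak anthropicApiKey into browser requests. Roughly, the headers sent after this change look like the following (all values are placeholders):

// Approximate shape of the Claude request headers after the change (values illustrative).
const headers = {
  "Content-Type": "application/json", // from getHeaders()
  Accept: "application/json",         // from getHeaders()
  "x-api-key": "<ANTHROPIC_API_KEY>", // from getHeaders(); note: no "Bearer " prefix
  "anthropic-version": "2023-06-01",  // illustrative version string
};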
app/client/platforms/baidu.ts (new file, 273 lines)
@@ -0,0 +1,273 @@
"use client";
import {
  ApiPath,
  Baidu,
  BAIDU_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getAccessToken } from "@/app/utils/baidu";

import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}

export class ErnieApi implements LLMApi {
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.baiduUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      // do not use proxy for baidubce api
      baseUrl = isApp ? BAIDU_BASE_URL : ApiPath.Baidu;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.Baidu)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: getMessageTextContent(v),
    }));

    // "error_code": 336006, "error_msg": "the length of messages must be an odd number",
    if (messages.length % 2 === 0) {
      messages.unshift({
        role: "user",
        content: " ",
      });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      messages,
      stream: shouldStream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    console.log("[Request] Baidu payload: ", requestPayload);

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      let chatPath = this.path(Baidu.ChatPath(modelConfig.model));

      // getAccessToken cannot run in the browser because of CORS errors
      if (!!getClientConfig()?.isApp) {
        const accessStore = useAccessStore.getState();
        if (accessStore.useCustomConfig) {
          if (accessStore.isValidBaidu()) {
            const { access_token } = await getAccessToken(
              accessStore.baiduApiKey,
              accessStore.baiduSecretKey,
            );
            chatPath = `${chatPath}${
              chatPath.includes("?") ? "&" : "?"
            }access_token=${access_token}`;
          }
        }
      }
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log("[Baidu] request response content type: ", contentType);

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const delta = json?.result;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = resJson?.result;
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
export { Baidu };
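Two Ernie-specific quirks are handled in chat(): the API insists on an odd number of messages (error 336006), and in the app build the OAuth access token is appended as a query parameter because getAccessToken() cannot be called from the browser. A condensed sketch of both, with placeholder URL and token:

// 1) Restore user/assistant alternation when the history length is even.
const messages = [
  { role: "assistant", content: "previous answer" },
  { role: "user", content: "follow-up question" },
];
if (messages.length % 2 === 0) {
  messages.unshift({ role: "user", content: " " }); // blank user turn, as above
}

// 2) Append the OAuth token as a query parameter (placeholder values).
let chatPath = "https://aip.baidubce.com/<model-specific-chat-path>";
const access_token = "<ACCESS_TOKEN from getAccessToken()>";
chatPath = `${chatPath}${chatPath.includes("?") ? "&" : "?"}access_token=${access_token}`;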
app/client/platforms/bytedance.ts (new file, 255 lines)
@@ -0,0 +1,255 @@
"use client";
import {
  ApiPath,
  ByteDance,
  BYTEDANCE_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

import {
  ChatOptions,
  getHeaders,
  LLMApi,
  LLMModel,
  MultimodalContent,
} from "../api";
import Locale from "../../locales";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import { getMessageTextContent } from "@/app/utils";

export interface OpenAIListModelResponse {
  object: string;
  data: Array<{
    id: string;
    object: string;
    root: string;
  }>;
}

interface RequestPayload {
  messages: {
    role: "system" | "user" | "assistant";
    content: string | MultimodalContent[];
  }[];
  stream?: boolean;
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens?: number;
}

export class DoubaoApi implements LLMApi {
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = "";

    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.bytedanceUrl;
    }

    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      baseUrl = isApp ? BYTEDANCE_BASE_URL : ApiPath.ByteDance;
    }

    if (baseUrl.endsWith("/")) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.ByteDance)) {
      baseUrl = "https://" + baseUrl;
    }

    console.log("[Proxy Endpoint] ", baseUrl, path);

    return [baseUrl, path].join("/");
  }

  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? "";
  }

  async chat(options: ChatOptions) {
    const messages = options.messages.map((v) => ({
      role: v.role,
      content: getMessageTextContent(v),
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const shouldStream = !!options.config.stream;
    const requestPayload: RequestPayload = {
      messages,
      stream: shouldStream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
    };

    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(ByteDance.ChatPath);
      const chatPayload = {
        method: "POST",
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            if (responseText?.length === 0) {
              options.onError?.(new Error("empty response from server"));
            }
            return;
          }

          if (remainText.length > 0) {
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;

        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log(
              "[ByteDance] request response content type: ",
              contentType,
            );

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              const json = JSON.parse(text);
              const choices = json.choices as Array<{
                delta: { content: string };
              }>;
              const delta = choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text, msg);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }
}
export { ByteDance };
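DoubaoApi's stream is OpenAI-compatible: each SSE data: line carries a JSON chunk whose choices[].delta.content holds the next increment, which onmessage() appends to remainText for the typewriter animation. A minimal sketch of the per-chunk parse (chunk contents illustrative):

// Illustrative SSE chunk in the OpenAI-compatible shape parsed by onmessage().
const chunk = JSON.parse('{"choices":[{"index":0,"delta":{"content":"Hel"}}]}');
const delta: string | undefined = chunk.choices?.[0]?.delta?.content; // "Hel"
// onmessage() appends this to remainText; animateResponseText() then drains
// roughly 1/60th of the backlog per frame, so long backlogs flush faster.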
app/client/platforms/google.ts
@@ -3,6 +3,12 @@ import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import { DEFAULT_API_HOST } from "@/app/constant";
+import Locale from "../../locales";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import { prettyObject } from "@/app/utils/format";
 import {
   getMessageTextContent,
   getMessageImages,
@@ -20,7 +26,7 @@ export class GeminiProApi implements LLMApi {
     );
   }
   async chat(options: ChatOptions): Promise<void> {
-    // const apiClient = this;
+    const apiClient = this;
     let multimodal = false;
     const messages = options.messages.map((v) => {
       let parts: any[] = [{ text: getMessageTextContent(v) }];
@@ -120,7 +126,9 @@ export class GeminiProApi implements LLMApi {

     if (!baseUrl) {
       baseUrl = isApp
-        ? DEFAULT_API_HOST + "/api/proxy/google/" + Google.ChatPath(modelConfig.model)
+        ? DEFAULT_API_HOST +
+          "/api/proxy/google/" +
+          Google.ChatPath(modelConfig.model)
         : this.path(Google.ChatPath(modelConfig.model));
     }
@@ -139,16 +147,17 @@ export class GeminiProApi implements LLMApi {
       () => controller.abort(),
       REQUEST_TIMEOUT_MS,
     );

     if (shouldStream) {
       let responseText = "";
       let remainText = "";
       let finished = false;
-
-      let existingTexts: string[] = [];
       const finish = () => {
-        finished = true;
-        options.onFinish(existingTexts.join(""));
+        if (!finished) {
+          finished = true;
+          options.onFinish(responseText + remainText);
+        }
       };

       // animate response to make it look smooth
@@ -173,72 +182,85 @@ export class GeminiProApi implements LLMApi {
       // start animation
       animateResponseText();

-      fetch(
-        baseUrl.replace("generateContent", "streamGenerateContent"),
-        chatPayload,
-      )
-        .then((response) => {
-          const reader = response?.body?.getReader();
-          const decoder = new TextDecoder();
-          let partialData = "";
-
-          return reader?.read().then(function processText({
-            done,
-            value,
-          }): Promise<any> {
-            if (done) {
-              if (response.status !== 200) {
-                try {
-                  let data = JSON.parse(ensureProperEnding(partialData));
-                  if (data && data[0].error) {
-                    options.onError?.(new Error(data[0].error.message));
-                  } else {
-                    options.onError?.(new Error("Request failed"));
-                  }
-                } catch (_) {
-                  options.onError?.(new Error("Request failed"));
-                }
-              }
-
-              console.log("Stream complete");
-              // options.onFinish(responseText + remainText);
-              finished = true;
-              return Promise.resolve();
-            }
-
-            partialData += decoder.decode(value, { stream: true });
-
-            try {
-              let data = JSON.parse(ensureProperEnding(partialData));
-
-              const textArray = data.reduce(
-                (acc: string[], item: { candidates: any[] }) => {
-                  const texts = item.candidates.map((candidate) =>
-                    candidate.content.parts
-                      .map((part: { text: any }) => part.text)
-                      .join(""),
-                  );
-                  return acc.concat(texts);
-                },
-                [],
-              );
-
-              if (textArray.length > existingTexts.length) {
-                const deltaArray = textArray.slice(existingTexts.length);
-                existingTexts = textArray;
-                remainText += deltaArray.join("");
-              }
-            } catch (error) {
-              // console.log("[Response Animation] error: ", error, partialData);
-              // skip error message when parsing json
-            }
-
-            return reader.read().then(processText);
-          });
-        })
-        .catch((error) => {
-          console.error("Error:", error);
-        });
+      controller.signal.onabort = finish;
+
+      // https://github.com/google-gemini/cookbook/blob/main/quickstarts/rest/Streaming_REST.ipynb
+      const chatPath =
+        baseUrl.replace("generateContent", "streamGenerateContent") +
+        (baseUrl.indexOf("?") > -1 ? "&alt=sse" : "?alt=sse");
+      fetchEventSource(chatPath, {
+        ...chatPayload,
+        async onopen(res) {
+          clearTimeout(requestTimeoutId);
+          const contentType = res.headers.get("content-type");
+          console.log(
+            "[Gemini] request response content type: ",
+            contentType,
+          );
+
+          if (contentType?.startsWith("text/plain")) {
+            responseText = await res.clone().text();
+            return finish();
+          }
+
+          if (
+            !res.ok ||
+            !res.headers
+              .get("content-type")
+              ?.startsWith(EventStreamContentType) ||
+            res.status !== 200
+          ) {
+            const responseTexts = [responseText];
+            let extraInfo = await res.clone().text();
+            try {
+              const resJson = await res.clone().json();
+              extraInfo = prettyObject(resJson);
+            } catch {}
+
+            if (res.status === 401) {
+              responseTexts.push(Locale.Error.Unauthorized);
+            }
+
+            if (extraInfo) {
+              responseTexts.push(extraInfo);
+            }
+
+            responseText = responseTexts.join("\n\n");
+
+            return finish();
+          }
+        },
+        onmessage(msg) {
+          if (msg.data === "[DONE]" || finished) {
+            return finish();
+          }
+          const text = msg.data;
+          try {
+            const json = JSON.parse(text);
+            const delta = apiClient.extractMessage(json);
+
+            if (delta) {
+              remainText += delta;
+            }
+
+            const blockReason = json?.promptFeedback?.blockReason;
+            if (blockReason) {
+              // being blocked
+              console.log(`[Google] [Safety Ratings] result:`, blockReason);
+            }
+          } catch (e) {
+            console.error("[Request] parse error", text, msg);
+          }
+        },
+        onclose() {
+          finish();
+        },
+        onerror(e) {
+          options.onError?.(e);
+          throw e;
+        },
+        openWhenHidden: true,
+      });
     } else {
       const res = await fetch(baseUrl, chatPayload);
       clearTimeout(requestTimeoutId);
@@ -252,7 +274,7 @@ export class GeminiProApi implements LLMApi {
         ),
       );
     }
-      const message = this.extractMessage(resJson);
+      const message = apiClient.extractMessage(resJson);
       options.onFinish(message);
     }
   } catch (e) {
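The streaming rewrite drops the hand-rolled reader loop, which re-parsed the growing JSON array through ensureProperEnding() on every read, in favor of fetchEventSource; the Gemini REST API emits proper SSE once alt=sse is appended, per the cookbook notebook linked in the diff. The URL transformation in isolation (base URL illustrative):

// URL rewrite in isolation: switch the REST method and request SSE framing.
const baseUrl =
  "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=<KEY>"; // illustrative
const chatPath =
  baseUrl.replace("generateContent", "streamGenerateContent") +
  (baseUrl.indexOf("?") > -1 ? "&alt=sse" : "?alt=sse");
// => .../models/gemini-pro:streamGenerateContent?key=<KEY>&alt=sse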
app/client/platforms/openai.ts
@@ -1,13 +1,16 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
 import {
   ApiPath,
   DEFAULT_API_HOST,
   DEFAULT_MODELS,
   OpenaiPath,
+  Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+import { collectModelsWithDefaultModel } from "@/app/utils/model";

 import {
   ChatOptions,
@@ -24,7 +27,6 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { makeAzurePath } from "@/app/azure";
 import {
   getMessageTextContent,
   getMessageImages,
@@ -40,7 +42,7 @@ export interface OpenAIListModelResponse {
   }>;
 }

-interface RequestPayload {
+export interface RequestPayload {
   messages: {
     role: "system" | "user" | "assistant";
     content: string | MultimodalContent[];
@@ -62,33 +64,31 @@ export class ChatGPTApi implements LLMApi {

     let baseUrl = "";

+    const isAzure = path.includes("deployments");
     if (accessStore.useCustomConfig) {
-      const isAzure = accessStore.provider === ServiceProvider.Azure;
-
       if (isAzure && !accessStore.isValidAzure()) {
         throw Error(
           "incomplete azure config, please check it in your settings page",
         );
       }

-      if (isAzure) {
-        path = makeAzurePath(path, accessStore.azureApiVersion);
-      }
-
       baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
     }

     if (baseUrl.length === 0) {
       const isApp = !!getClientConfig()?.isApp;
-      baseUrl = isApp
-        ? DEFAULT_API_HOST + "/proxy" + ApiPath.OpenAI
-        : ApiPath.OpenAI;
+      const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI;
+      baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath;
     }

     if (baseUrl.endsWith("/")) {
       baseUrl = baseUrl.slice(0, baseUrl.length - 1);
     }
-    if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.OpenAI)) {
+    if (
+      !baseUrl.startsWith("http") &&
+      !isAzure &&
+      !baseUrl.startsWith(ApiPath.OpenAI)
+    ) {
       baseUrl = "https://" + baseUrl;
     }

@@ -113,6 +113,7 @@ export class ChatGPTApi implements LLMApi {
       ...useChatStore.getState().currentSession().mask.modelConfig,
       ...{
         model: options.config.model,
+        providerName: options.config.providerName,
       },
     };

@@ -140,7 +141,35 @@ export class ChatGPTApi implements LLMApi {
     options.onController?.(controller);

     try {
-      const chatPath = this.path(OpenaiPath.ChatPath);
+      let chatPath = "";
+      if (modelConfig.providerName === ServiceProvider.Azure) {
+        // find model, and get displayName as deployName
+        const { models: configModels, customModels: configCustomModels } =
+          useAppConfig.getState();
+        const {
+          defaultModel,
+          customModels: accessCustomModels,
+          useCustomConfig,
+        } = useAccessStore.getState();
+        const models = collectModelsWithDefaultModel(
+          configModels,
+          [configCustomModels, accessCustomModels].join(","),
+          defaultModel,
+        );
+        const model = models.find(
+          (model) =>
+            model.name === modelConfig.model &&
+            model?.provider?.providerName === ServiceProvider.Azure,
+        );
+        chatPath = this.path(
+          Azure.ChatPath(
+            (model?.displayName ?? model?.name) as string,
+            useCustomConfig ? useAccessStore.getState().azureApiVersion : "",
+          ),
+        );
+      } else {
+        chatPath = this.path(OpenaiPath.ChatPath);
+      }
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
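For Azure, requests are addressed by deployment name rather than model name, so the lookup above reuses a model's displayName as the deployment name. The diff is cut off here, but the resolved path plausibly follows Azure's usual template; a sketch with a hypothetical stand-in for Azure.ChatPath (the real template lives in app/constant.ts and should be checked there):

// Hypothetical stand-in for Azure.ChatPath from app/constant.ts (an assumption).
const azureChatPath = (deployName: string, apiVersion: string) =>
  `deployments/${deployName}/chat/completions?api-version=${apiVersion}`;

// A model entry whose displayName doubles as the Azure deployment name.
const model = { name: "gpt-4", displayName: "my-gpt4-deployment" };
console.log(azureChatPath(model.displayName ?? model.name, "2023-08-01-preview"));
// => deployments/my-gpt4-deployment/chat/completions?api-version=2023-08-01-preview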