chore: resolve conflict

Fred Liang
2023-12-24 02:15:30 +08:00
parent 75acd4c1aa
commit 778e88cb56
16 changed files with 630 additions and 74 deletions

app/client/api.ts

@@ -1,8 +1,13 @@
 import { getClientConfig } from "../config/client";
-import { ACCESS_CODE_PREFIX, Azure, ServiceProvider } from "../constant";
-import { ChatMessage, ModelType, useAccessStore } from "../store";
+import {
+  ACCESS_CODE_PREFIX,
+  Azure,
+  ModelProvider,
+  ServiceProvider,
+} from "../constant";
+import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
 import { ChatGPTApi } from "./platforms/openai";
+import { GeminiApi } from "./platforms/google";
 
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
@@ -40,7 +45,15 @@ export interface LLMUsage {
 export interface LLMModel {
   name: string;
   displayName: string;
   available: boolean;
+  provider: LLMModelProvider;
+}
+
+export interface LLMModelProvider {
+  id: string;
+  providerName: string;
+  providerType: string;
 }
 
 export abstract class LLMApi {
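For illustration only (the values below are hypothetical, not part of this commit), a model entry carrying the new provider field could look like:

const geminiPro: LLMModel = {
  name: "gemini-pro",
  displayName: "Gemini Pro",
  available: true,
  provider: {
    id: "google",
    providerName: "Google",
    providerType: "google",
  },
};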
@@ -73,7 +86,11 @@ interface ChatProvider {
 export class ClientApi {
   public llm: LLMApi;
 
-  constructor() {
+  constructor(provider: ModelProvider = ModelProvider.GPT) {
+    if (provider === ModelProvider.Gemini) {
+      this.llm = new GeminiApi();
+      return;
+    }
     this.llm = new ChatGPTApi();
   }
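A minimal usage sketch (hypothetical call sites, not in this diff): existing new ClientApi() callers keep the OpenAI backend by default, while Gemini is opted into explicitly:

const gptClient = new ClientApi(); // defaults to ModelProvider.GPT -> ChatGPTApi
const geminiClient = new ClientApi(ModelProvider.Gemini); // -> GeminiApi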
@@ -123,8 +140,6 @@
   }
 }
 
-export const api = new ClientApi();
-
 export function getHeaders() {
   const accessStore = useAccessStore.getState();
   const headers: Record<string, string> = {
@@ -132,9 +147,14 @@ export function getHeaders() {
     "x-requested-with": "XMLHttpRequest",
   };
 
+  const isGoogle = accessStore.provider === ServiceProvider.Google;
   const isAzure = accessStore.provider === ServiceProvider.Azure;
   const authHeader = isAzure ? "api-key" : "Authorization";
-  const apiKey = isAzure ? accessStore.azureApiKey : accessStore.openaiApiKey;
+  const apiKey = isGoogle
+    ? accessStore.googleApiKey
+    : isAzure
+      ? accessStore.azureApiKey
+      : accessStore.openaiApiKey;
 
   const makeBearer = (s: string) => `${isAzure ? "" : "Bearer "}${s.trim()}`;
   const validString = (x: string) => x && x.length > 0;
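The remainder of getHeaders() falls outside this hunk; assuming the helpers above are used unchanged, the selected key is presumably attached along these lines:

// Hypothetical continuation of getHeaders(); the actual tail is not shown in this diff.
if (validString(apiKey)) {
  headers[authHeader] = makeBearer(apiKey);
}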

app/client/platforms/google.ts

@@ -0,0 +1,199 @@
import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";
import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import Locale from "../../locales";

export class GeminiApi implements LLMApi {
  extractMessage(res: any) {
    console.log("[Response] gemini response: ", res);

    return (
      // optional chaining on `parts` guards against candidates without content
      res?.candidates?.at(0)?.content?.parts?.at(0)?.text ||
      res?.error?.message ||
      ""
    );
  }
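  // For reference, extractMessage() handles success responses shaped like
  //   { "candidates": [{ "content": { "parts": [{ "text": "..." }] } }] }
  // and error responses shaped like
  //   { "error": { "message": "..." } }
  // (representative examples, not taken from this commit).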
  async chat(options: ChatOptions): Promise<void> {
    // Gemini only accepts "user" and "model" roles, so both "assistant"
    // and "system" messages are mapped to "model".
    const messages = options.messages.map((v) => ({
      role: v.role.replace("assistant", "model").replace("system", "model"),
      parts: [{ text: v.content }],
    }));

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const accessStore = useAccessStore.getState();
    const requestPayload = {
      contents: messages,
      // Generation parameters (including max_tokens) are deliberately not
      // sent for now:
      // stream: options.config.stream,
      // model: modelConfig.model,
      // temperature: modelConfig.temperature,
      // presence_penalty: modelConfig.presence_penalty,
      // frequency_penalty: modelConfig.frequency_penalty,
      // top_p: modelConfig.top_p,
      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
    };
console.log("[Request] openai payload: ", requestPayload);
// todo: support stream later
const shouldStream = false;
const controller = new AbortController();
options.onController?.(controller);
try {
const chatPath =
this.path(Google.ChatPath) + `?key=${accessStore.googleApiKey}`;
const chatPayload = {
method: "POST",
body: JSON.stringify(requestPayload),
signal: controller.signal,
headers: getHeaders(),
};
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
);
      if (shouldStream) {
        let responseText = "";
        let remainText = "";
        let finished = false;

        // animate the response to make it look smooth
        function animateResponseText() {
          if (finished || controller.signal.aborted) {
            responseText += remainText;
            console.log("[Response Animation] finished");
            return;
          }

          if (remainText.length > 0) {
            // flush roughly 1/60th of the buffered text per frame,
            // at least one character, so long replies drain quickly
            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
            const fetchText = remainText.slice(0, fetchCount);
            responseText += fetchText;
            remainText = remainText.slice(fetchCount);
            options.onUpdate?.(responseText, fetchText);
          }

          requestAnimationFrame(animateResponseText);
        }

        // start animation
        animateResponseText();

        const finish = () => {
          if (!finished) {
            finished = true;
            options.onFinish(responseText + remainText);
          }
        };

        controller.signal.onabort = finish;
        fetchEventSource(chatPath, {
          ...chatPayload,
          async onopen(res) {
            clearTimeout(requestTimeoutId);
            const contentType = res.headers.get("content-type");
            console.log("[Gemini] request response content type: ", contentType);

            if (contentType?.startsWith("text/plain")) {
              responseText = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [responseText];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              responseText = responseTexts.join("\n\n");
              return finish();
            }
          },
          onmessage(msg) {
            if (msg.data === "[DONE]" || finished) {
              return finish();
            }
            const text = msg.data;
            try {
              // NOTE: this parser still expects an OpenAI-style SSE chunk
              // (choices[].delta.content); it is unreachable while
              // shouldStream is false above.
              const json = JSON.parse(text) as {
                choices: Array<{
                  delta: {
                    content: string;
                  };
                }>;
              };
              const delta = json.choices[0]?.delta?.content;
              if (delta) {
                remainText += delta;
              }
            } catch (e) {
              console.error("[Request] parse error", text);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message);
      }
    } catch (e) {
      console.log("[Request] failed to make a chat request", e);
      options.onError?.(e as Error);
    }
  }

  usage(): Promise<LLMUsage> {
    throw new Error("Method not implemented.");
  }

  async models(): Promise<LLMModel[]> {
    return [];
  }

  path(path: string): string {
    return "/api/google/" + path;
  }
}
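An end-to-end sketch of the new Gemini path (hypothetical wiring, not part of this commit; the model name "gemini-pro" is an assumption, and only the ChatOptions fields exercised above are used):

const client = new ClientApi(ModelProvider.Gemini);
client.llm.chat({
  messages: [{ role: "user", content: "Hello" }],
  config: { model: "gemini-pro", stream: false },
  onFinish(message) {
    console.log("[Gemini] reply: ", message);
  },
  onError(err) {
    console.error("[Gemini] request failed: ", err);
  },
});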