From 5610f423d06a735737f833cbba7b182687f37ef8 Mon Sep 17 00:00:00 2001 From: Yidadaa Date: Tue, 26 Sep 2023 00:19:21 +0800 Subject: [PATCH 01/26] feat: add multi-model support --- app/api/openai/[...path]/route.ts | 10 +- app/client/api.ts | 151 ----------- app/client/common/auth.ts | 28 ++ app/client/common/config.ts | 5 + app/client/{ => common}/controller.ts | 0 app/client/common/share.ts | 44 ++++ app/client/core.ts | 28 ++ app/client/index.ts | 2 + app/client/openai/config.ts | 20 ++ app/client/openai/index.ts | 295 ++++++++++++++++++++++ app/client/openai/types.ts | 79 ++++++ app/client/platforms/openai.ts | 281 --------------------- app/client/types.ts | 39 +++ app/components/auth.tsx | 19 +- app/components/chat-list.tsx | 8 +- app/components/chat.tsx | 59 +++-- app/components/config/index.tsx | 171 +++++++++++++ app/components/config/openai/model.tsx | 113 +++++++++ app/components/config/openai/provider.tsx | 71 ++++++ app/components/config/types.ts | 14 + app/components/emoji.tsx | 2 +- app/components/exporter.tsx | 17 +- app/components/home.tsx | 11 +- app/components/mask.tsx | 59 +++-- app/components/message-selector.tsx | 6 +- app/components/model-config.tsx | 84 +----- app/components/new-chat.tsx | 4 +- app/components/settings.tsx | 109 ++++---- app/constant.ts | 20 +- app/locales/ar.ts | 2 +- app/locales/bn.ts | 2 +- app/locales/cn.ts | 2 +- app/locales/cs.ts | 2 +- app/locales/de.ts | 2 +- app/locales/en.ts | 2 +- app/locales/es.ts | 2 +- app/locales/fr.ts | 2 +- app/locales/id.ts | 7 +- app/locales/it.ts | 2 +- app/locales/jp.ts | 5 +- app/locales/ko.ts | 2 +- app/locales/no.ts | 2 +- app/locales/ru.ts | 2 +- app/locales/tr.ts | 2 +- app/locales/tw.ts | 2 +- app/locales/vi.ts | 2 +- app/masks/typing.ts | 10 +- app/store/access.ts | 30 +-- app/store/chat.ts | 188 ++++++-------- app/store/config.ts | 157 +++++++----- app/store/mask.ts | 18 +- app/store/sync.ts | 4 +- app/store/update.ts | 73 +++--- app/typing.ts | 14 + app/utils/clone.ts | 2 +- app/utils/cloud/index.ts | 5 +- app/utils/cloud/upstash.ts | 2 +- app/utils/cors.ts | 20 +- app/utils/log.ts | 13 + app/utils/object.ts | 17 ++ app/utils/path.ts | 16 ++ app/utils/string.ts | 19 ++ 62 files changed, 1439 insertions(+), 940 deletions(-) delete mode 100644 app/client/api.ts create mode 100644 app/client/common/auth.ts create mode 100644 app/client/common/config.ts rename app/client/{ => common}/controller.ts (100%) create mode 100644 app/client/common/share.ts create mode 100644 app/client/core.ts create mode 100644 app/client/index.ts create mode 100644 app/client/openai/config.ts create mode 100644 app/client/openai/index.ts create mode 100644 app/client/openai/types.ts delete mode 100644 app/client/platforms/openai.ts create mode 100644 app/client/types.ts create mode 100644 app/components/config/index.tsx create mode 100644 app/components/config/openai/model.tsx create mode 100644 app/components/config/openai/provider.tsx create mode 100644 app/components/config/types.ts create mode 100644 app/utils/log.ts create mode 100644 app/utils/object.ts create mode 100644 app/utils/path.ts create mode 100644 app/utils/string.ts diff --git a/app/api/openai/[...path]/route.ts b/app/api/openai/[...path]/route.ts index 9df005a31..8dc36f433 100644 --- a/app/api/openai/[...path]/route.ts +++ b/app/api/openai/[...path]/route.ts @@ -1,4 +1,4 @@ -import { type OpenAIListModelResponse } from "@/app/client/platforms/openai"; +import { type OpenAI } from "@/app/client/openai/types"; import { getServerSideConfig } from 
"@/app/config/server"; import { OpenaiPath } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; @@ -6,9 +6,9 @@ import { NextRequest, NextResponse } from "next/server"; import { auth } from "../../auth"; import { requestOpenai } from "../../common"; -const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); +const ALLOWD_PATH = new Set(Object.values(OpenaiPath) as string[]); -function getModels(remoteModelRes: OpenAIListModelResponse) { +function getModels(remoteModelRes: OpenAI.ListModelResponse) { const config = getServerSideConfig(); if (config.disableGPT4) { @@ -56,8 +56,8 @@ async function handle( const response = await requestOpenai(req); // list models - if (subpath === OpenaiPath.ListModelPath && response.status === 200) { - const resJson = (await response.json()) as OpenAIListModelResponse; + if (subpath === OpenaiPath.ListModel && response.status === 200) { + const resJson = await response.json(); const availableModels = getModels(resJson); return NextResponse.json(availableModels, { status: response.status, diff --git a/app/client/api.ts b/app/client/api.ts deleted file mode 100644 index b04dd88b8..000000000 --- a/app/client/api.ts +++ /dev/null @@ -1,151 +0,0 @@ -import { getClientConfig } from "../config/client"; -import { ACCESS_CODE_PREFIX } from "../constant"; -import { ChatMessage, ModelType, useAccessStore } from "../store"; -import { ChatGPTApi } from "./platforms/openai"; - -export const ROLES = ["system", "user", "assistant"] as const; -export type MessageRole = (typeof ROLES)[number]; - -export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; -export type ChatModel = ModelType; - -export interface RequestMessage { - role: MessageRole; - content: string; -} - -export interface LLMConfig { - model: string; - temperature?: number; - top_p?: number; - stream?: boolean; - presence_penalty?: number; - frequency_penalty?: number; -} - -export interface ChatOptions { - messages: RequestMessage[]; - config: LLMConfig; - - onUpdate?: (message: string, chunk: string) => void; - onFinish: (message: string) => void; - onError?: (err: Error) => void; - onController?: (controller: AbortController) => void; -} - -export interface LLMUsage { - used: number; - total: number; -} - -export interface LLMModel { - name: string; - available: boolean; -} - -export abstract class LLMApi { - abstract chat(options: ChatOptions): Promise; - abstract usage(): Promise; - abstract models(): Promise; -} - -type ProviderName = "openai" | "azure" | "claude" | "palm"; - -interface Model { - name: string; - provider: ProviderName; - ctxlen: number; -} - -interface ChatProvider { - name: ProviderName; - apiConfig: { - baseUrl: string; - apiKey: string; - summaryModel: Model; - }; - models: Model[]; - - chat: () => void; - usage: () => void; -} - -export class ClientApi { - public llm: LLMApi; - - constructor() { - this.llm = new ChatGPTApi(); - } - - config() {} - - prompts() {} - - masks() {} - - async share(messages: ChatMessage[], avatarUrl: string | null = null) { - const msgs = messages - .map((m) => ({ - from: m.role === "user" ? 
"human" : "gpt", - value: m.content, - })) - .concat([ - { - from: "human", - value: - "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web", - }, - ]); - // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用 - // Please do not modify this message - - console.log("[Share]", messages, msgs); - const clientConfig = getClientConfig(); - const proxyUrl = "/sharegpt"; - const rawUrl = "https://sharegpt.com/api/conversations"; - const shareUrl = clientConfig?.isApp ? rawUrl : proxyUrl; - const res = await fetch(shareUrl, { - body: JSON.stringify({ - avatarUrl, - items: msgs, - }), - headers: { - "Content-Type": "application/json", - }, - method: "POST", - }); - - const resJson = await res.json(); - console.log("[Share]", resJson); - if (resJson.id) { - return `https://shareg.pt/${resJson.id}`; - } - } -} - -export const api = new ClientApi(); - -export function getHeaders() { - const accessStore = useAccessStore.getState(); - let headers: Record = { - "Content-Type": "application/json", - "x-requested-with": "XMLHttpRequest", - }; - - const makeBearer = (token: string) => `Bearer ${token.trim()}`; - const validString = (x: string) => x && x.length > 0; - - // use user's api key first - if (validString(accessStore.token)) { - headers.Authorization = makeBearer(accessStore.token); - } else if ( - accessStore.enabledAccessControl() && - validString(accessStore.accessCode) - ) { - headers.Authorization = makeBearer( - ACCESS_CODE_PREFIX + accessStore.accessCode, - ); - } - - return headers; -} diff --git a/app/client/common/auth.ts b/app/client/common/auth.ts new file mode 100644 index 000000000..9533ebfd2 --- /dev/null +++ b/app/client/common/auth.ts @@ -0,0 +1,28 @@ +import { getClientConfig } from "@/app/config/client"; +import { ACCESS_CODE_PREFIX } from "@/app/constant"; +import { useAccessStore } from "@/app/store"; + +export function bearer(value: string) { + return `Bearer ${value.trim()}`; +} + +export function getAuthHeaders(apiKey = "") { + const accessStore = useAccessStore.getState(); + const isApp = !!getClientConfig()?.isApp; + + let headers: Record = {}; + + if (apiKey) { + // use user's api key first + headers.Authorization = bearer(apiKey); + } else if ( + accessStore.enabledAccessControl() && + !isApp && + !!accessStore.accessCode + ) { + // or use access code + headers.Authorization = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode); + } + + return headers; +} diff --git a/app/client/common/config.ts b/app/client/common/config.ts new file mode 100644 index 000000000..127773a4c --- /dev/null +++ b/app/client/common/config.ts @@ -0,0 +1,5 @@ +export const COMMON_PROVIDER_CONFIG = { + customModels: "", + models: [] as string[], + autoFetchModels: false, // fetch available models from server or not +}; diff --git a/app/client/controller.ts b/app/client/common/controller.ts similarity index 100% rename from app/client/controller.ts rename to app/client/common/controller.ts diff --git a/app/client/common/share.ts b/app/client/common/share.ts new file mode 100644 index 000000000..338e22cb2 --- /dev/null +++ b/app/client/common/share.ts @@ -0,0 +1,44 @@ +import { getClientConfig } from "@/app/config/client"; +import { ChatMessage } from "@/app/store"; + +export async function shareToShareGPT( + messages: ChatMessage[], + avatarUrl: string | null = null, +) { + const msgs = messages + .map((m) => ({ + from: m.role === "user" ? 
"human" : "gpt", + value: m.content, + })) + .concat([ + { + from: "human", + // 敬告二开开发者们,为了开源大模型的发展,请不要修改上述消息,此消息用于后续数据清洗使用 + // Please do not modify this message + value: + "Share from [ChatGPT Next Web]: https://github.com/Yidadaa/ChatGPT-Next-Web", + }, + ]); + + console.log("[Share]", messages, msgs); + const clientConfig = getClientConfig(); + const proxyUrl = "/sharegpt"; + const rawUrl = "https://sharegpt.com/api/conversations"; + const shareUrl = clientConfig?.isApp ? rawUrl : proxyUrl; + const res = await fetch(shareUrl, { + body: JSON.stringify({ + avatarUrl, + items: msgs, + }), + headers: { + "Content-Type": "application/json", + }, + method: "POST", + }); + + const resJson = await res.json(); + console.log("[Share]", resJson); + if (resJson.id) { + return `https://shareg.pt/${resJson.id}`; + } +} diff --git a/app/client/core.ts b/app/client/core.ts new file mode 100644 index 000000000..a75cf3fc0 --- /dev/null +++ b/app/client/core.ts @@ -0,0 +1,28 @@ +import { MaskConfig, ProviderConfig } from "../store"; +import { shareToShareGPT } from "./common/share"; +import { createOpenAiClient } from "./openai"; +import { ChatControllerPool } from "./common/controller"; + +export const LLMClients = { + openai: createOpenAiClient, +}; + +export function createLLMClient( + config: ProviderConfig, + maskConfig: MaskConfig, +) { + return LLMClients[maskConfig.provider as any as keyof typeof LLMClients]( + config, + maskConfig.modelConfig, + ); +} + +export function createApi() { + return { + createLLMClient, + shareToShareGPT, + controllerManager: ChatControllerPool, + }; +} + +export const api = createApi(); diff --git a/app/client/index.ts b/app/client/index.ts new file mode 100644 index 000000000..4e22af656 --- /dev/null +++ b/app/client/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./core"; diff --git a/app/client/openai/config.ts b/app/client/openai/config.ts new file mode 100644 index 000000000..b27534162 --- /dev/null +++ b/app/client/openai/config.ts @@ -0,0 +1,20 @@ +import { COMMON_PROVIDER_CONFIG } from "../common/config"; + +export const OpenAIConfig = { + model: { + model: "gpt-3.5-turbo" as string, + summarizeModel: "gpt-3.5-turbo", + + temperature: 0.5, + top_p: 1, + max_tokens: 2000, + presence_penalty: 0, + frequency_penalty: 0, + }, + provider: { + name: "OpenAI", + endpoint: "https://api.openai.com", + apiKey: "", + ...COMMON_PROVIDER_CONFIG, + }, +}; diff --git a/app/client/openai/index.ts b/app/client/openai/index.ts new file mode 100644 index 000000000..a452936de --- /dev/null +++ b/app/client/openai/index.ts @@ -0,0 +1,295 @@ +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; + +import { + API_PREFIX, + ApiPath, + DEFAULT_MODELS, + OpenaiPath, +} from "@/app/constant"; +import { ModelConfig, ProviderConfig } from "@/app/store"; + +import { OpenAI } from "./types"; + +import { ChatOptions, LLMModel, LLMUsage } from "../types"; +import Locale from "@/app/locales"; + +import { prettyObject } from "@/app/utils/format"; +import { getApiPath } from "@/app/utils/path"; +import { trimEnd } from "@/app/utils/string"; +import { omit } from "@/app/utils/object"; +import { createLogger } from "@/app/utils/log"; +import { getAuthHeaders } from "../common/auth"; + +export function createOpenAiClient( + providerConfigs: ProviderConfig, + modelConfig: ModelConfig, +) { + const openaiConfig = { ...providerConfigs.openai }; + const logger = createLogger("[OpenAI Client]"); + const openaiModelConfig = { 
...modelConfig.openai }; + + return { + headers() { + return { + "Content-Type": "application/json", + ...getAuthHeaders(openaiConfig.apiKey), + }; + }, + + path(path: OpenaiPath): string { + let baseUrl = openaiConfig.endpoint; + + // if endpoint is empty, use default endpoint + if (baseUrl.trim().length === 0) { + baseUrl = getApiPath(ApiPath.OpenAI); + } + + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(API_PREFIX)) { + baseUrl = "https://" + baseUrl; + } + + baseUrl = trimEnd(baseUrl, "/"); + + return `${baseUrl}/${path}`; + }, + + extractMessage(res: OpenAI.ChatCompletionResponse) { + return res.choices[0]?.message?.content ?? ""; + }, + + beforeRequest(options: ChatOptions, stream = false) { + const messages = options.messages.map((v) => ({ + role: v.role, + content: v.content, + })); + + if (options.shouldSummarize) { + openaiModelConfig.model = openaiModelConfig.summarizeModel; + } + + const requestBody: OpenAI.ChatCompletionRequest = { + messages, + stream, + ...omit(openaiModelConfig, "summarizeModel"), + }; + + const path = this.path(OpenaiPath.Chat); + + logger.log("path = ", path, requestBody); + + const controller = new AbortController(); + options.onController?.(controller); + + const payload = { + method: "POST", + body: JSON.stringify(requestBody), + signal: controller.signal, + headers: this.headers(), + }; + + return { + path, + payload, + controller, + }; + }, + + async chat(options: ChatOptions) { + try { + const { path, payload, controller } = this.beforeRequest( + options, + false, + ); + + controller.signal.onabort = () => options.onFinish(""); + + const res = await fetch(path, payload); + const resJson = await res.json(); + + const message = this.extractMessage(resJson); + options.onFinish(message); + } catch (e) { + logger.error("failed to chat", e); + options.onError?.(e as Error); + } + }, + + async chatStream(options: ChatOptions) { + try { + const { path, payload, controller } = this.beforeRequest(options, true); + + const context = { + text: "", + finished: false, + }; + + const finish = () => { + if (!context.finished) { + options.onFinish(context.text); + context.finished = true; + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(path, { + ...payload, + async onopen(res) { + const contentType = res.headers.get("content-type"); + logger.log("response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + context.text = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [context.text]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + context.text = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || context.finished) { + return finish(); + } + const chunk = msg.data; + try { + const chunkJson = JSON.parse( + chunk, + ) as OpenAI.ChatCompletionStreamResponse; + const delta = chunkJson.choices[0].delta.content; + if (delta) { + context.text += delta; + options.onUpdate?.(context.text, delta); + } + } catch (e) { + logger.error("[Request] parse error", chunk, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + }, + openWhenHidden: 
true, + }); + } catch (e) { + logger.error("failed to chat", e); + options.onError?.(e as Error); + } + }, + + async usage() { + const formatDate = (d: Date) => + `${d.getFullYear()}-${(d.getMonth() + 1) + .toString() + .padStart(2, "0")}-${d.getDate().toString().padStart(2, "0")}`; + const ONE_DAY = 1 * 24 * 60 * 60 * 1000; + const now = new Date(); + const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1); + const startDate = formatDate(startOfMonth); + const endDate = formatDate(new Date(Date.now() + ONE_DAY)); + + const [used, subs] = await Promise.all([ + fetch( + `${this.path( + OpenaiPath.Usage, + )}?start_date=${startDate}&end_date=${endDate}`, + { + method: "GET", + headers: this.headers(), + }, + ), + fetch(this.path(OpenaiPath.Subs), { + method: "GET", + headers: this.headers(), + }), + ]); + + if (!used.ok || !subs.ok) { + throw new Error("Failed to query usage from openai"); + } + + const response = (await used.json()) as { + total_usage?: number; + error?: { + type: string; + message: string; + }; + }; + + const total = (await subs.json()) as { + hard_limit_usd?: number; + }; + + if (response.error?.type) { + throw Error(response.error?.message); + } + + response.total_usage = Math.round(response.total_usage ?? 0) / 100; + total.hard_limit_usd = + Math.round((total.hard_limit_usd ?? 0) * 100) / 100; + + return { + used: response.total_usage, + total: total.hard_limit_usd, + } as LLMUsage; + }, + + async models(): Promise<LLMModel[]> { + const customModels = openaiConfig.customModels + .split(",") + .map((v) => v.trim()) + .map((v) => ({ + name: v, + available: true, + })); + + if (!openaiConfig.autoFetchModels) { + return [...DEFAULT_MODELS.slice(), ...customModels]; + } + + const res = await fetch(this.path(OpenaiPath.ListModel), { + method: "GET", + headers: this.headers(), + }); + + const resJson = (await res.json()) as OpenAI.ListModelResponse; + const chatModels = + resJson.data?.filter((m) => m.id.startsWith("gpt-")) ?? 
[]; + + return chatModels + .map((m) => ({ + name: m.id, + available: true, + })) + .concat(customModels); + }, + }; +} diff --git a/app/client/openai/types.ts b/app/client/openai/types.ts new file mode 100644 index 000000000..d1383922d --- /dev/null +++ b/app/client/openai/types.ts @@ -0,0 +1,79 @@ +export namespace OpenAI { + export type Role = "system" | "user" | "assistant" | "function"; + export type FinishReason = "stop" | "length" | "function_call"; + + export interface Message { + role: Role; + content?: string; + function_call?: { + name: string; + arguments: string; + }; + } + + export interface Function { + name: string; + description?: string; + parameters: object; + } + + export interface ListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; + } + + export interface ChatCompletionChoice { + index: number; + message: Message; + finish_reason: FinishReason; + } + + export interface ChatCompletionUsage { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; + } + + export interface ChatCompletionResponse { + id: string; + object: string; + created: number; + model: string; + choices: ChatCompletionChoice[]; + usage: ChatCompletionUsage; + } + + export interface ChatCompletionChunkChoice { + index: number; + delta: Message; + finish_reason?: FinishReason; + } + + export interface ChatCompletionStreamResponse { + object: string; + created: number; + model: string; + choices: ChatCompletionChunkChoice[]; + } + + export interface ChatCompletionRequest { + model: string; + messages: Message[]; + + functions?: Function[]; + function_call?: "none" | "auto"; + + temperature?: number; + top_p?: number; + n?: number; + stream?: boolean; + stop?: string | string[]; + max_tokens?: number; + presence_penalty?: number; + frequency_penalty?: number; + } +} diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts deleted file mode 100644 index fd4eb59ce..000000000 --- a/app/client/platforms/openai.ts +++ /dev/null @@ -1,281 +0,0 @@ -import { - DEFAULT_API_HOST, - DEFAULT_MODELS, - OpenaiPath, - REQUEST_TIMEOUT_MS, -} from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; - -import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; -import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; -import { getClientConfig } from "@/app/config/client"; - -export interface OpenAIListModelResponse { - object: string; - data: Array<{ - id: string; - object: string; - root: string; - }>; -} - -export class ChatGPTApi implements LLMApi { - private disableListModels = true; - - path(path: string): string { - let openaiUrl = useAccessStore.getState().openaiUrl; - const apiPath = "/api/openai"; - - if (openaiUrl.length === 0) { - const isApp = !!getClientConfig()?.isApp; - openaiUrl = isApp ? DEFAULT_API_HOST : apiPath; - } - if (openaiUrl.endsWith("/")) { - openaiUrl = openaiUrl.slice(0, openaiUrl.length - 1); - } - if (!openaiUrl.startsWith("http") && !openaiUrl.startsWith(apiPath)) { - openaiUrl = "https://" + openaiUrl; - } - return [openaiUrl, path].join("/"); - } - - extractMessage(res: any) { - return res.choices?.at(0)?.message?.content ?? 
""; - } - - async chat(options: ChatOptions) { - const messages = options.messages.map((v) => ({ - role: v.role, - content: v.content, - })); - - const modelConfig = { - ...useAppConfig.getState().modelConfig, - ...useChatStore.getState().currentSession().mask.modelConfig, - ...{ - model: options.config.model, - }, - }; - - const requestPayload = { - messages, - stream: options.config.stream, - model: modelConfig.model, - temperature: modelConfig.temperature, - presence_penalty: modelConfig.presence_penalty, - frequency_penalty: modelConfig.frequency_penalty, - top_p: modelConfig.top_p, - }; - - console.log("[Request] openai payload: ", requestPayload); - - const shouldStream = !!options.config.stream; - const controller = new AbortController(); - options.onController?.(controller); - - try { - const chatPath = this.path(OpenaiPath.ChatPath); - const chatPayload = { - method: "POST", - body: JSON.stringify(requestPayload), - signal: controller.signal, - headers: getHeaders(), - }; - - // make a fetch request - const requestTimeoutId = setTimeout( - () => controller.abort(), - REQUEST_TIMEOUT_MS, - ); - - if (shouldStream) { - let responseText = ""; - let finished = false; - - const finish = () => { - if (!finished) { - options.onFinish(responseText); - finished = true; - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, - ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const delta = json.choices[0].delta.content; - if (delta) { - responseText += delta; - options.onUpdate?.(responseText, delta); - } - } catch (e) { - console.error("[Request] parse error", text, msg); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); - } else { - const res = await fetch(chatPath, chatPayload); - clearTimeout(requestTimeoutId); - - const resJson = await res.json(); - const message = this.extractMessage(resJson); - options.onFinish(message); - } - } catch (e) { - console.log("[Request] failed to make a chat request", e); - options.onError?.(e as Error); - } - } - async usage() { - const formatDate = (d: Date) => - `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d - .getDate() - .toString() - .padStart(2, "0")}`; - const ONE_DAY = 1 * 24 * 60 * 60 * 1000; - const now = new Date(); - const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1); - const startDate = formatDate(startOfMonth); - const endDate = formatDate(new Date(Date.now() + ONE_DAY)); - - const [used, subs] = await Promise.all([ - fetch( - this.path( - 
`${OpenaiPath.UsagePath}?start_date=${startDate}&end_date=${endDate}`, - ), - { - method: "GET", - headers: getHeaders(), - }, - ), - fetch(this.path(OpenaiPath.SubsPath), { - method: "GET", - headers: getHeaders(), - }), - ]); - - if (used.status === 401) { - throw new Error(Locale.Error.Unauthorized); - } - - if (!used.ok || !subs.ok) { - throw new Error("Failed to query usage from openai"); - } - - const response = (await used.json()) as { - total_usage?: number; - error?: { - type: string; - message: string; - }; - }; - - const total = (await subs.json()) as { - hard_limit_usd?: number; - }; - - if (response.error && response.error.type) { - throw Error(response.error.message); - } - - if (response.total_usage) { - response.total_usage = Math.round(response.total_usage) / 100; - } - - if (total.hard_limit_usd) { - total.hard_limit_usd = Math.round(total.hard_limit_usd * 100) / 100; - } - - return { - used: response.total_usage, - total: total.hard_limit_usd, - } as LLMUsage; - } - - async models(): Promise<LLMModel[]> { - if (this.disableListModels) { - return DEFAULT_MODELS.slice(); - } - - const res = await fetch(this.path(OpenaiPath.ListModelPath), { - method: "GET", - headers: { - ...getHeaders(), - }, - }); - - const resJson = (await res.json()) as OpenAIListModelResponse; - const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-")); - console.log("[Models]", chatModels); - - if (!chatModels) { - return []; - } - - return chatModels.map((m) => ({ - name: m.id, - available: true, - })); - } -} -export { OpenaiPath }; diff --git a/app/client/types.ts b/app/client/types.ts new file mode 100644 index 000000000..694059e1c --- /dev/null +++ b/app/client/types.ts @@ -0,0 +1,39 @@ +import { DEFAULT_MODELS } from "../constant"; + +export interface LLMUsage { + used: number; + total: number; + available: boolean; +} + +export interface LLMModel { + name: string; + available: boolean; +} + +export const ROLES = ["system", "user", "assistant"] as const; +export type MessageRole = (typeof ROLES)[number]; + +export type ChatModel = (typeof DEFAULT_MODELS)[number]["name"]; + +export interface RequestMessage { + role: MessageRole; + content: string; +} + +export interface ChatOptions { + messages: RequestMessage[]; + shouldSummarize?: boolean; + + onUpdate?: (message: string, chunk: string) => void; + onFinish: (message: string) => void; + onError?: (err: Error) => void; + onController?: (controller: AbortController) => void; +} + +export type LLMClient = { + chat(options: ChatOptions): Promise<void>; + chatStream(options: ChatOptions): Promise<void>; + usage(): Promise<LLMUsage>; + models(): Promise<LLMModel[]>; +}; diff --git a/app/components/auth.tsx b/app/components/auth.tsx index b82d0e894..b13a695d6 100644 --- a/app/components/auth.tsx +++ b/app/components/auth.tsx @@ -3,7 +3,7 @@ import { IconButton } from "./button"; import { useNavigate } from "react-router-dom"; import { Path } from "../constant"; -import { useAccessStore } from "../store"; +import { useAccessStore, useAppConfig, useChatStore } from "../store"; import Locale from "../locales"; import BotIcon from "../icons/bot.svg"; @@ -13,10 +13,14 @@ import { getClientConfig } from "../config/client"; export function AuthPage() { const navigate = useNavigate(); const access = useAccessStore(); + const config = useAppConfig(); const goHome = () => navigate(Path.Home); const goChat = () => navigate(Path.Chat); - const resetAccessCode = () => { access.updateCode(""); access.updateToken(""); }; // Reset access code to empty string + const resetAccessCode = () => { 
access.update((config) => (config.accessCode = "")); + config.update((config) => (config.providerConfig.openai.apiKey = "")); + }; // Reset access code and OpenAI API key to empty strings useEffect(() => { if (getClientConfig()?.isApp) { @@ -40,7 +44,9 @@ export function AuthPage() { placeholder={Locale.Auth.Input} value={access.accessCode} onChange={(e) => { - access.updateCode(e.currentTarget.value); + access.update( + (config) => (config.accessCode = e.currentTarget.value), + ); }} /> {!access.hideUserApiKey ? ( @@ -50,9 +56,12 @@ className={styles["auth-input"]} type="password" placeholder={Locale.Settings.Token.Placeholder} - value={access.token} + value={config.providerConfig.openai.apiKey} onChange={(e) => { - access.updateToken(e.currentTarget.value); + config.update( + (config) => + (config.providerConfig.openai.apiKey = e.currentTarget.value), + ); }} /> diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx index 7ba555852..b27430e65 100644 --- a/app/components/chat-list.tsx +++ b/app/components/chat-list.tsx @@ -39,6 +39,9 @@ export function ChatItem(props: { }); } }, [props.selected]); + + const modelConfig = useChatStore().extractModelConfig(props.mask.config); + return ( {(provided) => ( @@ -60,7 +63,10 @@ export function ChatItem(props: { {props.narrow ? ( 
- +
{props.count} diff --git a/app/components/chat.tsx b/app/components/chat.tsx index cca096eb8..7b7b66bec 100644 --- a/app/components/chat.tsx +++ b/app/components/chat.tsx @@ -1,12 +1,5 @@ import { useDebouncedCallback } from "use-debounce"; -import React, { - useState, - useRef, - useEffect, - useMemo, - useCallback, - Fragment, -} from "react"; +import React, { useState, useRef, useEffect, useMemo, Fragment } from "react"; import SendWhiteIcon from "../icons/send-white.svg"; import BrainIcon from "../icons/brain.svg"; @@ -37,15 +30,12 @@ import RobotIcon from "../icons/robot.svg"; import { ChatMessage, - SubmitKey, useChatStore, BOT_HELLO, createMessage, useAccessStore, - Theme, useAppConfig, DEFAULT_TOPIC, - ModelType, } from "../store"; import { @@ -57,7 +47,7 @@ import { import dynamic from "next/dynamic"; -import { ChatControllerPool } from "../client/controller"; +import { ChatControllerPool } from "../client/common/controller"; import { Prompt, usePromptStore } from "../store/prompt"; import Locale from "../locales"; @@ -73,11 +63,10 @@ import { showPrompt, showToast, } from "./ui-lib"; -import { useLocation, useNavigate } from "react-router-dom"; +import { useNavigate } from "react-router-dom"; import { CHAT_PAGE_SIZE, LAST_INPUT_KEY, - MAX_RENDER_MSG_COUNT, Path, REQUEST_TIMEOUT_MS, UNFINISHED_INPUT, @@ -89,6 +78,8 @@ import { ChatCommandPrefix, useChatCommand, useCommand } from "../command"; import { prettyObject } from "../utils/format"; import { ExportMessageModal } from "./exporter"; import { getClientConfig } from "../config/client"; +import { deepClone } from "../utils/clone"; +import { SubmitKey, Theme } from "../typing"; const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { loading: () => , @@ -142,7 +133,7 @@ export function SessionConfigModel(props: { onClose: () => void }) { }} shouldSyncFromGlobal extraListItems={ - session.mask.modelConfig.sendMemory ? ( + session.mask.config.chatConfig.sendMemory ? 
( ChatControllerPool.stopAll(); + const client = chatStore.getClient(); + const modelConfig = chatStore.getCurrentModelConfig(); + const currentModel = modelConfig.model; // switch model - const currentModel = chatStore.currentSession().mask.modelConfig.model; - const models = useMemo( - () => - config - .allModels() - .filter((m) => m.available) - .map((m) => m.name), - [config], - ); + const [models, setModels] = useState([]); + useEffect(() => { + client + .models() + .then((_models) => + setModels(_models.filter((v) => v.available).map((v) => v.name)), + ); + }, []); const [showModelSelector, setShowModelSelector] = useState(false); return ( @@ -526,7 +519,7 @@ export function ChatActions(props: { onSelection={(s) => { if (s.length === 0) return; chatStore.updateCurrentSession((session) => { - session.mask.modelConfig.model = s[0] as ModelType; + chatStore.extractModelConfig(session.mask.config).model = s[0]; session.mask.syncGlobalConfig = false; }); showToast(s[0]); @@ -603,6 +596,9 @@ function _Chat() { type RenderMessage = ChatMessage & { preview?: boolean }; const chatStore = useChatStore(); + const modelConfig = chatStore.getCurrentModelConfig(); + const maskConfig = chatStore.getCurrentMaskConfig(); + const session = chatStore.currentSession(); const config = useAppConfig(); const fontSize = config.fontSize; @@ -747,7 +743,7 @@ function _Chat() { // auto sync mask config from global config if (session.mask.syncGlobalConfig) { console.log("[Mask] syncing from global, name = ", session.mask.name); - session.mask.modelConfig = { ...config.modelConfig }; + session.mask.config = deepClone(config.globalMaskConfig); } }); // eslint-disable-next-line react-hooks/exhaustive-deps @@ -979,7 +975,7 @@ function _Chat() { console.log("[Command] got code from url: ", text); showConfirm(Locale.URLCommand.Code + `code = ${text}`).then((res) => { if (res) { - accessStore.updateCode(text); + accessStore.update((config) => (config.accessCode = text)); } }); }, @@ -999,10 +995,10 @@ function _Chat() { ).then((res) => { if (!res) return; if (payload.key) { - accessStore.updateToken(payload.key); + // TODO: auto-fill openai api key here, must specific provider type } if (payload.url) { - accessStore.updateOpenAiUrl(payload.url); + // TODO: auto-fill openai url here, must specific provider type } }); } @@ -1159,7 +1155,10 @@ function _Chat() { {["system"].includes(message.role) ? 
( ) : ( - + )} )} diff --git a/app/components/config/index.tsx b/app/components/config/index.tsx new file mode 100644 index 000000000..b08fe0608 --- /dev/null +++ b/app/components/config/index.tsx @@ -0,0 +1,171 @@ +import { + ChatConfig, + LLMProvider, + LLMProviders, + ModelConfig, + ProviderConfig, +} from "@/app/store"; +import { Updater } from "@/app/typing"; +import { OpenAIModelConfig } from "./openai/model"; +import { OpenAIProviderConfig } from "./openai/provider"; +import { ListItem, Select } from "../ui-lib"; +import Locale from "@/app/locales"; +import { InputRange } from "../input-range"; + +export function ModelConfigList(props: { + provider: LLMProvider; + config: ModelConfig; + updateConfig: Updater; +}) { + if (props.provider === "openai") { + return ( + { + props.updateConfig((config) => update(config.openai)); + }} + models={[ + { + name: "gpt-3.5-turbo", + available: true, + }, + { + name: "gpt-4", + available: true, + }, + ]} + /> + ); + } + + return null; +} + +export function ProviderConfigList(props: { + provider: LLMProvider; + config: ProviderConfig; + updateConfig: Updater; +}) { + if (props.provider === "openai") { + return ( + { + props.updateConfig((config) => update(config.openai)); + }} + /> + ); + } + + return null; +} + +export function ProviderSelectItem(props: { + value: LLMProvider; + update: (value: LLMProvider) => void; +}) { + return ( + + + + ); +} + +export function ChatConfigList(props: { + config: ChatConfig; + updateConfig: (updater: (config: ChatConfig) => void) => void; +}) { + return ( + <> + + + props.updateConfig( + (config) => + (config.enableInjectSystemPrompts = e.currentTarget.checked), + ) + } + > + + + + + props.updateConfig( + (config) => (config.template = e.currentTarget.value), + ) + } + > + + + + + props.updateConfig( + (config) => (config.historyMessageCount = e.target.valueAsNumber), + ) + } + > + + + + + props.updateConfig( + (config) => + (config.compressMessageLengthThreshold = + e.currentTarget.valueAsNumber), + ) + } + > + + + + props.updateConfig( + (config) => (config.sendMemory = e.currentTarget.checked), + ) + } + > + + + ); +} diff --git a/app/components/config/openai/model.tsx b/app/components/config/openai/model.tsx new file mode 100644 index 000000000..acd5b74e4 --- /dev/null +++ b/app/components/config/openai/model.tsx @@ -0,0 +1,113 @@ +import { ModelConfig } from "@/app/store"; +import { ModelConfigProps } from "../types"; +import { ListItem, Select } from "../../ui-lib"; +import Locale from "@/app/locales"; +import { InputRange } from "../../input-range"; + +export function OpenAIModelConfig( + props: ModelConfigProps, +) { + return ( + <> + + + + + { + props.updateConfig( + (config) => (config.temperature = e.currentTarget.valueAsNumber), + ); + }} + > + + + { + props.updateConfig( + (config) => (config.top_p = e.currentTarget.valueAsNumber), + ); + }} + > + + + + props.updateConfig( + (config) => (config.max_tokens = e.currentTarget.valueAsNumber), + ) + } + > + + + { + props.updateConfig( + (config) => + (config.presence_penalty = e.currentTarget.valueAsNumber), + ); + }} + > + + + + { + props.updateConfig( + (config) => + (config.frequency_penalty = e.currentTarget.valueAsNumber), + ); + }} + > + + + ); +} diff --git a/app/components/config/openai/provider.tsx b/app/components/config/openai/provider.tsx new file mode 100644 index 000000000..b905b130d --- /dev/null +++ b/app/components/config/openai/provider.tsx @@ -0,0 +1,71 @@ +import { ProviderConfig } from "@/app/store"; +import { ProviderConfigProps } 
from "../types"; +import { ListItem, PasswordInput } from "../../ui-lib"; +import Locale from "@/app/locales"; +import { REMOTE_API_HOST } from "@/app/constant"; + +export function OpenAIProviderConfig( + props: ProviderConfigProps, +) { + return ( + <> + + + props.updateConfig( + (config) => (config.endpoint = e.currentTarget.value), + ) + } + > + + + { + props.updateConfig( + (config) => (config.apiKey = e.currentTarget.value), + ); + }} + /> + + + + props.updateConfig( + (config) => (config.customModels = e.currentTarget.value), + ) + } + > + + + + + props.updateConfig( + (config) => (config.autoFetchModels = e.currentTarget.checked), + ) + } + > + + + ); +} diff --git a/app/components/config/types.ts b/app/components/config/types.ts new file mode 100644 index 000000000..529e60fa8 --- /dev/null +++ b/app/components/config/types.ts @@ -0,0 +1,14 @@ +import { LLMModel } from "@/app/client"; +import { Updater } from "@/app/typing"; + +export type ModelConfigProps = { + models: LLMModel[]; + config: T; + updateConfig: Updater; +}; + +export type ProviderConfigProps = { + readonly?: boolean; + config: T; + updateConfig: Updater; +}; diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx index 03aac05f2..6f4dc62a9 100644 --- a/app/components/emoji.tsx +++ b/app/components/emoji.tsx @@ -28,7 +28,7 @@ export function AvatarPicker(props: { ); } -export function Avatar(props: { model?: ModelType; avatar?: string }) { +export function Avatar(props: { model?: string; avatar?: string }) { if (props.model) { return (
diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx index 0a885d874..7cba87a8d 100644 --- a/app/components/exporter.tsx +++ b/app/components/exporter.tsx @@ -27,12 +27,12 @@ import { Avatar } from "./emoji"; import dynamic from "next/dynamic"; import NextImage from "next/image"; -import { toBlob, toJpeg, toPng } from "html-to-image"; +import { toBlob, toPng } from "html-to-image"; import { DEFAULT_MASK_AVATAR } from "../store/mask"; -import { api } from "../client/api"; import { prettyObject } from "../utils/format"; import { EXPORT_MESSAGE_CLASS_NAME } from "../constant"; import { getClientConfig } from "../config/client"; +import { api } from "../client"; const Markdown = dynamic(async () => (await import("./markdown")).Markdown, { loading: () => , @@ -290,7 +290,7 @@ export function PreviewActions(props: { setShouldExport(false); api - .share(msgs) + .shareToShareGPT(msgs) .then((res) => { if (!res) return; showModal({ @@ -403,6 +403,7 @@ export function ImagePreviewer(props: { const chatStore = useChatStore(); const session = chatStore.currentSession(); const mask = session.mask; + const modelConfig = chatStore.getCurrentModelConfig(); const config = useAppConfig(); const previewRef = useRef(null); @@ -437,13 +438,13 @@ export function ImagePreviewer(props: { showToast(Locale.Export.Image.Toast); const dom = previewRef.current; if (!dom) return; - + const isApp = getClientConfig()?.isApp; - + try { const blob = await toPng(dom); if (!blob) return; - + if (isMobile || (isApp && window.__TAURI__)) { if (isApp && window.__TAURI__) { const result = await window.__TAURI__.dialog.save({ @@ -459,7 +460,7 @@ export function ImagePreviewer(props: { }, ], }); - + if (result !== null) { const response = await fetch(blob); const buffer = await response.arrayBuffer(); @@ -526,7 +527,7 @@ export function ImagePreviewer(props: {
- {Locale.Exporter.Model}: {mask.modelConfig.model} + {Locale.Exporter.Model}: {modelConfig.model}
{Locale.Exporter.Messages}: {props.messages.length} diff --git a/app/components/home.tsx b/app/components/home.tsx index 811cbdf51..1fc737952 100644 --- a/app/components/home.tsx +++ b/app/components/home.tsx @@ -27,7 +27,6 @@ import { SideBar } from "./sidebar"; import { useAppConfig } from "../store/config"; import { AuthPage } from "./auth"; import { getClientConfig } from "../config/client"; -import { api } from "../client/api"; import { useAccessStore } from "../store"; export function Loading(props: { noLogo?: boolean }) { @@ -128,7 +127,8 @@ function Screen() { const isHome = location.pathname === Path.Home; const isAuth = location.pathname === Path.Auth; const isMobileScreen = useMobileScreen(); - const shouldTightBorder = getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen); + const shouldTightBorder = + getClientConfig()?.isApp || (config.tightBorder && !isMobileScreen); useEffect(() => { loadAsyncGoogleFont(); @@ -170,10 +170,7 @@ export function useLoadData() { const config = useAppConfig(); useEffect(() => { - (async () => { - const models = await api.llm.models(); - config.mergeModels(models); - })(); + // TODO: fetch available models from server // eslint-disable-next-line react-hooks/exhaustive-deps }, []); } @@ -185,7 +182,7 @@ export function Home() { useEffect(() => { console.log("[Config] got config from build time", getClientConfig()); - useAccessStore.getState().fetch(); + useAccessStore.getState().fetchConfig(); }, []); if (!useHasHydrated()) { diff --git a/app/components/mask.tsx b/app/components/mask.tsx index 9fe1d485a..1dc04c71a 100644 --- a/app/components/mask.tsx +++ b/app/components/mask.tsx @@ -21,7 +21,6 @@ import { useAppConfig, useChatStore, } from "../store"; -import { ROLES } from "../client/api"; import { Input, List, @@ -36,19 +35,20 @@ import Locale, { AllLangs, ALL_LANG_OPTIONS, Lang } from "../locales"; import { useNavigate } from "react-router-dom"; import chatStyle from "./chat.module.scss"; -import { useEffect, useState } from "react"; +import { useState } from "react"; import { copyToClipboard, downloadAs, readFromFile } from "../utils"; import { Updater } from "../typing"; -import { ModelConfigList } from "./model-config"; import { FileName, Path } from "../constant"; import { BUILTIN_MASK_STORE } from "../masks"; -import { nanoid } from "nanoid"; import { DragDropContext, Droppable, Draggable, OnDragEndResponder, } from "@hello-pangea/dnd"; +import { ROLES } from "../client"; +import { deepClone } from "../utils/clone"; +import { ChatConfigList, ModelConfigList, ProviderSelectItem } from "./config"; // drag and drop helper function function reorder(list: T[], startIndex: number, endIndex: number): T[] { @@ -58,11 +58,11 @@ function reorder(list: T[], startIndex: number, endIndex: number): T[] { return result; } -export function MaskAvatar(props: { mask: Mask }) { - return props.mask.avatar !== DEFAULT_MASK_AVATAR ? ( - +export function MaskAvatar(props: { avatar: string; model: string }) { + return props.avatar !== DEFAULT_MASK_AVATAR ? 
( + ) : ( - + ); } @@ -74,14 +74,15 @@ export function MaskConfig(props: { shouldSyncFromGlobal?: boolean; }) { const [showPicker, setShowPicker] = useState(false); + const modelConfig = useChatStore().extractModelConfig(props.mask.config); const updateConfig = (updater: (config: ModelConfig) => void) => { if (props.readonly) return; - const config = { ...props.mask.modelConfig }; - updater(config); + const config = deepClone(props.mask.config); + updater(config.modelConfig); props.updateMask((mask) => { - mask.modelConfig = config; + mask.config = config; // if user changed current session mask, it will disable auto sync mask.syncGlobalConfig = false; }); @@ -123,7 +124,10 @@ export function MaskConfig(props: { onClick={() => setShowPicker(true)} style={{ cursor: "pointer" }} > - +
@@ -182,7 +186,7 @@ export function MaskConfig(props: { ) { props.updateMask((mask) => { mask.syncGlobalConfig = checked; - mask.modelConfig = { ...globalConfig.modelConfig }; + mask.config = deepClone(globalConfig.globalMaskConfig); }); } else if (!checked) { props.updateMask((mask) => { @@ -196,10 +200,28 @@ export function MaskConfig(props: { + { + props.updateMask((mask) => (mask.config.provider = value)); + }} + /> + + + + { + const chatConfig = deepClone(props.mask.config.chatConfig); + updater(chatConfig); + props.updateMask((mask) => (mask.config.chatConfig = chatConfig)); + }} + /> {props.extraListItems} @@ -398,7 +420,7 @@ export function MaskPage() { setSearchText(text); if (text.length > 0) { const result = allMasks.filter((m) => - m.name.toLowerCase().includes(text.toLowerCase()) + m.name.toLowerCase().includes(text.toLowerCase()), ); setSearchMasks(result); } else { @@ -523,14 +545,17 @@ export function MaskPage() {
- +
{m.name}
{`${Locale.Mask.Item.Info(m.context.length)} / ${ ALL_LANG_OPTIONS[m.lang] - } / ${m.modelConfig.model}`} + } / ${chatStore.extractModelConfig(m.config).model}`}
diff --git a/app/components/message-selector.tsx b/app/components/message-selector.tsx index cadf52e64..9a2c4cbff 100644 --- a/app/components/message-selector.tsx +++ b/app/components/message-selector.tsx @@ -71,6 +71,7 @@ export function MessageSelector(props: { onSelected?: (messages: ChatMessage[]) => void; }) { const chatStore = useChatStore(); + const modelConfig = chatStore.getCurrentModelConfig(); const session = chatStore.currentSession(); const isValid = (m: ChatMessage) => m.content && !m.isError && !m.streaming; const messages = session.messages.filter( @@ -195,7 +196,10 @@ export function MessageSelector(props: { {m.role === "user" ? ( ) : ( - + )}
diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx index 63950a40d..00734382c 100644 --- a/app/components/model-config.tsx +++ b/app/components/model-config.tsx @@ -4,10 +4,12 @@ import Locale from "../locales"; import { InputRange } from "./input-range"; import { ListItem, Select } from "./ui-lib"; -export function ModelConfigList(props: { +export function _ModelConfigList(props: { modelConfig: ModelConfig; updateConfig: (updater: (config: ModelConfig) => void) => void; }) { + return null; + /* const config = useAppConfig(); return ( @@ -130,84 +132,8 @@ export function ModelConfigList(props: { > - - - props.updateConfig( - (config) => - (config.enableInjectSystemPrompts = e.currentTarget.checked), - ) - } - > - - - - - props.updateConfig( - (config) => (config.template = e.currentTarget.value), - ) - } - > - - - - - props.updateConfig( - (config) => (config.historyMessageCount = e.target.valueAsNumber), - ) - } - > - - - - - props.updateConfig( - (config) => - (config.compressMessageLengthThreshold = - e.currentTarget.valueAsNumber), - ) - } - > - - - - props.updateConfig( - (config) => (config.sendMemory = e.currentTarget.checked), - ) - } - > - + ); + */ } diff --git a/app/components/new-chat.tsx b/app/components/new-chat.tsx index 76cbbeeb1..dac918e12 100644 --- a/app/components/new-chat.tsx +++ b/app/components/new-chat.tsx @@ -29,9 +29,11 @@ function getIntersectionArea(aRect: DOMRect, bRect: DOMRect) { } function MaskItem(props: { mask: Mask; onClick?: () => void }) { + const modelConfig = useChatStore().extractModelConfig(props.mask.config); + return (
- +
{props.mask.name}
); diff --git a/app/components/settings.tsx b/app/components/settings.tsx index 795469a96..ffe3850f0 100644 --- a/app/components/settings.tsx +++ b/app/components/settings.tsx @@ -30,16 +30,15 @@ import { showConfirm, showToast, } from "./ui-lib"; -import { ModelConfigList } from "./model-config"; import { IconButton } from "./button"; import { - SubmitKey, useChatStore, - Theme, useUpdateStore, useAccessStore, useAppConfig, + LLMProvider, + LLMProviders, } from "../store"; import Locale, { @@ -61,6 +60,14 @@ import { useSyncStore } from "../store/sync"; import { nanoid } from "nanoid"; import { useMaskStore } from "../store/mask"; import { ProviderType } from "../utils/cloud"; +import { + ChatConfigList, + ModelConfigList, + ProviderConfigList, + ProviderSelectItem, +} from "./config"; +import { SubmitKey, Theme } from "../typing"; +import { deepClone } from "../utils/clone"; function EditPromptModal(props: { id: string; onClose: () => void }) { const promptStore = usePromptStore(); @@ -757,8 +764,7 @@ export function Settings() { step="1" onChange={(e) => updateConfig( - (config) => - (config.fontSize = Number.parseInt(e.currentTarget.value)), + (config) => (config.fontSize = e.currentTarget.valueAsNumber), ) } > @@ -770,11 +776,14 @@ export function Settings() { > updateConfig( (config) => - (config.enableAutoGenerateTitle = e.currentTarget.checked), + (config.globalMaskConfig.chatConfig.enableAutoGenerateTitle = + e.currentTarget.checked), ) } > @@ -877,7 +886,9 @@ export function Settings() { type="text" placeholder={Locale.Settings.AccessCode.Placeholder} onChange={(e) => { - accessStore.updateCode(e.currentTarget.value); + accessStore.update( + (config) => (config.accessCode = e.currentTarget.value), + ); }} /> @@ -885,36 +896,7 @@ export function Settings() { <> )} - {!accessStore.hideUserApiKey ? ( - <> - - - accessStore.updateOpenAiUrl(e.currentTarget.value) - } - > - - - { - accessStore.updateToken(e.currentTarget.value); - }} - /> - - - ) : null} + {!accessStore.hideUserApiKey ? <> : null} {!accessStore.hideBalanceQuery ? 
( ) : null} - - - - config.update( - (config) => (config.customModels = e.currentTarget.value), - ) - } - > - + + config.update((_config) => { + _config.globalMaskConfig.provider = value; + }) + } + /> + + { + config.update((_config) => update(_config.providerConfig)); + }} + /> { - const modelConfig = { ...config.modelConfig }; + const modelConfig = { ...config.globalMaskConfig.modelConfig }; updater(modelConfig); - config.update((config) => (config.modelConfig = modelConfig)); + config.update( + (config) => (config.globalMaskConfig.modelConfig = modelConfig), + ); + }} + /> + { + const chatConfig = deepClone(config.globalMaskConfig.chatConfig); + updater(chatConfig); + config.update( + (config) => (config.globalMaskConfig.chatConfig = chatConfig), + ); }} /> diff --git a/app/constant.ts b/app/constant.ts index e03e00971..15cdf412f 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -8,8 +8,8 @@ export const FETCH_COMMIT_URL = `https://api.github.com/repos/${OWNER}/${REPO}/c export const FETCH_TAG_URL = `https://api.github.com/repos/${OWNER}/${REPO}/tags?per_page=1`; export const RUNTIME_CONFIG_DOM = "danger-runtime-config"; -export const DEFAULT_CORS_HOST = "https://ab.nextweb.fun"; -export const DEFAULT_API_HOST = `${DEFAULT_CORS_HOST}/api/proxy`; +export const REMOTE_CORS_HOST = "https://ab.nextweb.fun"; +export const REMOTE_API_HOST = `${REMOTE_CORS_HOST}/api/proxy`; export enum Path { Home = "/", @@ -20,8 +20,12 @@ export enum Path { Auth = "/auth", } +export const API_PREFIX = "/api"; + export enum ApiPath { + OpenAI = "/api/openai", Cors = "/api/cors", + Config = "/api/config", } export enum SlotID { @@ -59,12 +63,12 @@ export const REQUEST_TIMEOUT_MS = 60000; export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown"; -export const OpenaiPath = { - ChatPath: "v1/chat/completions", - UsagePath: "dashboard/billing/usage", - SubsPath: "dashboard/billing/subscription", - ListModelPath: "v1/models", -}; +export enum OpenaiPath { + Chat = "v1/chat/completions", + Usage = "dashboard/billing/usage", + Subs = "dashboard/billing/subscription", + ListModel = "v1/models", +} export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang export const DEFAULT_SYSTEM_TEMPLATE = ` diff --git a/app/locales/ar.ts b/app/locales/ar.ts index d5844acd6..221c1bc7e 100644 --- a/app/locales/ar.ts +++ b/app/locales/ar.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const ar: PartialLocaleType = { diff --git a/app/locales/bn.ts b/app/locales/bn.ts index 2db132cec..7660924d4 100644 --- a/app/locales/bn.ts +++ b/app/locales/bn.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import { PartialLocaleType } from "./index"; const bn: PartialLocaleType = { diff --git a/app/locales/cn.ts b/app/locales/cn.ts index 4cd963fb8..39b0a676d 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -1,5 +1,5 @@ import { getClientConfig } from "../config/client"; -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; const isApp = !!getClientConfig()?.isApp; diff --git a/app/locales/cs.ts b/app/locales/cs.ts index 57aa803e4..5cee4f721 100644 --- a/app/locales/cs.ts +++ b/app/locales/cs.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const cs: PartialLocaleType = { diff --git a/app/locales/de.ts 
b/app/locales/de.ts index e0bdc52b7..f7d3de0aa 100644 --- a/app/locales/de.ts +++ b/app/locales/de.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const de: PartialLocaleType = { diff --git a/app/locales/en.ts b/app/locales/en.ts index 928c4b72d..882afbaa0 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -1,5 +1,5 @@ import { getClientConfig } from "../config/client"; -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import { LocaleType } from "./index"; // if you are adding a new translation, please use PartialLocaleType instead of LocaleType diff --git a/app/locales/es.ts b/app/locales/es.ts index a6ae154f4..200535a44 100644 --- a/app/locales/es.ts +++ b/app/locales/es.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const es: PartialLocaleType = { diff --git a/app/locales/fr.ts b/app/locales/fr.ts index f5200f271..64a98f3e7 100644 --- a/app/locales/fr.ts +++ b/app/locales/fr.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const fr: PartialLocaleType = { diff --git a/app/locales/id.ts b/app/locales/id.ts index b5e4a70b7..ae536ee11 100644 --- a/app/locales/id.ts +++ b/app/locales/id.ts @@ -1,11 +1,12 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import { PartialLocaleType } from "./index"; const id: PartialLocaleType = { WIP: "Coming Soon...", Error: { - Unauthorized: "Akses tidak diizinkan, silakan masukkan kode akses atau masukkan kunci API OpenAI Anda. di halaman [autentikasi](/#/auth) atau di halaman [Pengaturan](/#/settings).", - }, + Unauthorized: + "Akses tidak diizinkan, silakan masukkan kode akses atau masukkan kunci API OpenAI Anda. 
di halaman [autentikasi](/#/auth) atau di halaman [Pengaturan](/#/settings).", + }, Auth: { Title: "Diperlukan Kode Akses", Tips: "Masukkan kode akses di bawah", diff --git a/app/locales/it.ts b/app/locales/it.ts index bf20747b1..d3f2033f7 100644 --- a/app/locales/it.ts +++ b/app/locales/it.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const it: PartialLocaleType = { diff --git a/app/locales/jp.ts b/app/locales/jp.ts index b63e8ba3a..57e9e507e 100644 --- a/app/locales/jp.ts +++ b/app/locales/jp.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const jp: PartialLocaleType = { @@ -20,7 +20,8 @@ const jp: PartialLocaleType = { Stop: "停止", Retry: "リトライ", Pin: "ピン", - PinToastContent: "コンテキストプロンプトに1つのメッセージをピン留めしました", + PinToastContent: + "コンテキストプロンプトに1つのメッセージをピン留めしました", PinToastAction: "表示", Delete: "削除", Edit: "編集", diff --git a/app/locales/ko.ts b/app/locales/ko.ts index 717ce30b2..ee6bf9ad2 100644 --- a/app/locales/ko.ts +++ b/app/locales/ko.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; diff --git a/app/locales/no.ts b/app/locales/no.ts index 43c92916f..c030c03d5 100644 --- a/app/locales/no.ts +++ b/app/locales/no.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const no: PartialLocaleType = { diff --git a/app/locales/ru.ts b/app/locales/ru.ts index bf98b4eb8..258792639 100644 --- a/app/locales/ru.ts +++ b/app/locales/ru.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const ru: PartialLocaleType = { diff --git a/app/locales/tr.ts b/app/locales/tr.ts index 06996d83d..6b2164711 100644 --- a/app/locales/tr.ts +++ b/app/locales/tr.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const tr: PartialLocaleType = { diff --git a/app/locales/tw.ts b/app/locales/tw.ts index e9f38d097..868ffd671 100644 --- a/app/locales/tw.ts +++ b/app/locales/tw.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const tw: PartialLocaleType = { diff --git a/app/locales/vi.ts b/app/locales/vi.ts index 8f53a3dc1..1f8b49ab5 100644 --- a/app/locales/vi.ts +++ b/app/locales/vi.ts @@ -1,4 +1,4 @@ -import { SubmitKey } from "../store/config"; +import { SubmitKey } from "@/app/typing"; import type { PartialLocaleType } from "./index"; const vi: PartialLocaleType = { diff --git a/app/masks/typing.ts b/app/masks/typing.ts index 1ded6a902..7fba6cec4 100644 --- a/app/masks/typing.ts +++ b/app/masks/typing.ts @@ -1,7 +1,9 @@ import { ModelConfig } from "../store"; import { type Mask } from "../store/mask"; -export type BuiltinMask = Omit & { - builtin: Boolean; - modelConfig: Partial; -}; +export type BuiltinMask = + | any + | (Omit & { + builtin: Boolean; + modelConfig: Partial; + }); diff --git a/app/store/access.ts b/app/store/access.ts index 9eaa81e5e..a27b3276b 100644 --- a/app/store/access.ts +++ b/app/store/access.ts @@ -1,23 +1,20 @@ -import { DEFAULT_API_HOST, 
DEFAULT_MODELS, StoreKey } from "../constant"; -import { getHeaders } from "../client/api"; +import { REMOTE_API_HOST, DEFAULT_MODELS, StoreKey } from "../constant"; import { getClientConfig } from "../config/client"; import { createPersistStore } from "../utils/store"; +import { getAuthHeaders } from "../client/common/auth"; let fetchState = 0; // 0 not fetch, 1 fetching, 2 done const DEFAULT_OPENAI_URL = - getClientConfig()?.buildMode === "export" ? DEFAULT_API_HOST : "/api/openai/"; + getClientConfig()?.buildMode === "export" ? REMOTE_API_HOST : "/api/openai/"; console.log("[API] default openai url", DEFAULT_OPENAI_URL); const DEFAULT_ACCESS_STATE = { - token: "", accessCode: "", needCode: true, hideUserApiKey: false, hideBalanceQuery: false, disableGPT4: false, - - openaiUrl: DEFAULT_OPENAI_URL, }; export const useAccessStore = createPersistStore( @@ -25,35 +22,24 @@ (set, get) => ({ enabledAccessControl() { - this.fetch(); + this.fetchConfig(); return get().needCode; }, - updateCode(code: string) { - set(() => ({ accessCode: code?.trim() })); - }, - updateToken(token: string) { - set(() => ({ token: token?.trim() })); - }, - updateOpenAiUrl(url: string) { - set(() => ({ openaiUrl: url?.trim() })); - }, isAuthorized() { - this.fetch(); + this.fetchConfig(); // has access code or disabled access control - return ( - !!get().token || !!get().accessCode || !this.enabledAccessControl() - ); + return !!get().accessCode || !this.enabledAccessControl(); }, - fetch() { + fetchConfig() { if (fetchState > 0 || getClientConfig()?.buildMode === "export") return; fetchState = 1; fetch("/api/config", { method: "post", body: null, headers: { - ...getHeaders(), + ...getAuthHeaders(), }, }) .then((res) => res.json()) diff --git a/app/store/chat.ts b/app/store/chat.ts index 56ac8db6c..2a66a359b 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -2,7 +2,13 @@ import { trimTopic } from "../utils"; import Locale, { getLang } from "../locales"; import { showToast } from "../components/ui-lib"; -import { ModelConfig, ModelType, useAppConfig } from "./config"; +import { + LLMProvider, + MaskConfig, + ModelConfig, + ModelType, + useAppConfig, +} from "./config"; import { createEmptyMask, Mask } from "./mask"; import { DEFAULT_INPUT_TEMPLATE, @@ -10,19 +16,19 @@ import { StoreKey, SUMMARIZE_MODEL, } from "../constant"; -import { api, RequestMessage } from "../client/api"; -import { ChatControllerPool } from "../client/controller"; +import { ChatControllerPool } from "../client/common/controller"; import { prettyObject } from "../utils/format"; import { estimateTokenLength } from "../utils/token"; import { nanoid } from "nanoid"; import { createPersistStore } from "../utils/store"; +import { RequestMessage, api } from "../client"; export type ChatMessage = RequestMessage & { date: string; streaming?: boolean; isError?: boolean; id: string; - model?: ModelType; + model?: string; }; export function createMessage(override: Partial): ChatMessage { @@ -84,46 +90,25 @@ function getSummarizeModel(currentModel: string) { // if it is using gpt-* models, force to use 3.5 to summarize return currentModel.startsWith("gpt") ?
SUMMARIZE_MODEL : currentModel; } -interface ChatStore { - sessions: ChatSession[]; - currentSessionIndex: number; - clearSessions: () => void; - moveSession: (from: number, to: number) => void; - selectSession: (index: number) => void; - newSession: (mask?: Mask) => void; - deleteSession: (index: number) => void; - currentSession: () => ChatSession; - nextSession: (delta: number) => void; - onNewMessage: (message: ChatMessage) => void; - onUserInput: (content: string) => Promise; - summarizeSession: () => void; - updateStat: (message: ChatMessage) => void; - updateCurrentSession: (updater: (session: ChatSession) => void) => void; - updateMessage: ( - sessionIndex: number, - messageIndex: number, - updater: (message?: ChatMessage) => void, - ) => void; - resetSession: () => void; - getMessagesWithMemory: () => ChatMessage[]; - getMemoryPrompt: () => ChatMessage; - - clearAllData: () => void; -} function countMessages(msgs: ChatMessage[]) { return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0); } -function fillTemplateWith(input: string, modelConfig: ModelConfig) { +function fillTemplateWith( + input: string, + context: { + model: string; + template?: string; + }, +) { const vars = { - model: modelConfig.model, + model: context.model, time: new Date().toLocaleString(), lang: getLang(), input: input, }; - let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE; + let output = context.template ?? DEFAULT_INPUT_TEMPLATE; // must contain {{input}} const inputVar = "{{input}}"; @@ -197,13 +182,13 @@ export const useChatStore = createPersistStore( if (mask) { const config = useAppConfig.getState(); - const globalModelConfig = config.modelConfig; + const globalModelConfig = config.globalMaskConfig; session.mask = { ...mask, - modelConfig: { + config: { ...globalModelConfig, - ...mask.modelConfig, + ...mask.config, }, }; session.topic = mask.name; @@ -288,11 +273,39 @@ get().summarizeSession(); }, + getCurrentMaskConfig() { + return get().currentSession().mask.config; + }, + + extractModelConfig(maskConfig: MaskConfig) { + const provider = maskConfig.provider; + if (!maskConfig.modelConfig[provider]) { + throw Error("[Chat] failed to initialize provider: " + provider); + } + + return maskConfig.modelConfig[provider]; + }, + + getCurrentModelConfig() { + const maskConfig = this.getCurrentMaskConfig(); + return this.extractModelConfig(maskConfig); + }, + + getClient() { + const appConfig = useAppConfig.getState(); + const currentMaskConfig = get().getCurrentMaskConfig(); + return api.createLLMClient(appConfig.providerConfig, currentMaskConfig); + }, + async onUserInput(content: string) { const session = get().currentSession(); - const modelConfig = session.mask.modelConfig; + const maskConfig = this.getCurrentMaskConfig(); + const modelConfig = this.getCurrentModelConfig(); - const userContent = fillTemplateWith(content, modelConfig); + const userContent = fillTemplateWith(content, { + model: modelConfig.model, + template: maskConfig.chatConfig.template, + }); console.log("[User Input] after template: ", userContent); const userMessage: ChatMessage = createMessage({ @@ -323,10 +336,11 @@ ]); }); + const client = this.getClient(); + // make request - api.llm.chat({ + client.chatStream({ messages: sendMessages, - config: { ...modelConfig, stream: true }, onUpdate(message) { botMessage.streaming = true; if (message) { @@ -391,7 +405,9 @@ export const useChatStore =
createPersistStore( getMessagesWithMemory() { const session = get().currentSession(); - const modelConfig = session.mask.modelConfig; + const maskConfig = this.getCurrentMaskConfig(); + const chatConfig = maskConfig.chatConfig; + const modelConfig = this.getCurrentModelConfig(); const clearContextIndex = session.clearContextIndex ?? 0; const messages = session.messages.slice(); const totalMessageCount = session.messages.length; @@ -400,14 +416,14 @@ const contextPrompts = session.mask.context.slice(); // system prompts, to get close to OpenAI Web ChatGPT - const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts; + const shouldInjectSystemPrompts = chatConfig.enableInjectSystemPrompts; const systemPrompts = shouldInjectSystemPrompts ? [ createMessage({ role: "system", content: fillTemplateWith("", { - ...modelConfig, - template: DEFAULT_SYSTEM_TEMPLATE, + model: modelConfig.model, + template: chatConfig.template, }), }), ] : @@ -421,7 +437,7 @@ // long term memory const shouldSendLongTermMemory = - modelConfig.sendMemory && + chatConfig.sendMemory && session.memoryPrompt && session.memoryPrompt.length > 0 && session.lastSummarizeIndex > clearContextIndex; @@ -433,7 +449,7 @@ // short term memory const shortTermMemoryStartIndex = Math.max( 0, - totalMessageCount - modelConfig.historyMessageCount, + totalMessageCount - chatConfig.historyMessageCount, ); // let's concat send messages, including 4 parts: @@ -494,6 +510,8 @@ summarizeSession() { const config = useAppConfig.getState(); + const maskConfig = this.getCurrentMaskConfig(); + const chatConfig = maskConfig.chatConfig; const session = get().currentSession(); // remove error messages if any @@ -502,7 +520,7 @@ // should summarize topic after chatting more than 50 words const SUMMARIZE_MIN_LEN = 50; if ( - config.enableAutoGenerateTitle && + chatConfig.enableAutoGenerateTitle && session.topic === DEFAULT_TOPIC && countMessages(messages) >= SUMMARIZE_MIN_LEN ) { @@ -512,11 +530,12 @@ content: Locale.Store.Prompt.Topic, }), ); - api.llm.chat({ + + const client = this.getClient(); + client.chat({ messages: topicMessages, - config: { - model: getSummarizeModel(session.mask.modelConfig.model), - }, + shouldSummarize: true, + onFinish(message) { get().updateCurrentSession( (session) => @@ -527,7 +546,7 @@ }); } - const modelConfig = session.mask.modelConfig; + const modelConfig = this.getCurrentModelConfig(); const summarizeIndex = Math.max( session.lastSummarizeIndex, session.clearContextIndex ?? 0, @@ -541,7 +560,7 @@ if (historyMsgLength > modelConfig?.max_tokens ??
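/* note: ">" binds tighter than "??", so this 4000-token fallback can never actually apply; patch 02 below replaces the whole check with getMaxTokens() */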
4000) { const n = toBeSummarizedMsgs.length; toBeSummarizedMsgs = toBeSummarizedMsgs.slice( - Math.max(0, n - modelConfig.historyMessageCount), + Math.max(0, n - chatConfig.historyMessageCount), ); } @@ -554,14 +573,14 @@ export const useChatStore = createPersistStore( "[Chat History] ", toBeSummarizedMsgs, historyMsgLength, - modelConfig.compressMessageLengthThreshold, + chatConfig.compressMessageLengthThreshold, ); if ( - historyMsgLength > modelConfig.compressMessageLengthThreshold && - modelConfig.sendMemory + historyMsgLength > chatConfig.compressMessageLengthThreshold && + chatConfig.sendMemory ) { - api.llm.chat({ + this.getClient().chatStream({ messages: toBeSummarizedMsgs.concat( createMessage({ role: "system", @@ -569,11 +588,7 @@ export const useChatStore = createPersistStore( date: "", }), ), - config: { - ...modelConfig, - stream: true, - model: getSummarizeModel(session.mask.modelConfig.model), - }, + shouldSummarize: true, onUpdate(message) { session.memoryPrompt = message; }, @@ -614,52 +629,9 @@ export const useChatStore = createPersistStore( name: StoreKey.Chat, version: 3.1, migrate(persistedState, version) { - const state = persistedState as any; - const newState = JSON.parse( - JSON.stringify(state), - ) as typeof DEFAULT_CHAT_STATE; + // TODO(yifei): migrate from old versions - if (version < 2) { - newState.sessions = []; - - const oldSessions = state.sessions; - for (const oldSession of oldSessions) { - const newSession = createEmptySession(); - newSession.topic = oldSession.topic; - newSession.messages = [...oldSession.messages]; - newSession.mask.modelConfig.sendMemory = true; - newSession.mask.modelConfig.historyMessageCount = 4; - newSession.mask.modelConfig.compressMessageLengthThreshold = 1000; - newState.sessions.push(newSession); - } - } - - if (version < 3) { - // migrate id to nanoid - newState.sessions.forEach((s) => { - s.id = nanoid(); - s.messages.forEach((m) => (m.id = nanoid())); - }); - } - - // Enable `enableInjectSystemPrompts` attribute for old sessions. - // Resolve issue of old sessions not automatically enabling. 
- if (version < 3.1) { - newState.sessions.forEach((s) => { - if ( - // Exclude those already set by user - !s.mask.modelConfig.hasOwnProperty("enableInjectSystemPrompts") - ) { - // Because users may have changed this configuration, - // the user's current configuration is used instead of the default - const config = useAppConfig.getState(); - s.mask.modelConfig.enableInjectSystemPrompts = - config.modelConfig.enableInjectSystemPrompts; - } - }); - } - - return newState as any; + return persistedState as any; }, }, ); diff --git a/app/store/config.ts b/app/store/config.ts index 184355c94..6f388a8b1 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -1,4 +1,3 @@ -import { LLMModel } from "../client/api"; import { isMacOS } from "../utils"; import { getClientConfig } from "../config/client"; import { @@ -8,24 +7,85 @@ import { StoreKey, } from "../constant"; import { createPersistStore } from "../utils/store"; +import { OpenAIConfig } from "../client/openai/config"; +import { api } from "../client"; +import { SubmitKey, Theme } from "../typing"; export type ModelType = (typeof DEFAULT_MODELS)[number]["name"]; -export enum SubmitKey { - Enter = "Enter", - CtrlEnter = "Ctrl + Enter", - ShiftEnter = "Shift + Enter", - AltEnter = "Alt + Enter", - MetaEnter = "Meta + Enter", -} +export const DEFAULT_CHAT_CONFIG = { + enableAutoGenerateTitle: true, + sendMemory: true, + historyMessageCount: 4, + compressMessageLengthThreshold: 1000, + enableInjectSystemPrompts: true, + template: DEFAULT_INPUT_TEMPLATE, +}; +export type ChatConfig = typeof DEFAULT_CHAT_CONFIG; -export enum Theme { - Auto = "auto", - Dark = "dark", - Light = "light", -} +export const DEFAULT_PROVIDER_CONFIG = { + openai: OpenAIConfig.provider, + // azure: { + // endpoint: "https://api.openai.com", + // apiKey: "", + // version: "", + // ...COMMON_PROVIDER_CONFIG, + // }, + // claude: { + // endpoint: "https://api.anthropic.com", + // apiKey: "", + // ...COMMON_PROVIDER_CONFIG, + // }, + // google: { + // endpoint: "https://api.anthropic.com", + // apiKey: "", + // ...COMMON_PROVIDER_CONFIG, + // }, +}; -export const DEFAULT_CONFIG = { +export const DEFAULT_MODEL_CONFIG = { + openai: OpenAIConfig.model, + // azure: { + // model: "gpt-3.5-turbo" as string, + // summarizeModel: "gpt-3.5-turbo", + // + // temperature: 0.5, + // top_p: 1, + // max_tokens: 2000, + // presence_penalty: 0, + // frequency_penalty: 0, + // }, + // claude: { + // model: "claude-2", + // summarizeModel: "claude-2", + // + // max_tokens_to_sample: 100000, + // temperature: 1, + // top_p: 0.7, + // top_k: 1, + // }, + // google: { + // model: "chat-bison-001", + // summarizeModel: "claude-2", + // + // temperature: 1, + // topP: 0.7, + // topK: 1, + // }, +}; + +export type LLMProvider = keyof typeof DEFAULT_PROVIDER_CONFIG; +export const LLMProviders = Array.from( + Object.entries(DEFAULT_PROVIDER_CONFIG), +).map(([k, v]) => [v.name, k]); + +export const DEFAULT_MASK_CONFIG = { + provider: "openai" as LLMProvider, + chatConfig: { ...DEFAULT_CHAT_CONFIG }, + modelConfig: { ...DEFAULT_MODEL_CONFIG }, +}; + +export const DEFAULT_APP_CONFIG = { lastUpdate: Date.now(), // timestamp, to merge state submitKey: isMacOS() ? 
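/* default submit shortcut: Cmd+Enter on macOS, Ctrl+Enter elsewhere */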
SubmitKey.MetaEnter : SubmitKey.CtrlEnter, @@ -34,7 +94,6 @@ theme: Theme.Auto as Theme, tightBorder: !!getClientConfig()?.isApp, sendPreviewBubble: true, - enableAutoGenerateTitle: true, sidebarWidth: DEFAULT_SIDEBAR_WIDTH, disablePromptHint: false, @@ -42,27 +101,14 @@ dontShowMaskSplashScreen: false, // don't show splash screen when creating a chat hideBuiltinMasks: false, // don't add builtin masks - customModels: "", - models: DEFAULT_MODELS as any as LLMModel[], - - modelConfig: { - model: "gpt-3.5-turbo" as ModelType, - temperature: 0.5, - top_p: 1, - max_tokens: 2000, - presence_penalty: 0, - frequency_penalty: 0, - sendMemory: true, - historyMessageCount: 4, - compressMessageLengthThreshold: 1000, - enableInjectSystemPrompts: true, - template: DEFAULT_INPUT_TEMPLATE, - }, + providerConfig: { ...DEFAULT_PROVIDER_CONFIG }, + globalMaskConfig: { ...DEFAULT_MASK_CONFIG }, }; -export type ChatConfig = typeof DEFAULT_CONFIG; - -export type ModelConfig = ChatConfig["modelConfig"]; +export type AppConfig = typeof DEFAULT_APP_CONFIG; +export type ProviderConfig = typeof DEFAULT_PROVIDER_CONFIG; +export type MaskConfig = typeof DEFAULT_MASK_CONFIG; +export type ModelConfig = typeof DEFAULT_MODEL_CONFIG; export function limitNumber( x: number, @@ -99,48 +145,21 @@ }; export const useAppConfig = createPersistStore( - { ...DEFAULT_CONFIG }, + { ...DEFAULT_APP_CONFIG }, (set, get) => ({ reset() { - set(() => ({ ...DEFAULT_CONFIG })); + set(() => ({ ...DEFAULT_APP_CONFIG })); }, - mergeModels(newModels: LLMModel[]) { - if (!newModels || newModels.length === 0) { - return; - } - - const oldModels = get().models; - const modelMap: Record = {}; - - for (const model of oldModels) { - model.available = false; - modelMap[model.name] = model; - } - - for (const model of newModels) { - model.available = true; - modelMap[model.name] = model; - } - - set(() => ({ - models: Object.values(modelMap), - })); - }, - - allModels() { - const customModels = get() - .customModels.split(",") - .filter((v) => !!v && v.length > 0) - .map((m) => ({ name: m, available: true })); - return get().models.concat(customModels); + getDefaultClient() { + return api.createLLMClient(get().providerConfig, get().globalMaskConfig); }, }), { name: StoreKey.Config, - version: 3.8, + version: 4, migrate(persistedState, version) { - const state = persistedState as ChatConfig; + const state = persistedState as any; if (version < 3.4) { state.modelConfig.sendMemory = true; @@ -169,6 +188,10 @@ state.lastUpdate = Date.now(); } + if (version < 4) { + // todo: migrate from old versions + } + return state as any; }, }, diff --git a/app/store/mask.ts b/app/store/mask.ts index dfd4089b7..6fcf7b9b8 100644 --- a/app/store/mask.ts +++ b/app/store/mask.ts @@ -1,10 +1,11 @@ import { BUILTIN_MASKS } from "../masks"; import { getLang, Lang } from "../locales"; import { DEFAULT_TOPIC, ChatMessage } from "./chat"; -import { ModelConfig, useAppConfig } from "./config"; +import { MaskConfig, ModelConfig, useAppConfig } from "./config"; import { StoreKey } from "../constant"; import { nanoid } from "nanoid"; import { createPersistStore } from "../utils/store"; +import { deepClone } from "../utils/clone"; export type Mask = { id: string; @@ -14,7 +15,9 @@ hideContext?: boolean; context: ChatMessage[]; syncGlobalConfig?: boolean; - modelConfig: ModelConfig; + + config: MaskConfig; + lang: Lang;
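/* per-mask provider, model and chat settings now live in the config field above, replacing the old flat modelConfig */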
builtin: boolean; }; @@ -33,7 +36,7 @@ export const createEmptyMask = () => name: DEFAULT_TOPIC, context: [], syncGlobalConfig: true, // use global config as default - modelConfig: { ...useAppConfig.getState().modelConfig }, + config: deepClone(useAppConfig.getState().globalMaskConfig), lang: getLang(), builtin: false, createdAt: Date.now(), @@ -87,10 +90,11 @@ export const useMaskStore = createPersistStore( const buildinMasks = BUILTIN_MASKS.map( (m) => ({ + id: m.name, ...m, - modelConfig: { - ...config.modelConfig, - ...m.modelConfig, + config: { + ...config.globalMaskConfig, + ...m.config, }, }) as Mask, ); @@ -120,6 +124,8 @@ export const useMaskStore = createPersistStore( newState.masks = updatedMasks; } + // TODO(yifei): migrate old masks + return newState as any; }, }, diff --git a/app/store/sync.ts b/app/store/sync.ts index b74f6895f..17cfdd2fd 100644 --- a/app/store/sync.ts +++ b/app/store/sync.ts @@ -13,7 +13,7 @@ import { downloadAs, readFromFile } from "../utils"; import { showToast } from "../components/ui-lib"; import Locale from "../locales"; import { createSyncClient, ProviderType } from "../utils/cloud"; -import { corsPath } from "../utils/cors"; +import { getApiPath } from "../utils/path"; export interface WebDavConfig { server: string; @@ -27,7 +27,7 @@ export type SyncStore = GetStoreState; const DEFAULT_SYNC_STATE = { provider: ProviderType.WebDAV, useProxy: true, - proxyUrl: corsPath(ApiPath.Cors), + proxyUrl: getApiPath(ApiPath.Cors), webdav: { endpoint: "", diff --git a/app/store/update.ts b/app/store/update.ts index 2b088a13d..0e63e1203 100644 --- a/app/store/update.ts +++ b/app/store/update.ts @@ -1,5 +1,4 @@ import { FETCH_COMMIT_URL, FETCH_TAG_URL, StoreKey } from "../constant"; -import { api } from "../client/api"; import { getClientConfig } from "../config/client"; import { createPersistStore } from "../utils/store"; import ChatGptIcon from "../icons/chatgpt.png"; @@ -85,35 +84,40 @@ export const useUpdateStore = createPersistStore( })); if (window.__TAURI__?.notification && isApp) { // Check if notification permission is granted - await window.__TAURI__?.notification.isPermissionGranted().then((granted) => { - if (!granted) { - return; - } else { - // Request permission to show notifications - window.__TAURI__?.notification.requestPermission().then((permission) => { - if (permission === 'granted') { - if (version === remoteId) { - // Show a notification using Tauri - window.__TAURI__?.notification.sendNotification({ - title: "ChatGPT Next Web", - body: `${Locale.Settings.Update.IsLatest}`, - icon: `${ChatGptIcon.src}`, - sound: "Default" - }); - } else { - const updateMessage = Locale.Settings.Update.FoundUpdate(`${remoteId}`); - // Show a notification for the new version using Tauri - window.__TAURI__?.notification.sendNotification({ - title: "ChatGPT Next Web", - body: updateMessage, - icon: `${ChatGptIcon.src}`, - sound: "Default" - }); - } - } - }); - } - }); + await window.__TAURI__?.notification + .isPermissionGranted() + .then((granted) => { + if (!granted) { + return; + } else { + // Request permission to show notifications + window.__TAURI__?.notification + .requestPermission() + .then((permission) => { + if (permission === "granted") { + if (version === remoteId) { + // Show a notification using Tauri + window.__TAURI__?.notification.sendNotification({ + title: "ChatGPT Next Web", + body: `${Locale.Settings.Update.IsLatest}`, + icon: `${ChatGptIcon.src}`, + sound: "Default", + }); + } else { + const updateMessage = + 
Locale.Settings.Update.FoundUpdate(`${remoteId}`); + // Show a notification for the new version using Tauri + window.__TAURI__?.notification.sendNotification({ + title: "ChatGPT Next Web", + body: updateMessage, + icon: `${ChatGptIcon.src}`, + sound: "Default", + }); + } + } + }); + } + }); } console.log("[Got Upstream] ", remoteId); } catch (error) { @@ -130,14 +134,7 @@ })); try { - const usage = await api.llm.usage(); - - if (usage) { - set(() => ({ - used: usage.used, - subscription: usage.total, - })); - } + // TODO: add check usage api here } catch (e) { console.error((e as Error).message); } diff --git a/app/typing.ts b/app/typing.ts index 25e474abf..6ed87882f 100644 --- a/app/typing.ts +++ b/app/typing.ts @@ -1 +1,15 @@ export type Updater = (updater: (value: T) => void) => void; + +export enum SubmitKey { + Enter = "Enter", + CtrlEnter = "Ctrl + Enter", + ShiftEnter = "Shift + Enter", + AltEnter = "Alt + Enter", + MetaEnter = "Meta + Enter", +} + +export enum Theme { + Auto = "auto", + Dark = "dark", + Light = "light", +} diff --git a/app/utils/clone.ts b/app/utils/clone.ts index 2958b6b9c..e8971acfb 100644 --- a/app/utils/clone.ts +++ b/app/utils/clone.ts @@ -1,3 +1,3 @@ -export function deepClone(obj: T) { +export function deepClone(obj: T): T { return JSON.parse(JSON.stringify(obj)); } diff --git a/app/utils/cloud/index.ts b/app/utils/cloud/index.ts index 63908249e..e6905bb29 100644 --- a/app/utils/cloud/index.ts +++ b/app/utils/cloud/index.ts @@ -1,5 +1,6 @@ import { createWebDavClient } from "./webdav"; import { createUpstashClient } from "./upstash"; +import { SyncStore } from "@/app/store/sync"; export enum ProviderType { WebDAV = "webdav", @@ -27,7 +28,7 @@ export type SyncClient = { export function createSyncClient( provider: T, - config: SyncClientConfig[T], + store: SyncStore, ): SyncClient { - return SyncClients[provider](config as any) as any; + return SyncClients[provider](store); } diff --git a/app/utils/cloud/upstash.ts b/app/utils/cloud/upstash.ts index 5f5b9fc79..abc1b4cc9 100644 --- a/app/utils/cloud/upstash.ts +++ b/app/utils/cloud/upstash.ts @@ -57,7 +57,7 @@ export function createUpstashClient(store: SyncStore) { async get() { const chunkCount = Number(await this.redisGet(chunkCountKey)); - if (!Number.isInteger(chunkCount)) return; + if (!Number.isInteger(chunkCount)) return ""; const chunks = await Promise.all( new Array(chunkCount) diff --git a/app/utils/cors.ts b/app/utils/cors.ts index 773f152aa..6eb77705e 100644 --- a/app/utils/cors.ts +++ b/app/utils/cors.ts @@ -1,19 +1,5 @@ -import { getClientConfig } from "../config/client"; -import { ApiPath, DEFAULT_CORS_HOST } from "../constant"; - -export function corsPath(path: string) { - const baseUrl = getClientConfig()?.isApp ? `${DEFAULT_CORS_HOST}` : ""; - - if (!path.startsWith("/")) { - path = "/" + path; - } - - if (!path.endsWith("/")) { - path += "/"; - } - - return `${baseUrl}${path}`; -} +import { ApiPath } from "../constant"; +import { getApiPath } from "./path"; export function corsFetch( url: string, @@ -25,7 +11,7 @@ throw Error("[CORS Fetch] url must start with http/https"); } - let proxyUrl = options.proxyUrl ?? corsPath(ApiPath.Cors); + let proxyUrl = options.proxyUrl ??
getApiPath(ApiPath.Cors); if (!proxyUrl.endsWith("/")) { proxyUrl += "/"; } diff --git a/app/utils/log.ts b/app/utils/log.ts new file mode 100644 index 000000000..443033c53 --- /dev/null +++ b/app/utils/log.ts @@ -0,0 +1,13 @@ +export function createLogger(prefix = "") { + return { + log(...args: any[]) { + console.log(prefix, ...args); + }, + error(...args: any[]) { + console.error(prefix, ...args); + }, + warn(...args: any[]) { + console.warn(prefix, ...args); + }, + }; +} diff --git a/app/utils/object.ts b/app/utils/object.ts new file mode 100644 index 000000000..7fc74aee6 --- /dev/null +++ b/app/utils/object.ts @@ -0,0 +1,17 @@ +export function pick( + obj: T, + ...keys: U +): Pick { + const ret: any = {}; + keys.forEach((key) => (ret[key] = obj[key])); + return ret; +} + +export function omit( + obj: T, + ...keys: U +): Omit { + const ret: any = { ...obj }; + keys.forEach((key) => delete ret[key]); + return ret; +} diff --git a/app/utils/path.ts b/app/utils/path.ts new file mode 100644 index 000000000..6609352d7 --- /dev/null +++ b/app/utils/path.ts @@ -0,0 +1,16 @@ +import { getClientConfig } from "../config/client"; +import { ApiPath, REMOTE_API_HOST } from "../constant"; + +/** + * Get api path according to desktop/web env + * + * 1. In desktop app, we always try to use a remote full path for better network experience + * 2. In web app, we always try to use the original relative path + * + * @param path - /api/* + * @returns + */ +export function getApiPath(path: ApiPath) { + const baseUrl = getClientConfig()?.isApp ? `${REMOTE_API_HOST}` : ""; + return `${baseUrl}${path}`; +} diff --git a/app/utils/string.ts b/app/utils/string.ts new file mode 100644 index 000000000..68fc47d55 --- /dev/null +++ b/app/utils/string.ts @@ -0,0 +1,19 @@ +export function trimEnd(s: string, end = " ") { + if (end.length === 0) return s; + + while (s.endsWith(end)) { + s = s.slice(0, -end.length); + } + + return s; +} + +export function trimStart(s: string, start = " ") { + if (start.length === 0) return s; + + while (s.startsWith(start)) { + s = s.slice(start.length); + } + + return s; +} From cdf0311d270d5808efca2c9ba07c593a7ec57d41 Mon Sep 17 00:00:00 2001 From: Yidadaa Date: Tue, 7 Nov 2023 23:22:11 +0800 Subject: [PATCH 02/26] feat: add claude and bard --- app/client/anthropic/config.ts | 29 +++ app/client/anthropic/index.ts | 233 +++++++++++++++++++ app/client/anthropic/types.ts | 24 ++ app/client/common/auth.ts | 11 +- app/client/common/config.ts | 5 - app/client/core.ts | 2 + app/client/openai/config.ts | 54 ++++- app/client/openai/index.ts | 71 +----- app/client/types.ts | 4 - app/components/config/anthropic/model.tsx | 79 +++++++ app/components/config/anthropic/provider.tsx | 70 ++++++ app/components/config/index.tsx | 34 ++- app/components/config/openai/provider.tsx | 3 + app/components/model-config.tsx | 139 ----------- app/components/settings.tsx | 91 ++------ app/constant.ts | 54 +---- app/store/access.ts | 10 +- app/store/chat.ts | 37 ++- app/store/config.ts | 22 +- src-tauri/tauri.conf.json | 2 +- 20 files changed, 580 insertions(+), 394 deletions(-) create mode 100644 app/client/anthropic/config.ts create mode 100644 app/client/anthropic/index.ts create mode 100644 app/client/anthropic/types.ts delete mode 100644 app/client/common/config.ts create mode 100644 app/components/config/anthropic/model.tsx create mode 100644 app/components/config/anthropic/provider.tsx delete mode 100644 app/components/model-config.tsx diff --git a/app/client/anthropic/config.ts b/app/client/anthropic/config.ts
new file mode 100644 index 000000000..1ba8f4f00 --- /dev/null +++ b/app/client/anthropic/config.ts @@ -0,0 +1,29 @@ +export const AnthropicConfig = { + model: { + model: "claude-instant-1", + summarizeModel: "claude-instant-1", + + max_tokens_to_sample: 8192, + temperature: 0.5, + top_p: 0.7, + top_k: 5, + }, + provider: { + name: "Anthropic" as const, + endpoint: "https://api.anthropic.com", + apiKey: "", + customModels: "", + version: "2023-06-01", + + models: [ + { + name: "claude-instant-1", + available: true, + }, + { + name: "claude-2", + available: true, + }, + ], + }, +}; diff --git a/app/client/anthropic/index.ts b/app/client/anthropic/index.ts new file mode 100644 index 000000000..863fde951 --- /dev/null +++ b/app/client/anthropic/index.ts @@ -0,0 +1,233 @@ +import { ModelConfig, ProviderConfig } from "@/app/store"; +import { createLogger } from "@/app/utils/log"; +import { getAuthKey } from "../common/auth"; +import { API_PREFIX, AnthropicPath, ApiPath } from "@/app/constant"; +import { getApiPath } from "@/app/utils/path"; +import { trimEnd } from "@/app/utils/string"; +import { Anthropic } from "./types"; +import { ChatOptions, LLMModel, LLMUsage, RequestMessage } from "../types"; +import { omit } from "@/app/utils/object"; +import { + EventStreamContentType, + fetchEventSource, +} from "@fortaine/fetch-event-source"; +import { prettyObject } from "@/app/utils/format"; +import Locale from "@/app/locales"; +import { AnthropicConfig } from "./config"; + +export function createAnthropicClient( + providerConfigs: ProviderConfig, + modelConfig: ModelConfig, +) { + const anthropicConfig = { ...providerConfigs.anthropic }; + const logger = createLogger("[Anthropic]"); + const anthropicModelConfig = { ...modelConfig.anthropic }; + + return { + headers() { + return { + "Content-Type": "application/json", + "x-api-key": getAuthKey(anthropicConfig.apiKey), + "anthropic-version": anthropicConfig.version, + }; + }, + + path(path: AnthropicPath): string { + let baseUrl: string = anthropicConfig.endpoint; + + // if endpoint is empty, use default endpoint + if (baseUrl.trim().length === 0) { + baseUrl = getApiPath(ApiPath.Anthropic); + } + + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(API_PREFIX)) { + baseUrl = "https://" + baseUrl; + } + + baseUrl = trimEnd(baseUrl, "/"); + + return `${baseUrl}/${path}`; + }, + + extractMessage(res: Anthropic.ChatResponse) { + return res.completion; + }, + + beforeRequest(options: ChatOptions, stream = false) { + const ClaudeMapper: Record = { + assistant: "Assistant", + user: "Human", + system: "Human", + }; + + const prompt = options.messages + .map((v) => ({ + role: ClaudeMapper[v.role] ?? 
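/* defensive fallback: any role missing from ClaudeMapper is sent as a "Human" turn */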
"Human", + content: v.content, + })) + .map((v) => `\n\n${v.role}: ${v.content}`) + .join(""); + + if (options.shouldSummarize) { + anthropicModelConfig.model = anthropicModelConfig.summarizeModel; + } + + const requestBody: Anthropic.ChatRequest = { + prompt, + stream, + ...omit(anthropicModelConfig, "summarizeModel"), + }; + + const path = this.path(AnthropicPath.Chat); + + logger.log("path = ", path, requestBody); + + const controller = new AbortController(); + options.onController?.(controller); + + const payload = { + method: "POST", + body: JSON.stringify(requestBody), + signal: controller.signal, + headers: this.headers(), + mode: "no-cors" as RequestMode, + }; + + return { + path, + payload, + controller, + }; + }, + + async chat(options: ChatOptions) { + try { + const { path, payload, controller } = this.beforeRequest( + options, + false, + ); + + controller.signal.onabort = () => options.onFinish(""); + + const res = await fetch(path, payload); + const resJson = await res.json(); + + const message = this.extractMessage(resJson); + options.onFinish(message); + } catch (e) { + logger.error("failed to chat", e); + options.onError?.(e as Error); + } + }, + + async chatStream(options: ChatOptions) { + try { + const { path, payload, controller } = this.beforeRequest(options, true); + + const context = { + text: "", + finished: false, + }; + + const finish = () => { + if (!context.finished) { + options.onFinish(context.text); + context.finished = true; + } + }; + + controller.signal.onabort = finish; + + logger.log(payload); + + fetchEventSource(path, { + ...payload, + async onopen(res) { + const contentType = res.headers.get("content-type"); + logger.log("response content type: ", contentType); + + if (contentType?.startsWith("text/plain")) { + context.text = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [context.text]; + let extraInfo = await res.clone().text(); + try { + const resJson = await res.clone().json(); + extraInfo = prettyObject(resJson); + } catch {} + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + context.text = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + if (msg.data === "[DONE]" || context.finished) { + return finish(); + } + const chunk = msg.data; + try { + const chunkJson = JSON.parse( + chunk, + ) as Anthropic.ChatStreamResponse; + const delta = chunkJson.completion; + if (delta) { + context.text += delta; + options.onUpdate?.(context.text, delta); + } + } catch (e) { + logger.error("[Request] parse error", chunk, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + }, + openWhenHidden: true, + }); + } catch (e) { + logger.error("failed to chat", e); + options.onError?.(e as Error); + } + }, + + async usage() { + return { + used: 0, + total: 0, + } as LLMUsage; + }, + + async models(): Promise { + const customModels = anthropicConfig.customModels + .split(",") + .map((v) => v.trim()) + .filter((v) => !!v) + .map((v) => ({ + name: v, + available: true, + })); + + return [...AnthropicConfig.provider.models.slice(), ...customModels]; + }, + }; +} diff --git a/app/client/anthropic/types.ts b/app/client/anthropic/types.ts new file mode 100644 index 000000000..347693aa8 --- /dev/null +++ b/app/client/anthropic/types.ts @@ -0,0 +1,24 @@ +export namespace 
Anthropic { + export interface ChatRequest { + model: string; // The model that will complete your prompt. + prompt: string; // The prompt that you want Claude to complete. + max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping. + stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text. + temperature?: number; // Amount of randomness injected into the response. + top_p?: number; // Use nucleus sampling. + top_k?: number; // Only sample from the top K options for each subsequent token. + metadata?: object; // An object describing metadata about the request. + stream?: boolean; // Whether to incrementally stream the response using server-sent events. + } + + export interface ChatResponse { + completion: string; + stop_reason: "stop_sequence" | "max_tokens"; + model: string; + } + + export type ChatStreamResponse = ChatResponse & { + stop?: string; + log_id: string; + }; +} diff --git a/app/client/common/auth.ts b/app/client/common/auth.ts index 9533ebfd2..f72852388 100644 --- a/app/client/common/auth.ts +++ b/app/client/common/auth.ts @@ -6,23 +6,22 @@ export function bearer(value: string) { return `Bearer ${value.trim()}`; } -export function getAuthHeaders(apiKey = "") { +export function getAuthKey(apiKey = "") { const accessStore = useAccessStore.getState(); const isApp = !!getClientConfig()?.isApp; - - let headers: Record = {}; + let authKey = ""; if (apiKey) { // use user's api key first - headers.Authorization = bearer(apiKey); + authKey = bearer(apiKey); } else if ( accessStore.enabledAccessControl() && !isApp && !!accessStore.accessCode ) { // or use access code - headers.Authorization = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode); + authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode); } - return headers; + return authKey; } diff --git a/app/client/common/config.ts b/app/client/common/config.ts deleted file mode 100644 index 127773a4c..000000000 --- a/app/client/common/config.ts +++ /dev/null @@ -1,5 +0,0 @@ -export const COMMON_PROVIDER_CONFIG = { - customModels: "", - models: [] as string[], - autoFetchModels: false, // fetch available models from server or not -}; diff --git a/app/client/core.ts b/app/client/core.ts index a75cf3fc0..8e7305f86 100644 --- a/app/client/core.ts +++ b/app/client/core.ts @@ -2,9 +2,11 @@ import { MaskConfig, ProviderConfig } from "../store"; import { shareToShareGPT } from "./common/share"; import { createOpenAiClient } from "./openai"; import { ChatControllerPool } from "./common/controller"; +import { createAnthropicClient } from "./anthropic"; export const LLMClients = { openai: createOpenAiClient, + anthropic: createAnthropicClient, }; export function createLLMClient( diff --git a/app/client/openai/config.ts b/app/client/openai/config.ts index b27534162..dda89c706 100644 --- a/app/client/openai/config.ts +++ b/app/client/openai/config.ts @@ -1,5 +1,3 @@ -import { COMMON_PROVIDER_CONFIG } from "../common/config"; - export const OpenAIConfig = { model: { model: "gpt-3.5-turbo" as string, @@ -12,9 +10,57 @@ export const OpenAIConfig = { frequency_penalty: 0, }, provider: { - name: "OpenAI", + name: "OpenAI" as const, endpoint: "https://api.openai.com", apiKey: "", - ...COMMON_PROVIDER_CONFIG, + customModels: "", + autoFetchModels: false, // fetch available models from server or not + + models: [ + { + name: "gpt-4", + available: true, + }, + { + name: "gpt-4-0314", + available: true, + }, + { + name: "gpt-4-0613", + available: true, + }, + { + name: 
"gpt-4-32k", + available: true, + }, + { + name: "gpt-4-32k-0314", + available: true, + }, + { + name: "gpt-4-32k-0613", + available: true, + }, + { + name: "gpt-3.5-turbo", + available: true, + }, + { + name: "gpt-3.5-turbo-0301", + available: true, + }, + { + name: "gpt-3.5-turbo-0613", + available: true, + }, + { + name: "gpt-3.5-turbo-16k", + available: true, + }, + { + name: "gpt-3.5-turbo-16k-0613", + available: true, + }, + ], }, }; diff --git a/app/client/openai/index.ts b/app/client/openai/index.ts index a452936de..827604b1b 100644 --- a/app/client/openai/index.ts +++ b/app/client/openai/index.ts @@ -3,12 +3,7 @@ import { fetchEventSource, } from "@fortaine/fetch-event-source"; -import { - API_PREFIX, - ApiPath, - DEFAULT_MODELS, - OpenaiPath, -} from "@/app/constant"; +import { API_PREFIX, ApiPath, OpenaiPath } from "@/app/constant"; import { ModelConfig, ProviderConfig } from "@/app/store"; import { OpenAI } from "./types"; @@ -21,7 +16,8 @@ import { getApiPath } from "@/app/utils/path"; import { trimEnd } from "@/app/utils/string"; import { omit } from "@/app/utils/object"; import { createLogger } from "@/app/utils/log"; -import { getAuthHeaders } from "../common/auth"; +import { getAuthKey } from "../common/auth"; +import { OpenAIConfig } from "./config"; export function createOpenAiClient( providerConfigs: ProviderConfig, @@ -35,12 +31,12 @@ export function createOpenAiClient( headers() { return { "Content-Type": "application/json", - ...getAuthHeaders(openaiConfig.apiKey), + Authorization: getAuthKey(), }; }, path(path: OpenaiPath): string { - let baseUrl = openaiConfig.endpoint; + let baseUrl: string = openaiConfig.endpoint; // if endpoint is empty, use default endpoint if (baseUrl.trim().length === 0) { @@ -206,59 +202,9 @@ export function createOpenAiClient( }, async usage() { - const formatDate = (d: Date) => - `${d.getFullYear()}-${(d.getMonth() + 1) - .toString() - .padStart(2, "0")}-${d.getDate().toString().padStart(2, "0")}`; - const ONE_DAY = 1 * 24 * 60 * 60 * 1000; - const now = new Date(); - const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1); - const startDate = formatDate(startOfMonth); - const endDate = formatDate(new Date(Date.now() + ONE_DAY)); - - const [used, subs] = await Promise.all([ - fetch( - `${this.path( - OpenaiPath.Usage, - )}?start_date=${startDate}&end_date=${endDate}`, - { - method: "GET", - headers: this.headers(), - }, - ), - fetch(this.path(OpenaiPath.Subs), { - method: "GET", - headers: this.headers(), - }), - ]); - - if (!used.ok || !subs.ok) { - throw new Error("Failed to query usage from openai"); - } - - const response = (await used.json()) as { - total_usage?: number; - error?: { - type: string; - message: string; - }; - }; - - const total = (await subs.json()) as { - hard_limit_usd?: number; - }; - - if (response.error?.type) { - throw Error(response.error?.message); - } - - response.total_usage = Math.round(response.total_usage ?? 0) / 100; - total.hard_limit_usd = - Math.round((total.hard_limit_usd ?? 
0) * 100) / 100; - return { - used: response.total_usage, - total: total.hard_limit_usd, + used: 0, + total: 0, } as LLMUsage; }, @@ -266,13 +212,14 @@ export function createOpenAiClient( const customModels = openaiConfig.customModels .split(",") .map((v) => v.trim()) + .filter((v) => !!v) .map((v) => ({ name: v, available: true, })); if (!openaiConfig.autoFetchModels) { - return [...DEFAULT_MODELS.slice(), ...customModels]; + return [...OpenAIConfig.provider.models.slice(), ...customModels]; } const res = await fetch(this.path(OpenaiPath.ListModel), { diff --git a/app/client/types.ts b/app/client/types.ts index 694059e1c..24753869f 100644 --- a/app/client/types.ts +++ b/app/client/types.ts @@ -1,5 +1,3 @@ -import { DEFAULT_MODELS } from "../constant"; - export interface LLMUsage { used: number; total: number; @@ -14,8 +12,6 @@ export interface LLMModel { export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; -export type ChatModel = (typeof DEFAULT_MODELS)[number]["name"]; - export interface RequestMessage { role: MessageRole; content: string; diff --git a/app/components/config/anthropic/model.tsx b/app/components/config/anthropic/model.tsx new file mode 100644 index 000000000..c55225958 --- /dev/null +++ b/app/components/config/anthropic/model.tsx @@ -0,0 +1,79 @@ +import { ModelConfig } from "@/app/store"; +import { ModelConfigProps } from "../types"; +import { ListItem, Select } from "../../ui-lib"; +import Locale from "@/app/locales"; +import { InputRange } from "../../input-range"; + +export function AnthropicModelConfig( + props: ModelConfigProps, +) { + return ( + <> + + + + + { + props.updateConfig( + (config) => (config.temperature = e.currentTarget.valueAsNumber), + ); + }} + > + + + { + props.updateConfig( + (config) => (config.top_p = e.currentTarget.valueAsNumber), + ); + }} + > + + + + props.updateConfig( + (config) => + (config.max_tokens_to_sample = e.currentTarget.valueAsNumber), + ) + } + > + + + ); +} diff --git a/app/components/config/anthropic/provider.tsx b/app/components/config/anthropic/provider.tsx new file mode 100644 index 000000000..f06fc71a2 --- /dev/null +++ b/app/components/config/anthropic/provider.tsx @@ -0,0 +1,70 @@ +import { ProviderConfig } from "@/app/store"; +import { ProviderConfigProps } from "../types"; +import { ListItem, PasswordInput } from "../../ui-lib"; +import Locale from "@/app/locales"; +import { REMOTE_API_HOST } from "@/app/constant"; + +export function AnthropicProviderConfig( + props: ProviderConfigProps, +) { + return ( + <> + + + props.updateConfig( + (config) => (config.endpoint = e.currentTarget.value), + ) + } + > + + + { + props.updateConfig( + (config) => (config.apiKey = e.currentTarget.value), + ); + }} + /> + + + { + props.updateConfig( + (config) => (config.version = e.currentTarget.value), + ); + }} + /> + + + + props.updateConfig( + (config) => (config.customModels = e.currentTarget.value), + ) + } + > + + + ); +} diff --git a/app/components/config/index.tsx b/app/components/config/index.tsx index b08fe0608..22f9089f7 100644 --- a/app/components/config/index.tsx +++ b/app/components/config/index.tsx @@ -11,6 +11,10 @@ import { OpenAIProviderConfig } from "./openai/provider"; import { ListItem, Select } from "../ui-lib"; import Locale from "@/app/locales"; import { InputRange } from "../input-range"; +import { OpenAIConfig } from "@/app/client/openai/config"; +import { AnthropicModelConfig } from "./anthropic/model"; +import { AnthropicConfig } from 
"@/app/client/anthropic/config"; +import { AnthropicProviderConfig } from "./anthropic/provider"; export function ModelConfigList(props: { provider: LLMProvider; @@ -24,16 +28,17 @@ export function ModelConfigList(props: { updateConfig={(update) => { props.updateConfig((config) => update(config.openai)); }} - models={[ - { - name: "gpt-3.5-turbo", - available: true, - }, - { - name: "gpt-4", - available: true, - }, - ]} + models={OpenAIConfig.provider.models} + /> + ); + } else if (props.provider === "anthropic") { + return ( + { + props.updateConfig((config) => update(config.anthropic)); + }} + models={AnthropicConfig.provider.models} /> ); } @@ -55,6 +60,15 @@ export function ProviderConfigList(props: { }} /> ); + } else if (props.provider === "anthropic") { + return ( + { + props.updateConfig((config) => update(config.anthropic)); + }} + /> + ); } return null; diff --git a/app/components/config/openai/provider.tsx b/app/components/config/openai/provider.tsx index b905b130d..b3479e986 100644 --- a/app/components/config/openai/provider.tsx +++ b/app/components/config/openai/provider.tsx @@ -3,6 +3,8 @@ import { ProviderConfigProps } from "../types"; import { ListItem, PasswordInput } from "../../ui-lib"; import Locale from "@/app/locales"; import { REMOTE_API_HOST } from "@/app/constant"; +import { IconButton } from "../../button"; +import ReloadIcon from "@/app/icons/reload.svg"; export function OpenAIProviderConfig( props: ProviderConfigProps, @@ -58,6 +60,7 @@ export function OpenAIProviderConfig( props.updateConfig( diff --git a/app/components/model-config.tsx b/app/components/model-config.tsx deleted file mode 100644 index 00734382c..000000000 --- a/app/components/model-config.tsx +++ /dev/null @@ -1,139 +0,0 @@ -import { ModalConfigValidator, ModelConfig, useAppConfig } from "../store"; - -import Locale from "../locales"; -import { InputRange } from "./input-range"; -import { ListItem, Select } from "./ui-lib"; - -export function _ModelConfigList(props: { - modelConfig: ModelConfig; - updateConfig: (updater: (config: ModelConfig) => void) => void; -}) { - return null; - /* - const config = useAppConfig(); - - return ( - <> - - - - - { - props.updateConfig( - (config) => - (config.temperature = ModalConfigValidator.temperature( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - - - { - props.updateConfig( - (config) => - (config.top_p = ModalConfigValidator.top_p( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - - - - props.updateConfig( - (config) => - (config.max_tokens = ModalConfigValidator.max_tokens( - e.currentTarget.valueAsNumber, - )), - ) - } - > - - - { - props.updateConfig( - (config) => - (config.presence_penalty = - ModalConfigValidator.presence_penalty( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - - - - { - props.updateConfig( - (config) => - (config.frequency_penalty = - ModalConfigValidator.frequency_penalty( - e.currentTarget.valueAsNumber, - )), - ); - }} - > - - - - - ); - */ -} diff --git a/app/components/settings.tsx b/app/components/settings.tsx index ffe3850f0..6eca784ac 100644 --- a/app/components/settings.tsx +++ b/app/components/settings.tsx @@ -37,8 +37,6 @@ import { useUpdateStore, useAccessStore, useAppConfig, - LLMProvider, - LLMProviders, } from "../store"; import Locale, { @@ -578,22 +576,6 @@ export function Settings() { console.log("[Update] remote version ", updateStore.remoteVersion); } - const usage = { - used: updateStore.used, - subscription: updateStore.subscription, - }; - const [loadingUsage, setLoadingUsage] = 
useState(false); - function checkUsage(force = false) { - if (accessStore.hideBalanceQuery) { - return; - } - - setLoadingUsage(true); - updateStore.updateUsage(force).finally(() => { - setLoadingUsage(false); - }); - } const accessStore = useAccessStore(); const enabledAccessControl = useMemo( () => accessStore.enabledAccessControl(), @@ -610,7 +592,6 @@ useEffect(() => { // checks per minute checkUpdate(); - showUsage && checkUsage(); // eslint-disable-next-line react-hooks/exhaustive-deps }, []); @@ -806,6 +787,28 @@ + + {showAccessCode ? ( + + { + accessStore.update( + (config) => (config.accessCode = e.currentTarget.value), + ); + }} + /> + + ) : ( + <> + )} + + @@ -875,56 +878,6 @@ - - {showAccessCode ? ( - - { - accessStore.update( - (config) => (config.accessCode = e.currentTarget.value), - ); - }} - /> - - ) : ( - <> - )} - - {!accessStore.hideUserApiKey ? <> : null} - - {!accessStore.hideBalanceQuery ? ( - - {!showUsage || loadingUsage ? (
- ) : ( - } - text={Locale.Settings.Usage.Check} - onClick={() => checkUsage(true)} - /> - )} - - ) : null} - - res.json()) @@ -48,9 +48,7 @@ export const useAccessStore = createPersistStore( set(() => ({ ...res })); if (res.disableGPT4) { - DEFAULT_MODELS.forEach( - (m: any) => (m.available = !m.name.startsWith("gpt-4")), - ); + // disable model } }) .catch(() => { diff --git a/app/store/chat.ts b/app/store/chat.ts index 2a66a359b..1dff506d9 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -2,20 +2,9 @@ import { trimTopic } from "../utils"; import Locale, { getLang } from "../locales"; import { showToast } from "../components/ui-lib"; -import { - LLMProvider, - MaskConfig, - ModelConfig, - ModelType, - useAppConfig, -} from "./config"; +import { MaskConfig, useAppConfig } from "./config"; import { createEmptyMask, Mask } from "./mask"; -import { - DEFAULT_INPUT_TEMPLATE, - DEFAULT_SYSTEM_TEMPLATE, - StoreKey, - SUMMARIZE_MODEL, -} from "../constant"; +import { DEFAULT_INPUT_TEMPLATE, StoreKey } from "../constant"; import { ChatControllerPool } from "../client/common/controller"; import { prettyObject } from "../utils/format"; import { estimateTokenLength } from "../utils/token"; @@ -85,11 +74,6 @@ function createEmptySession(): ChatSession { }; } -function getSummarizeModel(currentModel: string) { - // if it is using gpt-* models, force to use 3.5 to summarize - return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel; -} - function countMessages(msgs: ChatMessage[]) { return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0); } @@ -291,6 +275,18 @@ export const useChatStore = createPersistStore( return this.extractModelConfig(maskConfig); }, + getMaxTokens() { + const maskConfig = this.getCurrentMaskConfig(); + + if (maskConfig.provider === "openai") { + return maskConfig.modelConfig.openai.max_tokens; + } else if (maskConfig.provider === "anthropic") { + return maskConfig.modelConfig.anthropic.max_tokens_to_sample; + } + + return 8192; + }, + getClient() { const appConfig = useAppConfig.getState(); const currentMaskConfig = get().getCurrentMaskConfig(); @@ -463,7 +459,7 @@ export const useChatStore = createPersistStore( : shortTermMemoryStartIndex; // and if user has cleared history messages, we should exclude the memory too. const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex); - const maxTokenThreshold = modelConfig.max_tokens; + const maxTokenThreshold = this.getMaxTokens(); // get recent messages as much as possible const reversedRecentMessages = []; @@ -546,7 +542,6 @@ export const useChatStore = createPersistStore( }); } - const modelConfig = this.getCurrentModelConfig(); const summarizeIndex = Math.max( session.lastSummarizeIndex, session.clearContextIndex ?? 0, @@ -557,7 +552,7 @@ export const useChatStore = createPersistStore( const historyMsgLength = countMessages(toBeSummarizedMsgs); - if (historyMsgLength > modelConfig?.max_tokens ?? 
4000) { + if (historyMsgLength > this.getMaxTokens()) { const n = toBeSummarizedMsgs.length; toBeSummarizedMsgs = toBeSummarizedMsgs.slice( Math.max(0, n - chatConfig.historyMessageCount), diff --git a/app/store/config.ts b/app/store/config.ts index 6f388a8b1..1fb6d6878 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -2,7 +2,6 @@ import { isMacOS } from "../utils"; import { getClientConfig } from "../config/client"; import { DEFAULT_INPUT_TEMPLATE, - DEFAULT_MODELS, DEFAULT_SIDEBAR_WIDTH, StoreKey, } from "../constant"; @@ -10,8 +9,7 @@ import { createPersistStore } from "../utils/store"; import { OpenAIConfig } from "../client/openai/config"; import { api } from "../client"; import { SubmitKey, Theme } from "../typing"; - -export type ModelType = (typeof DEFAULT_MODELS)[number]["name"]; +import { AnthropicConfig } from "../client/anthropic/config"; export const DEFAULT_CHAT_CONFIG = { enableAutoGenerateTitle: true, @@ -25,17 +23,13 @@ export type ChatConfig = typeof DEFAULT_CHAT_CONFIG; export const DEFAULT_PROVIDER_CONFIG = { openai: OpenAIConfig.provider, + anthropic: AnthropicConfig.provider, // azure: { // endpoint: "https://api.openai.com", // apiKey: "", // version: "", // ...COMMON_PROVIDER_CONFIG, // }, - // claude: { - // endpoint: "https://api.anthropic.com", - // apiKey: "", - // ...COMMON_PROVIDER_CONFIG, - // }, // google: { // endpoint: "https://api.anthropic.com", // apiKey: "", @@ -45,6 +39,7 @@ export const DEFAULT_PROVIDER_CONFIG = { export const DEFAULT_MODEL_CONFIG = { openai: OpenAIConfig.model, + anthropic: AnthropicConfig.model, // azure: { // model: "gpt-3.5-turbo" as string, // summarizeModel: "gpt-3.5-turbo", @@ -55,15 +50,6 @@ export const DEFAULT_MODEL_CONFIG = { // presence_penalty: 0, // frequency_penalty: 0, // }, - // claude: { - // model: "claude-2", - // summarizeModel: "claude-2", - // - // max_tokens_to_sample: 100000, - // temperature: 1, - // top_p: 0.7, - // top_k: 1, - // }, // google: { // model: "chat-bison-001", // summarizeModel: "claude-2", @@ -125,7 +111,7 @@ export function limitNumber( export const ModalConfigValidator = { model(x: string) { - return x as ModelType; + return x as string; }, max_tokens(x: number) { return limitNumber(x, 0, 100000, 2000); diff --git a/src-tauri/tauri.conf.json b/src-tauri/tauri.conf.json index e530203f6..666d81be7 100644 --- a/src-tauri/tauri.conf.json +++ b/src-tauri/tauri.conf.json @@ -9,7 +9,7 @@ }, "package": { "productName": "ChatGPT Next Web", - "version": "2.9.9" + "version": "3.0.0" }, "tauri": { "allowlist": { From 17e57bb28e67d13048c9123b76b4c642020a3c14 Mon Sep 17 00:00:00 2001 From: xiaotianxt Date: Sat, 30 Mar 2024 11:30:23 +0800 Subject: [PATCH 03/26] feat: update apple-touch-icon.png --- public/apple-touch-icon.png | Bin 14949 -> 12762 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/public/apple-touch-icon.png b/public/apple-touch-icon.png index a76212ae0c2a5ca6854145d272f09b62077b061a..6461c47c206b35ace8837ec10db42b0118993fbd 100644 GIT binary patch literal 12762 zcmZ{L1y~%xvMv@NI0R>LcXwFa-8Hzo2X}|y?(P=cEjYnF*y0e}9UeL7ynD{O@7~_8 zr>eW^FX^tG*>7exLQ!4<5e^p)3=9lWN>Wt$gEs!{un-@|`r3HY4+8G2EFlb5GYL5Q z7?hZ5N}0*Yfzf>Muwc;OXkd_kRX#po;J9F){=tKRse|MF7q16S^)H|rS*mKfXv)d*7~9)27@F7{nKHQB zI{Za}@wxMSu(qZyhD7eRHg?WD?))VG(BS#t|4uWK5dA~N#hRZ)Q%;dc#NNr2h=YNN zfr$hNM?^%#=VW5Wqbw@^FZjoZpTxq&#es*B@!Pj=4BuE8?48URnYp>S8JSoZSy<>l zH0Yf@>|6}p>Fu0J|7qm^+7UH%Hg>XfaIv(vBl>IC(8%7^g`b4vucQBJ|BTbr-SXd_ z?419V)<=Sje|gP>E8lUwYRqc{!jJ9 
[... remainder of the base85 binary patch data omitted (public/apple-touch-icon.png, Bin 14949 -> 12762 bytes) ...]

From 15e595837be45d0fa2f5a429840950345801b7f9 Mon Sep 17 00:00:00 2001
From: butterfly
Date: Tue, 2 Apr 2024 14:21:49 +0800
Subject: [PATCH 04/26] feat: settings command dev done

---
 app/components/chat.tsx  | 9 +++++++++
 app/components/emoji.tsx | 2 --
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index 7b7b66bec..0d0ae93eb 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -979,6 +979,7 @@ function _Chat() {
           }
         });
       },
+      // set openai key & endpoint url
      settings: (text) => {
        try {
          const payload = JSON.parse(text) as {
@@ -996,9 +997,17 @@ function _Chat() {
             if (!res) return;
             if (payload.key) {
               // TODO: auto-fill openai api key here, must specific provider type
+              config.update(
+                (config) =>
+                  (config.providerConfig.openai.apiKey = payload.key!),
+              );
             }
             if (payload.url) {
               // TODO: auto-fill openai url here, must specific provider type
+              config.update(
+                (config) =>
+                  (config.providerConfig.openai.endpoint = payload.url!),
+              );
             }
           });
         }

diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx
index 6f4dc62a9..495e48785 100644
--- a/app/components/emoji.tsx
+++ b/app/components/emoji.tsx
@@ -4,8 +4,6 @@ import EmojiPicker, {
   Theme as EmojiTheme,
 } from "emoji-picker-react";
 
-import { ModelType } from "../store";
-
 import BotIcon from "../icons/bot.svg";
 import BlackBotIcon from "../icons/black-bot.svg";
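
A note on the settings command wired up in PATCH 04 above: it expects the chat input to carry a JSON payload and, after the user confirms, copies the fields into the OpenAI provider config. The sketch below restates that flow outside the React component so the payload shape is easier to see; the config interface is abbreviated from the diff, and `applySettingsPayload` is an illustrative name, not code from the repository.

```ts
// Sketch only: the JSON payload the "settings" chat command accepts and
// how PATCH 04 applies it. Shapes are abbreviated from the diff above;
// applySettingsPayload is an assumed helper name for illustration.
interface SettingsPayload {
  key?: string; // OpenAI API key
  url?: string; // OpenAI-compatible endpoint
}

interface OpenAIProviderSlice {
  providerConfig: {
    openai: { apiKey: string; endpoint: string };
  };
}

function applySettingsPayload(
  config: OpenAIProviderSlice,
  text: string,
): OpenAIProviderSlice {
  const payload = JSON.parse(text) as SettingsPayload;
  // Mirror the two config.update(...) calls added in the patch:
  // each field is written only when present in the payload.
  if (payload.key) {
    config.providerConfig.openai.apiKey = payload.key;
  }
  if (payload.url) {
    config.providerConfig.openai.endpoint = payload.url;
  }
  return config;
}

// Example payload a user could paste into the chat box:
// applySettingsPayload(config, '{"key":"sk-xxxx","url":"https://api.openai.com"}');
```

In the real component the writes go through config.update so the persisted store is notified of the change; the sketch mutates in place only for brevity.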
From 3cb4315193d60ca0bd79aca49628045254967b01 Mon Sep 17 00:00:00 2001
From: butterfly
Date: Sun, 7 Apr 2024 11:50:25 +0800
Subject: [PATCH 05/26] feat: clean codes

---
 app/api/anthropic/[...path]/route.ts | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic/[...path]/route.ts
index 18eea0475..cf7f7a223 100644
--- a/app/api/anthropic/[...path]/route.ts
+++ b/app/api/anthropic/[...path]/route.ts
@@ -1,4 +1,3 @@
-import { type OpenAIListModelResponse } from "@/app/client/platforms/openai";
 import { getServerSideConfig } from "@/app/config/server";
 import {
   ANTHROPIC_BASE_URL,
@@ -6,12 +5,10 @@ import {
   ApiPath,
   DEFAULT_MODELS,
   ModelProvider,
-  OpenaiPath,
 } from "@/app/constant";
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "../../auth";
-import { requestOpenai } from "../../common";
 import { collectModelTable } from "@/app/utils/model";
 
 const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
@@ -121,7 +118,7 @@ export async function request(req: NextRequest) {
   const fetchOptions: RequestInit = {
     headers: {
       "Content-Type": "application/json",
-      // "Cache-Control": "no-store",
+      "Cache-Control": "no-store",
       [authHeaderName]: authValue,
       "anthropic-version":
         req.headers.get("anthropic-version") ||
@@ -136,7 +133,7 @@ export async function request(req: NextRequest) {
     signal: controller.signal,
   };
 
-  // #1815 try to refuse gpt4 request
+  // #1815 try to refuse some request to some models
   if (serverConfig.customModels && req.body) {
     try {
       const modelTable = collectModelTable(
@@ -161,7 +158,7 @@ export async function request(req: NextRequest) {
       );
     }
   } catch (e) {
-    console.error("[OpenAI] gpt4 filter", e);
+    console.error(`[Anthropic] filter`, e);
   }
 }
 console.log("[Anthropic request]", fetchOptions.headers, req.method);
@@ -181,12 +178,6 @@ export async function request(req: NextRequest) {
   // to disable nginx buffering
   newHeaders.set("X-Accel-Buffering", "no");
 
-  // The latest version of the OpenAI API forced the content-encoding to be "br" in json response
-  // So if the streaming is disabled, we need to remove the content-encoding header
-  // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header
-  // The browser will try to decode the response with brotli and fail
-  newHeaders.delete("content-encoding");
-
   return new Response(res.body, {
     status: res.status,
     statusText: res.statusText,

From 768decde9370f6eecd83f65b6974b8af3a9cb792 Mon Sep 17 00:00:00 2001
From: butterfly
Date: Sun, 7 Apr 2024 15:20:27 +0800
Subject: [PATCH 06/26] feat: parse response message

---
 app/client/platforms/anthropic.ts | 122 ++++++++++++++-------
 app/constant.ts                   |  13 +++-
 app/store/chat.ts                 |   2 +-
 3 files changed, 85 insertions(+), 52 deletions(-)

diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts
index 03411e7a8..b8dd7b494 100644
--- a/app/client/platforms/anthropic.ts
+++ b/app/client/platforms/anthropic.ts
@@ -3,7 +3,7 @@ import { ChatOptions, LLMApi, MultimodalContent } from "../api";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import { DEFAULT_API_HOST } from "@/app/constant";
-import { MessageRole, RequestMessage } from "@/app/typing";
+import { RequestMessage } from "@/app/typing";
 import {
   EventStreamContentType,
   fetchEventSource,
@@ -237,45 +237,53 @@ export class ClaudeApi implements LLMApi {
 
     const shouldStream = !!options.config.stream;
 
-    const prompt = options.messages.map((v) => {
-      const { role, content } = v;
-      const insideRole = ClaudeMapper[role] ?? "user";
+    const prompt = options.messages
+      .filter((v) => {
+        if (!v.content) return false;
+        if (typeof v.content === "string" && !v.content.trim()) return false;
+        return true;
+      })
+      .map((v) => {
+        const { role, content } = v;
+        const insideRole = ClaudeMapper[role] ??
"user"; - if (!visionModel || typeof content === "string") { + if (!visionModel || typeof content === "string") { + return { + role: insideRole, + content: getMessageTextContent(v), + }; + } return { role: insideRole, - content: getMessageTextContent(v), + content: content + .filter((v) => v.image_url || v.text) + .map(({ type, text, image_url }) => { + if (type === "text") { + return { + type, + text: text!, + }; + } + const { url = "" } = image_url || {}; + const colonIndex = url.indexOf(":"); + const semicolonIndex = url.indexOf(";"); + const comma = url.indexOf(","); + + const mimeType = url.slice(colonIndex + 1, semicolonIndex); + const encodeType = url.slice(semicolonIndex + 1, comma); + const data = url.slice(comma + 1); + + return { + type: "image" as const, + source: { + type: encodeType, + media_type: mimeType, + data, + }, + }; + }), }; - } - return { - role: insideRole, - content: content.map(({ type, text, image_url }) => { - if (type === "text") { - return { - type, - text: text!, - }; - } - const { url = "" } = image_url || {}; - const colonIndex = url.indexOf(":"); - const semicolonIndex = url.indexOf(";"); - const comma = url.indexOf(","); - - const mimeType = url.slice(colonIndex + 1, semicolonIndex); - const encodeType = url.slice(semicolonIndex + 1, comma); - const data = url.slice(comma + 1); - - return { - type: "image" as const, - source: { - type: encodeType, - media_type: mimeType, - data, - }, - }; - }), - }; - }); + }); const modelConfig = { ...useAppConfig.getState().modelConfig, @@ -372,19 +380,30 @@ export class ClaudeApi implements LLMApi { } }, onmessage(msg) { - if (msg.data === "[DONE]" || context.finished) { + let chunkJson: + | undefined + | { + type: "content_block_delta" | "content_block_stop"; + delta?: { + type: "text_delta"; + text: string; + }; + index: number; + }; + try { + chunkJson = JSON.parse(msg.data); + } catch (e) { + console.error("[Response] parse error", msg.data); + } + + if (!chunkJson || chunkJson.type === "content_block_stop") { return finish(); } - const chunk = msg.data; - try { - const chunkJson = JSON.parse(chunk) as ChatStreamResponse; - const delta = chunkJson.completion; - if (delta) { - context.text += delta; - options.onUpdate?.(context.text, delta); - } - } catch (e) { - console.error("[Request] parse error", chunk, msg); + + const { delta } = chunkJson; + if (delta?.text) { + context.text += delta.text; + options.onUpdate?.(context.text, delta.text); } }, onclose() { @@ -430,12 +449,17 @@ export class ClaudeApi implements LLMApi { return [ { - name: "claude-instant-1", + name: "claude-instant-1.2", available: true, provider, }, { - name: "claude-2", + name: "claude-2.0", + available: true, + provider, + }, + { + name: "claude-2.1", available: true, provider, }, diff --git a/app/constant.ts b/app/constant.ts index 3417b8f5f..df8627778 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -301,7 +301,7 @@ export const DEFAULT_MODELS = [ }, }, { - name: "claude-instant-1", + name: "claude-instant-1.2", available: true, provider: { id: "anthropic", @@ -310,7 +310,16 @@ export const DEFAULT_MODELS = [ }, }, { - name: "claude-2", + name: "claude-2.0", + available: true, + provider: { + id: "anthropic", + providerName: "Anthropic", + providerType: "anthropic", + }, + }, + { + name: "claude-2.1", available: true, provider: { id: "anthropic", diff --git a/app/store/chat.ts b/app/store/chat.ts index 4f37c25b8..2b41f5af8 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -287,7 +287,7 @@ export const useChatStore = 
createPersistStore( session.lastUpdate = Date.now(); }); get().updateStat(message); - // get().summarizeSession(); + get().summarizeSession(); }, async onUserInput(content: string, attachImages?: string[]) { From 86b5c5585523c042a0a2ab451a5bfa50dd95872c Mon Sep 17 00:00:00 2001 From: butterfly Date: Sun, 7 Apr 2024 18:02:31 +0800 Subject: [PATCH 07/26] feat: roles must alternate between user and assistant in claude, so add a fake assistant message between two user messages --- app/client/platforms/anthropic.ts | 172 ++++-------------------------- app/store/chat.ts | 1 - 2 files changed, 20 insertions(+), 153 deletions(-) diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index b8dd7b494..5b833dffd 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -69,31 +69,21 @@ const ClaudeMapper = { system: "user", } as const; +const keys = ["claude-2, claude-instant-1"]; + export class ClaudeApi implements LLMApi { extractMessage(res: any) { console.log("[Response] claude response: ", res); - return res.completion; + return res?.content?.[0]?.text; } - async chatComplete(options: ChatOptions): Promise { - const ClaudeMapper: Record = { - assistant: "Assistant", - user: "Human", - system: "Human", - }; + async chat(options: ChatOptions): Promise { + const visionModel = isVisionModel(options.config.model); const accessStore = useAccessStore.getState(); const shouldStream = !!options.config.stream; - const prompt = options.messages - .map((v) => ({ - role: ClaudeMapper[v.role] ?? "Human", - content: v.content, - })) - .map((v) => `\n\n${v.role}: ${v.content}`) - .join(""); - const modelConfig = { ...useAppConfig.getState().modelConfig, ...useChatStore.getState().currentSession().mask.modelConfig, @@ -102,142 +92,28 @@ export class ClaudeApi implements LLMApi { }, }; - const requestBody: ChatRequest = { - prompt, - stream: shouldStream, + const messages = [...options.messages]; - model: modelConfig.model, - max_tokens_to_sample: modelConfig.max_tokens, - temperature: modelConfig.temperature, - top_p: modelConfig.top_p, - // top_k: modelConfig.top_k, - top_k: 5, - }; + const keys = ["system", "user"]; - const path = this.path(Anthropic.ChatPath1); + // roles must alternate between "user" and "assistant" in claude, so add a fake assistant message between two user messages + for (let i = 0; i < messages.length - 1; i++) { + const message = messages[i]; + const nextMessage = messages[i + 1]; - const controller = new AbortController(); - options.onController?.(controller); - - const payload = { - method: "POST", - body: JSON.stringify(requestBody), - signal: controller.signal, - headers: { - "Content-Type": "application/json", - // Accept: "application/json", - "x-api-key": accessStore.anthropicApiKey, - "anthropic-version": accessStore.anthropicApiVersion, - Authorization: getAuthKey(accessStore.anthropicApiKey), - }, - // mode: "no-cors" as RequestMode, - }; - - if (shouldStream) { - try { - const context = { - text: "", - finished: false, - }; - - const finish = () => { - if (!context.finished) { - options.onFinish(context.text); - context.finished = true; - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(path, { - ...payload, - async onopen(res) { - const contentType = res.headers.get("content-type"); - console.log("response content type: ", contentType); - - if (contentType?.startsWith("text/plain")) { - context.text = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - 
.get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [context.text]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - context.text = responseTexts.join("\n\n"); - - return finish(); - } + if (keys.includes(message.role) && keys.includes(nextMessage.role)) { + messages[i] = [ + message, + { + role: "assistant", + content: ";", }, - onmessage(msg) { - if (msg.data === "[DONE]" || context.finished) { - return finish(); - } - const chunk = msg.data; - try { - const chunkJson = JSON.parse(chunk) as ChatStreamResponse; - const delta = chunkJson.completion; - if (delta) { - context.text += delta; - options.onUpdate?.(context.text, delta); - } - } catch (e) { - console.error("[Request] parse error", chunk, msg); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - }, - openWhenHidden: true, - }); - } catch (e) { - console.error("failed to chat", e); - options.onError?.(e as Error); - } - } else { - try { - controller.signal.onabort = () => options.onFinish(""); - - const res = await fetch(path, payload); - const resJson = await res.json(); - - const message = this.extractMessage(resJson); - options.onFinish(message); - } catch (e) { - console.error("failed to chat", e); - options.onError?.(e as Error); + ] as any; } } - } - async chat(options: ChatOptions): Promise { - const visionModel = isVisionModel(options.config.model); - const accessStore = useAccessStore.getState(); - - const shouldStream = !!options.config.stream; - - const prompt = options.messages + const prompt = messages + .flat() .filter((v) => { if (!v.content) return false; if (typeof v.content === "string" && !v.content.trim()) return false; @@ -285,14 +161,6 @@ export class ClaudeApi implements LLMApi { }; }); - const modelConfig = { - ...useAppConfig.getState().modelConfig, - ...useChatStore.getState().currentSession().mask.modelConfig, - ...{ - model: options.config.model, - }, - }; - const requestBody: AnthropicChatRequest = { messages: prompt, stream: shouldStream, diff --git a/app/store/chat.ts b/app/store/chat.ts index 2b41f5af8..6114e6053 100644 --- a/app/store/chat.ts +++ b/app/store/chat.ts @@ -496,7 +496,6 @@ export const useChatStore = createPersistStore( tokenCount += estimateTokenLength(getMessageTextContent(msg)); reversedRecentMessages.push(msg); } - // concat all messages const recentMessages = [ ...systemPrompts, From 0fbb560e906f04e3bad1af43eba51a7e5b97e3ca Mon Sep 17 00:00:00 2001 From: butterfly Date: Sun, 7 Apr 2024 20:05:19 +0800 Subject: [PATCH 08/26] feat: delete returned models in modals function of ClaudeApi instance --- app/client/platforms/anthropic.ts | 72 +++++++++++++++---------------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 5b833dffd..25318d311 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -189,8 +189,6 @@ export class ClaudeApi implements LLMApi { "anthropic-version": accessStore.anthropicApiVersion, Authorization: getAuthKey(accessStore.anthropicApiKey), }, - // mode: (!clientConfig?.isApp && pathObj.hostname === location.hostname ? 
"same-origin" : "cors") as RequestMode, - // mode: "no-cors" as RequestMode, credentials: "include" as RequestCredentials, }; @@ -309,43 +307,43 @@ export class ClaudeApi implements LLMApi { }; } async models() { - const provider = { - id: "anthropic", - providerName: "Anthropic", - providerType: "anthropic", - }; + // const provider = { + // id: "anthropic", + // providerName: "Anthropic", + // providerType: "anthropic", + // }; return [ - { - name: "claude-instant-1.2", - available: true, - provider, - }, - { - name: "claude-2.0", - available: true, - provider, - }, - { - name: "claude-2.1", - available: true, - provider, - }, - { - name: "claude-3-opus-20240229", - available: true, - provider, - }, - { - name: "claude-3-sonnet-20240229", - available: true, - provider, - }, - { - name: "claude-3-haiku-20240307", - available: true, - provider, - }, + // { + // name: "claude-instant-1.2", + // available: true, + // provider, + // }, + // { + // name: "claude-2.0", + // available: true, + // provider, + // }, + // { + // name: "claude-2.1", + // available: true, + // provider, + // }, + // { + // name: "claude-3-opus-20240229", + // available: true, + // provider, + // }, + // { + // name: "claude-3-sonnet-20240229", + // available: true, + // provider, + // }, + // { + // name: "claude-3-haiku-20240307", + // available: true, + // provider, + // }, ]; } path(path: string): string { From ef7617d545417fe10b3094530a62c59694063d6b Mon Sep 17 00:00:00 2001 From: butterfly Date: Mon, 8 Apr 2024 13:41:02 +0800 Subject: [PATCH 09/26] feat: configs about app client --- app/client/platforms/anthropic.ts | 2 +- next.config.mjs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 25318d311..673f32b11 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -356,7 +356,7 @@ export class ClaudeApi implements LLMApi { const isApp = !!getClientConfig()?.isApp; baseUrl = isApp - ? DEFAULT_API_HOST + "/api/proxy" + ApiPath.Anthropic + ? 
DEFAULT_API_HOST + "/api/proxy/anthropic" : ApiPath.Anthropic; } diff --git a/next.config.mjs b/next.config.mjs index c8e7adb83..daaeba468 100644 --- a/next.config.mjs +++ b/next.config.mjs @@ -77,6 +77,10 @@ if (mode !== "export") { source: "/api/proxy/openai/:path*", destination: "https://api.openai.com/:path*", }, + { + source: "/api/proxy/anthropic/:path*", + destination: "https://api.anthropic.com/:path*", + }, { source: "/google-fonts/:path*", destination: "https://fonts.googleapis.com/:path*", From 5446d8d4a2a71c7e983af1538b25ed4ca7192483 Mon Sep 17 00:00:00 2001 From: butterfly Date: Mon, 8 Apr 2024 13:59:55 +0800 Subject: [PATCH 10/26] feat: fix illegal exports in app/api/anthropic/[...path]/route.ts --- app/api/anthropic/[...path]/route.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic/[...path]/route.ts index cf7f7a223..4264893d9 100644 --- a/app/api/anthropic/[...path]/route.ts +++ b/app/api/anthropic/[...path]/route.ts @@ -80,7 +80,7 @@ export const preferredRegion = [ const serverConfig = getServerSideConfig(); -export async function request(req: NextRequest) { +async function request(req: NextRequest) { const controller = new AbortController(); let authHeaderName = "x-api-key"; From 6dad353e1c940b33c2a243b70b9a604af3a8f794 Mon Sep 17 00:00:00 2001 From: butterfly Date: Mon, 8 Apr 2024 15:33:02 +0800 Subject: [PATCH 11/26] feat: call claude api not in credential 'include' mode --- app/client/platforms/anthropic.ts | 2 +- app/constant.ts | 9 --------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 673f32b11..6472fd8bb 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -189,7 +189,7 @@ export class ClaudeApi implements LLMApi { "anthropic-version": accessStore.anthropicApiVersion, Authorization: getAuthKey(accessStore.anthropicApiKey), }, - credentials: "include" as RequestCredentials, + // credentials: "include" as RequestCredentials, }; if (shouldStream) { diff --git a/app/constant.ts b/app/constant.ts index df8627778..9570737d4 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -327,15 +327,6 @@ export const DEFAULT_MODELS = [ providerType: "anthropic", }, }, - { - name: "claude-3", - available: true, - provider: { - id: "anthropic", - providerName: "Anthropic", - providerType: "anthropic", - }, - }, { name: "claude-3-opus-20240229", available: true, From 63f9063255f150a53160d401e3965e4cff0a38eb Mon Sep 17 00:00:00 2001 From: butterfly Date: Mon, 8 Apr 2024 15:33:27 +0800 Subject: [PATCH 12/26] feat: call claude api not in credential 'include' mode --- app/client/platforms/anthropic.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 6472fd8bb..fea3d8654 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -189,7 +189,6 @@ export class ClaudeApi implements LLMApi { "anthropic-version": accessStore.anthropicApiVersion, Authorization: getAuthKey(accessStore.anthropicApiKey), }, - // credentials: "include" as RequestCredentials, }; if (shouldStream) { From 264da6798ca74ca51290d9c1281ee324d9a8628e Mon Sep 17 00:00:00 2001 From: butterfly Date: Mon, 8 Apr 2024 18:06:17 +0800 Subject: [PATCH 13/26] feat: remove duplicate Input Template --- app/store/chat.ts | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/app/store/chat.ts b/app/store/chat.ts index 
6114e6053..53ec11dbf 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -126,6 +126,11 @@ function fillTemplateWith(input: string, modelConfig: ModelConfig) {
   let output = modelConfig.template ?? DEFAULT_INPUT_TEMPLATE;
 
+  // remove duplicate
+  if (input.startsWith(output)) {
+    output = "";
+  }
+
   // must contains {{input}}
   const inputVar = "{{input}}";
   if (!output.includes(inputVar)) {

From 9b982b408d28fddbc90c2d3e3390653e4f2889b4 Mon Sep 17 00:00:00 2001
From: butterfly
Date: Mon, 8 Apr 2024 18:29:08 +0800
Subject: [PATCH 14/26] feat: fix no max_tokens in payload when calling openai
 vision model

---
 app/client/platforms/openai.ts | 41 +++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 11 deletions(-)

diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 408ee704e..7652ba0f2 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -40,6 +40,20 @@ export interface OpenAIListModelResponse {
   }>;
 }
 
+interface RequestPayload {
+  messages: {
+    role: "system" | "user" | "assistant";
+    content: string | MultimodalContent[];
+  }[];
+  stream?: boolean;
+  model: string;
+  temperature: number;
+  presence_penalty: number;
+  frequency_penalty: number;
+  top_p: number;
+  max_tokens?: number;
+}
+
 export class ChatGPTApi implements LLMApi {
   private disableListModels = true;
 
@@ -98,7 +112,7 @@ export class ChatGPTApi implements LLMApi {
       },
     };
 
-    const requestPayload = {
+    const requestPayload: RequestPayload = {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
@@ -112,12 +126,7 @@ export class ChatGPTApi implements LLMApi {
 
     // add max_tokens to vision model
     if (visionModel) {
-      Object.defineProperty(requestPayload, "max_tokens", {
-        enumerable: true,
-        configurable: true,
-        writable: true,
-        value: modelConfig.max_tokens,
-      });
+      requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
 
     console.log("[Request] openai payload: ", requestPayload);
@@ -229,7 +238,9 @@ export class ChatGPTApi implements LLMApi {
         const text = msg.data;
         try {
           const json = JSON.parse(text);
-          const choices = json.choices as Array<{ delta: { content: string } }>;
+          const choices = json.choices as Array<{
+            delta: { content: string };
+          }>;
           const delta = choices[0]?.delta?.content;
           const textmoderation = json?.prompt_filter_results;
 
@@ -237,9 +248,17 @@ export class ChatGPTApi implements LLMApi {
             remainText += delta;
           }
 
-          if (textmoderation && textmoderation.length > 0 && ServiceProvider.Azure) {
-            const contentFilterResults = textmoderation[0]?.content_filter_results;
-            console.log(`[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`, contentFilterResults);
+          if (
+            textmoderation &&
+            textmoderation.length > 0 &&
+            ServiceProvider.Azure
+          ) {
+            const contentFilterResults =
+              textmoderation[0]?.content_filter_results;
+            console.log(
+              `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`,
+              contentFilterResults,
+            );
           }
         } catch (e) {
           console.error("[Request] parse error", text, msg);

From 02b0e79ba371e9de9da9095a288b902a3c8a4f0a Mon Sep 17 00:00:00 2001
From: butterfly
Date: Mon, 8 Apr 2024 19:27:22 +0800
Subject: [PATCH 15/26] feat: modify some prompts in DEFAULT_INPUT_TEMPLATE
 about expressing latex

---
 app/components/markdown.tsx |  8 ++++----
 app/constant.ts             | 10 +++++++++-
 2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/app/components/markdown.tsx b/app/components/markdown.tsx
index 7c70fe1a5..2b036051a 100644
--- a/app/components/markdown.tsx
+++
b/app/components/markdown.tsx @@ -135,10 +135,10 @@ function escapeBrackets(text: string) { } function _MarkDownContent(props: { content: string }) { - const escapedContent = useMemo( - () => escapeBrackets(escapeDollarNumber(props.content)), - [props.content], - ); + const escapedContent = useMemo(() => { + console.log("================", props.content); + return escapeBrackets(escapeDollarNumber(props.content)); + }, [props.content]); return ( Date: Tue, 9 Apr 2024 09:12:18 +0800 Subject: [PATCH 16/26] feat: remove debug code --- app/components/markdown.tsx | 1 - 1 file changed, 1 deletion(-) diff --git a/app/components/markdown.tsx b/app/components/markdown.tsx index 2b036051a..1afd7de3b 100644 --- a/app/components/markdown.tsx +++ b/app/components/markdown.tsx @@ -136,7 +136,6 @@ function escapeBrackets(text: string) { function _MarkDownContent(props: { content: string }) { const escapedContent = useMemo(() => { - console.log("================", props.content); return escapeBrackets(escapeDollarNumber(props.content)); }, [props.content]); From d50812745211f6ef043a7fad8d50f3178e5a2290 Mon Sep 17 00:00:00 2001 From: butterfly Date: Tue, 9 Apr 2024 10:45:09 +0800 Subject: [PATCH 17/26] feat: fix system prompt --- app/constant.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/constant.ts b/app/constant.ts index b5d57612a..7786d1b06 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -118,7 +118,7 @@ You are ChatGPT, a large language model trained by {{ServiceProvider}}. Knowledge cutoff: {{cutoff}} Current model: {{model}} Current time: {{time}} -Latex inline: \(x^2\) +Latex inline: \\(x^2\\) Latex block: $$e=mc^2$$ `; From 84681d3878bf0493806cff0538cbe2b031d5ebfc Mon Sep 17 00:00:00 2001 From: dlb-data <166484772+dlb-data@users.noreply.github.com> Date: Tue, 9 Apr 2024 16:24:03 +0800 Subject: [PATCH 18/26] Update layout.tsx --- app/layout.tsx | 1 + 1 file changed, 1 insertion(+) diff --git a/app/layout.tsx b/app/layout.tsx index 2c89ba494..70331e974 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -36,6 +36,7 @@ export default function RootLayout({ + From 598468c2b76588c882d4f8f7bf534155217a0c81 Mon Sep 17 00:00:00 2001 From: dlb-data <166484772+dlb-data@users.noreply.github.com> Date: Tue, 9 Apr 2024 16:34:21 +0800 Subject: [PATCH 19/26] Update layout.tsx --- app/layout.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/layout.tsx b/app/layout.tsx index 70331e974..5898b21a1 100644 --- a/app/layout.tsx +++ b/app/layout.tsx @@ -36,7 +36,7 @@ export default function RootLayout({ - + From 7fcfbc372921e85fb957dbe6cab35843d54a3872 Mon Sep 17 00:00:00 2001 From: butterfly Date: Tue, 9 Apr 2024 16:49:51 +0800 Subject: [PATCH 20/26] =?UTF-8?q?feat:=20=E8=A1=A5=E5=85=85=E6=96=87?= =?UTF-8?q?=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .env.template | 11 +++++++++++ README.md | 12 ++++++++++++ README_CN.md | 12 ++++++++++++ 3 files changed, 35 insertions(+) diff --git a/.env.template b/.env.template index d53c1be6c..d5d0c4c27 100644 --- a/.env.template +++ b/.env.template @@ -47,3 +47,14 @@ ENABLE_BALANCE_QUERY= # If you want to disable parse settings from url, set this value to 1. DISABLE_FAST_LINK= + +# anthropic claude Api Key.(optional) +ANTHROPIC_API_KEY= + +### anthropic claude Api version. 
(optional) +ANTHROPIC_API_VERSION= + + + +### anthropic claude Api url (optional) +ANTHROPIC_URL= \ No newline at end of file diff --git a/README.md b/README.md index 429a02d63..c756b7bb6 100644 --- a/README.md +++ b/README.md @@ -200,6 +200,18 @@ Google Gemini Pro Api Key. Google Gemini Pro Api Url. +### `ANTHROPIC_API_KEY` (optional) + +anthropic claude Api Key. + +### `ANTHROPIC_API_VERSION` (optional) + +anthropic claude Api version. + +### `ANTHROPIC_URL` (optional) + +anthropic claude Api Url. + ### `HIDE_USER_API_KEY` (optional) > Default: Empty diff --git a/README_CN.md b/README_CN.md index 4acefefa5..0df271814 100644 --- a/README_CN.md +++ b/README_CN.md @@ -114,6 +114,18 @@ Google Gemini Pro 密钥. Google Gemini Pro Api Url. +### `ANTHROPIC_API_KEY` (optional) + +anthropic claude Api Key. + +### `ANTHROPIC_API_VERSION` (optional) + +anthropic claude Api version. + +### `ANTHROPIC_URL` (optional) + +anthropic claude Api Url. + ### `HIDE_USER_API_KEY` (可选) 如果你不想让用户自行填入 API Key,将此环境变量设置为 1 即可。 From 8b191bd2f733d8677c851d90a5003617bd1da937 Mon Sep 17 00:00:00 2001 From: butterfly Date: Tue, 9 Apr 2024 18:05:56 +0800 Subject: [PATCH 21/26] feat: white webdav server domain --- .env.template | 5 ++- README.md | 7 +++++ README_CN.md | 7 +++++ app/api/webdav/[...path]/route.ts | 52 +++++++++++++++++++------------ app/config/server.ts | 5 +++ app/constant.ts | 2 ++ 6 files changed, 57 insertions(+), 21 deletions(-) diff --git a/.env.template b/.env.template index d5d0c4c27..fae5d8f4c 100644 --- a/.env.template +++ b/.env.template @@ -57,4 +57,7 @@ ANTHROPIC_API_VERSION= ### anthropic claude Api url (optional) -ANTHROPIC_URL= \ No newline at end of file +ANTHROPIC_URL= + +### (optional) +WHITE_WEBDEV_DOMAINS= \ No newline at end of file diff --git a/README.md b/README.md index c756b7bb6..d821093f4 100644 --- a/README.md +++ b/README.md @@ -245,6 +245,13 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model User `-all` to disable all default models, `+all` to enable all default models. +### `WHITE_WEBDEV_DOMAINS` (可选) + +如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: +- 每一个地址必须是一个完整的 origin +> `https://xxxx` +- 多个地址以`,`相连 + ## Requirements NodeJS >= 18, Docker >= 20 diff --git a/README_CN.md b/README_CN.md index 0df271814..07f426ee8 100644 --- a/README_CN.md +++ b/README_CN.md @@ -142,6 +142,13 @@ anthropic claude Api Url. 
如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。 +### `WHITE_WEBDEV_DOMAINS` (可选) + +如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: +- 每一个地址必须是一个完整的 origin +> `https://xxxx` +- 多个地址以`,`相连 + ### `CUSTOM_MODELS` (可选) > 示例:`+qwen-7b-chat,+glm-6b,-gpt-3.5-turbo,gpt-4-1106-preview=gpt-4-turbo` 表示增加 `qwen-7b-chat` 和 `glm-6b` 到模型列表,而从列表中删除 `gpt-3.5-turbo`,并将 `gpt-4-1106-preview` 模型名字展示为 `gpt-4-turbo`。 diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 56c2388ae..58d591bfc 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -1,5 +1,14 @@ import { NextRequest, NextResponse } from "next/server"; -import { STORAGE_KEY } from "../../../constant"; +import { STORAGE_KEY, internalWhiteWebDavDomains } from "../../../constant"; +import { getServerSideConfig } from "@/app/config/server"; + +const config = getServerSideConfig(); + +const mergedWhiteWebDavDomains = [ + ...internalWhiteWebDavDomains, + ...config.whiteWebDevDomains, +].filter((domain) => Boolean(domain.trim())); + async function handle( req: NextRequest, { params }: { params: { path: string[] } }, @@ -14,7 +23,9 @@ async function handle( let endpoint = requestUrl.searchParams.get("endpoint"); // Validate the endpoint to prevent potential SSRF attacks - if (!endpoint || !endpoint.startsWith("/")) { + if ( + !mergedWhiteWebDavDomains.some((domain) => endpoint?.startsWith(domain)) + ) { return NextResponse.json( { error: true, @@ -25,6 +36,11 @@ async function handle( }, ); } + + if (!endpoint?.endsWith("/")) { + endpoint += "/"; + } + const endpointPath = params.path.join("/"); const targetPath = `${endpoint}/${endpointPath}`; @@ -42,10 +58,7 @@ async function handle( } // for MKCOL request, only allow request ${folder} - if ( - req.method === "MKCOL" && - !targetPath.endsWith(folder) - ) { + if (req.method === "MKCOL" && !targetPath.endsWith(folder)) { return NextResponse.json( { error: true, @@ -58,10 +71,7 @@ async function handle( } // for GET request, only allow request ending with fileName - if ( - req.method === "GET" && - !targetPath.endsWith(fileName) - ) { + if (req.method === "GET" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -74,10 +84,7 @@ async function handle( } // for PUT request, only allow request ending with fileName - if ( - req.method === "PUT" && - !targetPath.endsWith(fileName) - ) { + if (req.method === "PUT" && !targetPath.endsWith(fileName)) { return NextResponse.json( { error: true, @@ -101,7 +108,7 @@ async function handle( authorization: req.headers.get("authorization") ?? "", }, body: shouldNotHaveBody ? null : req.body, - redirect: 'manual', + redirect: "manual", method, // @ts-ignore duplex: "half", @@ -109,15 +116,20 @@ async function handle( const fetchResult = await fetch(targetUrl, fetchOptions); - console.log("[Any Proxy]", targetUrl, { - status: fetchResult.status, - statusText: fetchResult.statusText, - }); + console.log( + "[Any Proxy]", + targetUrl, + { + status: fetchResult.status, + statusText: fetchResult.statusText, + }, + fetchResult, + ); return fetchResult; } -export const POST = handle; +export const PUT = handle; export const GET = handle; export const OPTIONS = handle; diff --git a/app/config/server.ts b/app/config/server.ts index d18e4a1a6..596ef9cab 100644 --- a/app/config/server.ts +++ b/app/config/server.ts @@ -79,6 +79,10 @@ export const getServerSideConfig = () => { `[Server Config] using ${randomIndex + 1} of ${apiKeys.length} api key`, ); + const whiteWebDevDomains = (process.env.WHITE_WEBDEV_DOMAINS ?? 
"").split( + ",", + ); + return { baseUrl: process.env.BASE_URL, apiKey, @@ -112,5 +116,6 @@ export const getServerSideConfig = () => { hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY, disableFastLink: !!process.env.DISABLE_FAST_LINK, customModels, + whiteWebDevDomains, }; }; diff --git a/app/constant.ts b/app/constant.ts index 7786d1b06..48fca62c0 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -366,3 +366,5 @@ export const DEFAULT_MODELS = [ export const CHAT_PAGE_SIZE = 15; export const MAX_RENDER_MSG_COUNT = 45; + +export const internalWhiteWebDavDomains = ["https://dav.jianguoyun.com"]; From 4cb0655192281765fea2ef73e6bd620a961d1f70 Mon Sep 17 00:00:00 2001 From: butterfly Date: Tue, 9 Apr 2024 18:17:00 +0800 Subject: [PATCH 22/26] feat: Optimize document --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index d821093f4..19b399819 100644 --- a/README.md +++ b/README.md @@ -247,10 +247,10 @@ User `-all` to disable all default models, `+all` to enable all default models. ### `WHITE_WEBDEV_DOMAINS` (可选) -如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: -- 每一个地址必须是一个完整的 origin +You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: +- Each address must be a complete origin > `https://xxxx` -- 多个地址以`,`相连 +- Multiple addresses are connected by ', ' ## Requirements From b175132854e5710d6635f8f58b9a690cd04a66e1 Mon Sep 17 00:00:00 2001 From: butterfly Date: Tue, 9 Apr 2024 18:23:52 +0800 Subject: [PATCH 23/26] feat: Optimize var names --- .env.template | 2 +- README.md | 2 +- README_CN.md | 6 +++--- app/api/webdav/[...path]/route.ts | 10 +++++----- app/config/server.ts | 4 ++-- app/constant.ts | 2 +- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.env.template b/.env.template index fae5d8f4c..b2a0438d9 100644 --- a/.env.template +++ b/.env.template @@ -60,4 +60,4 @@ ANTHROPIC_API_VERSION= ANTHROPIC_URL= ### (optional) -WHITE_WEBDEV_DOMAINS= \ No newline at end of file +WHITE_WEBDEV_ENDPOINTS= \ No newline at end of file diff --git a/README.md b/README.md index 19b399819..0715dafdf 100644 --- a/README.md +++ b/README.md @@ -245,7 +245,7 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model User `-all` to disable all default models, `+all` to enable all default models. -### `WHITE_WEBDEV_DOMAINS` (可选) +### `WHITE_WEBDEV_ENDPOINTS` (可选) You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: - Each address must be a complete origin diff --git a/README_CN.md b/README_CN.md index 07f426ee8..10b5fd035 100644 --- a/README_CN.md +++ b/README_CN.md @@ -142,11 +142,11 @@ anthropic claude Api Url. 
From b175132854e5710d6635f8f58b9a690cd04a66e1 Mon Sep 17 00:00:00 2001
From: butterfly
Date: Tue, 9 Apr 2024 18:23:52 +0800
Subject: [PATCH 23/26] feat: Optimize var names

---
 .env.template                     |  2 +-
 README.md                         |  2 +-
 README_CN.md                      |  6 +++---
 app/api/webdav/[...path]/route.ts | 10 +++++-----
 app/config/server.ts              |  4 ++--
 app/constant.ts                   |  2 +-
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.env.template b/.env.template
index fae5d8f4c..b2a0438d9 100644
--- a/.env.template
+++ b/.env.template
@@ -60,4 +60,4 @@ ANTHROPIC_API_VERSION=
 ANTHROPIC_URL=
 
 ### (optional)
-WHITE_WEBDEV_DOMAINS=
\ No newline at end of file
+WHITE_WEBDEV_ENDPOINTS=
\ No newline at end of file
diff --git a/README.md b/README.md
index 19b399819..0715dafdf 100644
--- a/README.md
+++ b/README.md
@@ -245,7 +245,7 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model
 
 User `-all` to disable all default models, `+all` to enable all default models.
 
-### `WHITE_WEBDEV_DOMAINS` (可选)
+### `WHITE_WEBDEV_ENDPOINTS` (可选)
 
 You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format:
 - Each address must be a complete origin
diff --git a/README_CN.md b/README_CN.md
index 07f426ee8..10b5fd035 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -142,11 +142,11 @@ anthropic claude Api Url.
 
 如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。
 
-### `WHITE_WEBDEV_DOMAINS` (可选)
+### `WHITE_WEBDEV_ENDPOINTS` (可选)
 
 如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求:
-- 每一个地址必须是一个完整的 origin
-> `https://xxxx`
+- 每一个地址必须是一个完整的 endpoint
+> `https://xxxx/xxx`
 - 多个地址以`,`相连
 
 ### `CUSTOM_MODELS` (可选)
diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts
index 58d591bfc..27aaee690 100644
--- a/app/api/webdav/[...path]/route.ts
+++ b/app/api/webdav/[...path]/route.ts
@@ -1,12 +1,12 @@
 import { NextRequest, NextResponse } from "next/server";
-import { STORAGE_KEY, internalWhiteWebDavDomains } from "../../../constant";
+import { STORAGE_KEY, internalWhiteWebDavEndpoints } from "../../../constant";
 import { getServerSideConfig } from "@/app/config/server";
 
 const config = getServerSideConfig();
 
-const mergedWhiteWebDavDomains = [
-  ...internalWhiteWebDavDomains,
-  ...config.whiteWebDevDomains,
+const mergedWhiteWebDavEndpoints = [
+  ...internalWhiteWebDavEndpoints,
+  ...config.whiteWebDevEndpoints,
 ].filter((domain) => Boolean(domain.trim()));
 
 async function handle(
@@ -24,7 +24,7 @@ async function handle(
 
   // Validate the endpoint to prevent potential SSRF attacks
   if (
-    !mergedWhiteWebDavDomains.some((domain) => endpoint?.startsWith(domain))
+    !mergedWhiteWebDavEndpoints.some((white) => endpoint?.startsWith(white))
   ) {
     return NextResponse.json(
       {
diff --git a/app/config/server.ts b/app/config/server.ts
index 596ef9cab..c27ef5e44 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -79,7 +79,7 @@ export const getServerSideConfig = () => {
     `[Server Config] using ${randomIndex + 1} of ${apiKeys.length} api key`,
   );
 
-  const whiteWebDevDomains = (process.env.WHITE_WEBDEV_DOMAINS ?? "").split(
+  const whiteWebDevEndpoints = (process.env.WHITE_WEBDEV_ENDPOINTS ?? "").split(
     ",",
   );
 
@@ -116,6 +116,6 @@ export const getServerSideConfig = () => {
     hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY,
     disableFastLink: !!process.env.DISABLE_FAST_LINK,
     customModels,
-    whiteWebDevDomains,
+    whiteWebDevEndpoints,
   };
 };
diff --git a/app/constant.ts b/app/constant.ts
index 48fca62c0..ce9b08d14 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -367,4 +367,4 @@ export const DEFAULT_MODELS = [
 export const CHAT_PAGE_SIZE = 15;
 export const MAX_RENDER_MSG_COUNT = 45;
 
-export const internalWhiteWebDavDomains = ["https://dav.jianguoyun.com"];
+export const internalWhiteWebDavEndpoints = ["https://dav.jianguoyun.com"];
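The rename from "domains" to "endpoints" is not cosmetic: entries may now carry a path, and prefix-matching against a full endpoint confines the proxy to that path instead of the whole origin. A toy comparison, with all values made up:

```ts
// Illustrative only: endpoint-level matching is stricter than origin-level.
const originAllowlist = ["https://dav.example.com"];
const endpointAllowlist = ["https://dav.example.com/dav/"];

const probe = "https://dav.example.com/private/";

console.log(originAllowlist.some((o) => probe.startsWith(o)));   // true  (whole origin admitted)
console.log(endpointAllowlist.some((e) => probe.startsWith(e))); // false (only /dav/ admitted)
```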
"").split( ",", ); @@ -116,6 +116,6 @@ export const getServerSideConfig = () => { hideBalanceQuery: !process.env.ENABLE_BALANCE_QUERY, disableFastLink: !!process.env.DISABLE_FAST_LINK, customModels, - whiteWebDevDomains, + whiteWebDevEndpoints, }; }; diff --git a/app/constant.ts b/app/constant.ts index 48fca62c0..ce9b08d14 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -367,4 +367,4 @@ export const DEFAULT_MODELS = [ export const CHAT_PAGE_SIZE = 15; export const MAX_RENDER_MSG_COUNT = 45; -export const internalWhiteWebDavDomains = ["https://dav.jianguoyun.com"]; +export const internalWhiteWebDavEndpoints = ["https://dav.jianguoyun.com"]; From df3313971dd3e66abcf7dafbabc48f1630add8d2 Mon Sep 17 00:00:00 2001 From: butterfly Date: Tue, 9 Apr 2024 18:24:22 +0800 Subject: [PATCH 24/26] feat: Optimize code --- app/api/webdav/[...path]/route.ts | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 27aaee690..f64a9ef13 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -116,15 +116,10 @@ async function handle( const fetchResult = await fetch(targetUrl, fetchOptions); - console.log( - "[Any Proxy]", - targetUrl, - { - status: fetchResult.status, - statusText: fetchResult.statusText, - }, - fetchResult, - ); + console.log("[Any Proxy]", targetUrl, { + status: fetchResult.status, + statusText: fetchResult.statusText, + }); return fetchResult; } From 908ce3bbd988c45dea10b552ede34cd051c99de5 Mon Sep 17 00:00:00 2001 From: butterfly Date: Tue, 9 Apr 2024 18:25:51 +0800 Subject: [PATCH 25/26] feat: Optimize document --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0715dafdf..633124ec7 100644 --- a/README.md +++ b/README.md @@ -248,8 +248,8 @@ User `-all` to disable all default models, `+all` to enable all default models. 
From 908ce3bbd988c45dea10b552ede34cd051c99de5 Mon Sep 17 00:00:00 2001
From: butterfly
Date: Tue, 9 Apr 2024 18:25:51 +0800
Subject: [PATCH 25/26] feat: Optimize document

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 0715dafdf..633124ec7 100644
--- a/README.md
+++ b/README.md
@@ -248,8 +248,8 @@ User `-all` to disable all default models, `+all` to enable all default models.
 ### `WHITE_WEBDEV_ENDPOINTS` (可选)
 
 You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format:
-- Each address must be a complete origin
-> `https://xxxx`
+- Each address must be a complete endpoint
+> `https://xxxx/yyy`
 - Multiple addresses are connected by ', '
 
 ## Requirements

From 79f342439af8e4c8835c32398b58098acd6bd3dc Mon Sep 17 00:00:00 2001
From: butterfly
Date: Tue, 9 Apr 2024 20:49:51 +0800
Subject: [PATCH 26/26] feat: Solve the problem of using openai interface
 protocol for user-defined claude model & add some famous webdav endpoints

---
 app/components/exporter.tsx |  3 ++-
 app/components/home.tsx     |  3 ++-
 app/constant.ts             | 12 +++++++++++-
 app/store/chat.ts           |  5 +++--
 app/utils/checkers.ts       | 21 +++++++++++++++++++++
 app/utils/model.ts          | 12 ++++++++++--
 6 files changed, 49 insertions(+), 7 deletions(-)
 create mode 100644 app/utils/checkers.ts

diff --git a/app/components/exporter.tsx b/app/components/exporter.tsx
index f3f085721..20e240d93 100644
--- a/app/components/exporter.tsx
+++ b/app/components/exporter.tsx
@@ -40,6 +40,7 @@ import { EXPORT_MESSAGE_CLASS_NAME, ModelProvider } from "../constant";
 import { getClientConfig } from "../config/client";
 import { ClientApi } from "../client/api";
 import { getMessageTextContent } from "../utils";
+import { identifyDefaultClaudeModel } from "../utils/checkers";
 
 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
@@ -315,7 +316,7 @@ export function PreviewActions(props: {
     var api: ClientApi;
     if (config.modelConfig.model.startsWith("gemini")) {
       api = new ClientApi(ModelProvider.GeminiPro);
-    } else if (config.modelConfig.model.startsWith("claude")) {
+    } else if (identifyDefaultClaudeModel(config.modelConfig.model)) {
       api = new ClientApi(ModelProvider.Claude);
     } else {
       api = new ClientApi(ModelProvider.GPT);
diff --git a/app/components/home.tsx b/app/components/home.tsx
index 26bb3a44c..ffac64fda 100644
--- a/app/components/home.tsx
+++ b/app/components/home.tsx
@@ -29,6 +29,7 @@ import { AuthPage } from "./auth";
 import { getClientConfig } from "../config/client";
 import { ClientApi } from "../client/api";
 import { useAccessStore } from "../store";
+import { identifyDefaultClaudeModel } from "../utils/checkers";
 
 export function Loading(props: { noLogo?: boolean }) {
   return (
@@ -173,7 +174,7 @@ export function useLoadData() {
   var api: ClientApi;
   if (config.modelConfig.model.startsWith("gemini")) {
     api = new ClientApi(ModelProvider.GeminiPro);
-  } else if (config.modelConfig.model.startsWith("claude")) {
+  } else if (identifyDefaultClaudeModel(config.modelConfig.model)) {
     api = new ClientApi(ModelProvider.Claude);
   } else {
     api = new ClientApi(ModelProvider.GPT);
diff --git a/app/constant.ts b/app/constant.ts
index ce9b08d14..1ad76870f 100644
--- a/app/constant.ts
+++ b/app/constant.ts
@@ -367,4 +367,14 @@ export const DEFAULT_MODELS = [
 export const CHAT_PAGE_SIZE = 15;
 export const MAX_RENDER_MSG_COUNT = 45;
 
-export const internalWhiteWebDavEndpoints = ["https://dav.jianguoyun.com"];
+// some famous webdav endpoints
+export const internalWhiteWebDavEndpoints = [
+  "https://dav.jianguoyun.com/dav/",
+  "https://dav.dropdav.com/",
+  "https://dav.box.com/dav",
+  "https://nanao.teracloud.jp/dav/",
+  "https://webdav.4shared.com/",
+  "https://dav.idrivesync.com",
+  "https://webdav.yandex.com",
+  "https://app.koofr.net/dav/Koofr",
+];
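`exporter.tsx`, `home.tsx`, and (below) `chat.ts` all repeat the same three-way client selection. Folded into one hypothetical helper, which this patch does not add but which captures the rule, the logic reads:

```ts
// Sketch of the selection logic the patch repeats inline; import paths
// assume app/components/, as in the diffs above.
import { ClientApi } from "../client/api";
import { ModelProvider } from "../constant";
import { identifyDefaultClaudeModel } from "../utils/checkers";

function selectClientApi(model: string): ClientApi {
  if (model.startsWith("gemini")) {
    return new ClientApi(ModelProvider.GeminiPro);
  }
  // Only default Anthropic models take the Claude client; custom
  // "claude-*" models fall through to the OpenAI-compatible client.
  if (identifyDefaultClaudeModel(model)) {
    return new ClientApi(ModelProvider.Claude);
  }
  return new ClientApi(ModelProvider.GPT);
}
```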
diff --git a/app/store/chat.ts b/app/store/chat.ts
index 53ec11dbf..eeddd8463 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -20,6 +20,7 @@ import { prettyObject } from "../utils/format";
 import { estimateTokenLength } from "../utils/token";
 import { nanoid } from "nanoid";
 import { createPersistStore } from "../utils/store";
+import { identifyDefaultClaudeModel } from "../utils/checkers";
 
 export type ChatMessage = RequestMessage & {
   date: string;
@@ -353,7 +354,7 @@ export const useChatStore = createPersistStore(
       var api: ClientApi;
       if (modelConfig.model.startsWith("gemini")) {
         api = new ClientApi(ModelProvider.GeminiPro);
-      } else if (modelConfig.model.startsWith("claude")) {
+      } else if (identifyDefaultClaudeModel(modelConfig.model)) {
         api = new ClientApi(ModelProvider.Claude);
       } else {
         api = new ClientApi(ModelProvider.GPT);
@@ -539,7 +540,7 @@ export const useChatStore = createPersistStore(
       var api: ClientApi;
       if (modelConfig.model.startsWith("gemini")) {
         api = new ClientApi(ModelProvider.GeminiPro);
-      } else if (modelConfig.model.startsWith("claude")) {
+      } else if (identifyDefaultClaudeModel(modelConfig.model)) {
         api = new ClientApi(ModelProvider.Claude);
       } else {
         api = new ClientApi(ModelProvider.GPT);
diff --git a/app/utils/checkers.ts b/app/utils/checkers.ts
new file mode 100644
index 000000000..4496e1039
--- /dev/null
+++ b/app/utils/checkers.ts
@@ -0,0 +1,21 @@
+import { useAccessStore } from "../store/access";
+import { useAppConfig } from "../store/config";
+import { collectModels } from "./model";
+
+export function identifyDefaultClaudeModel(modelName: string) {
+  const accessStore = useAccessStore.getState();
+  const configStore = useAppConfig.getState();
+
+  const allModals = collectModels(
+    configStore.models,
+    [configStore.customModels, accessStore.customModels].join(","),
+  );
+
+  const modelMeta = allModals.find((m) => m.name === modelName);
+
+  return (
+    modelName.startsWith("claude") &&
+    modelMeta &&
+    modelMeta.provider?.providerType === "anthropic"
+  );
+}
diff --git a/app/utils/model.ts b/app/utils/model.ts
index b2a42ef02..378fc498e 100644
--- a/app/utils/model.ts
+++ b/app/utils/model.ts
@@ -22,6 +22,12 @@ export function collectModelTable(
     };
   });
 
+  const customProvider = (modelName: string) => ({
+    id: modelName,
+    providerName: "",
+    providerType: "custom",
+  });
+
   // server custom models
   customModels
     .split(",")
@@ -34,13 +40,15 @@ export function collectModelTable(
 
       // enable or disable all models
       if (name === "all") {
-        Object.values(modelTable).forEach((model) => (model.available = available));
+        Object.values(modelTable).forEach(
+          (model) => (model.available = available),
+        );
       } else {
         modelTable[name] = {
           name,
           displayName: displayName || name,
           available,
-          provider: modelTable[name]?.provider, // Use optional chaining
+          provider: modelTable[name]?.provider ?? customProvider(name), // Use optional chaining
         };
       }
     });
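Taken together, `customProvider` and `identifyDefaultClaudeModel` are what fix the bug named in the subject: a model added via `CUSTOM_MODELS` now falls back to `providerType: "custom"`, so a user-defined `claude-*` model no longer matches the Anthropic branch and keeps using the OpenAI-compatible protocol. A self-contained walk-through under assumed data (types and table values are illustrative, with the store plumbing removed):

```ts
// Illustrative model table: one default Anthropic model, one user-defined
// model added via CUSTOM_MODELS that received the customProvider fallback.
interface ProviderMeta {
  id: string;
  providerName: string;
  providerType: string;
}

interface ModelMeta {
  name: string;
  available: boolean;
  provider?: ProviderMeta;
}

const modelTable: Record<string, ModelMeta> = {
  "claude-3-opus": {
    name: "claude-3-opus",
    available: true,
    provider: { id: "anthropic", providerName: "Anthropic", providerType: "anthropic" },
  },
  "claude-via-proxy": {
    name: "claude-via-proxy",
    available: true,
    provider: { id: "claude-via-proxy", providerName: "", providerType: "custom" },
  },
};

// Same test as identifyDefaultClaudeModel, minus the store lookups.
function isDefaultClaudeModel(modelName: string): boolean {
  const meta = modelTable[modelName];
  return (
    modelName.startsWith("claude") &&
    meta?.provider?.providerType === "anthropic"
  );
}

console.log(isDefaultClaudeModel("claude-3-opus"));    // true  -> Anthropic client
console.log(isDefaultClaudeModel("claude-via-proxy")); // false -> OpenAI-compatible client
```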