feat: add claude and bard

commit cdf0311d27 (parent 5610f423d0)
Author: Yidadaa
Date: 2023-11-07 23:22:11 +08:00

20 changed files with 580 additions and 394 deletions

app/store/access.ts

@@ -1,7 +1,7 @@
- import { REMOTE_API_HOST, DEFAULT_MODELS, StoreKey } from "../constant";
+ import { REMOTE_API_HOST, StoreKey } from "../constant";
import { getClientConfig } from "../config/client";
import { createPersistStore } from "../utils/store";
- import { getAuthHeaders } from "../client/common/auth";
+ import { getAuthKey } from "../client/common/auth";
let fetchState = 0; // 0 not fetch, 1 fetching, 2 done
@@ -39,7 +39,7 @@ export const useAccessStore = createPersistStore(
method: "post",
body: null,
headers: {
- ...getAuthHeaders(),
+ Authorization: getAuthKey(),
},
})
.then((res) => res.json())
@@ -48,9 +48,7 @@ export const useAccessStore = createPersistStore(
set(() => ({ ...res }));
if (res.disableGPT4) {
- DEFAULT_MODELS.forEach(
-   (m: any) => (m.available = !m.name.startsWith("gpt-4")),
- );
+ // disable model
}
})
.catch(() => {
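
The auth change above swaps a spread headers object for a single Authorization value. A minimal sketch of what getAuthKey might look like, assuming it prefers a user-supplied API key and otherwise falls back to the access code (hypothetical; the real implementation is in app/client/common/auth.ts and may differ, and ACCESS_CODE_PREFIX is assumed to exist in app/constant.ts):

// Hypothetical sketch only; names other than getAuthKey are assumptions.
import { ACCESS_CODE_PREFIX } from "../../constant";
import { useAccessStore } from "../../store";

export function getAuthKey(apiKey = ""): string {
  const accessStore = useAccessStore.getState();
  if (apiKey) {
    // prefer an explicitly configured API key
    return `Bearer ${apiKey}`;
  }
  if (accessStore.accessCode) {
    // otherwise authenticate with the server access code
    return `Bearer ${ACCESS_CODE_PREFIX}${accessStore.accessCode}`;
  }
  return "";
}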

app/store/chat.ts

@@ -2,20 +2,9 @@ import { trimTopic } from "../utils";
import Locale, { getLang } from "../locales";
import { showToast } from "../components/ui-lib";
- import {
-   LLMProvider,
-   MaskConfig,
-   ModelConfig,
-   ModelType,
-   useAppConfig,
- } from "./config";
+ import { MaskConfig, useAppConfig } from "./config";
import { createEmptyMask, Mask } from "./mask";
- import {
-   DEFAULT_INPUT_TEMPLATE,
-   DEFAULT_SYSTEM_TEMPLATE,
-   StoreKey,
-   SUMMARIZE_MODEL,
- } from "../constant";
+ import { DEFAULT_INPUT_TEMPLATE, StoreKey } from "../constant";
import { ChatControllerPool } from "../client/common/controller";
import { prettyObject } from "../utils/format";
import { estimateTokenLength } from "../utils/token";
@@ -85,11 +74,6 @@ function createEmptySession(): ChatSession {
};
}
- function getSummarizeModel(currentModel: string) {
-   // if it is using gpt-* models, force to use 3.5 to summarize
-   return currentModel.startsWith("gpt") ? SUMMARIZE_MODEL : currentModel;
- }
function countMessages(msgs: ChatMessage[]) {
return msgs.reduce((pre, cur) => pre + estimateTokenLength(cur.content), 0);
}
@@ -291,6 +275,18 @@ export const useChatStore = createPersistStore(
return this.extractModelConfig(maskConfig);
},
+ getMaxTokens() {
+   const maskConfig = this.getCurrentMaskConfig();
+   if (maskConfig.provider === "openai") {
+     return maskConfig.modelConfig.openai.max_tokens;
+   } else if (maskConfig.provider === "anthropic") {
+     return maskConfig.modelConfig.anthropic.max_tokens_to_sample;
+   }
+   return 8192;
+ },
getClient() {
const appConfig = useAppConfig.getState();
const currentMaskConfig = get().getCurrentMaskConfig();
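
The new getMaxTokens() centralizes the per-provider token ceiling: OpenAI calls it max_tokens, Anthropic calls it max_tokens_to_sample, and anything else falls back to 8192. The mask-config shape the method implies, reconstructed from this hunk alone (the real types live in app/store/config.ts and may carry more fields):

// Shape implied by getMaxTokens(); reconstructed from the diff, not copied.
interface MaskModelConfig {
  openai: { max_tokens: number };
  anthropic: { max_tokens_to_sample: number };
}

interface MaskConfig {
  provider: "openai" | "anthropic";
  modelConfig: MaskModelConfig;
}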
@@ -463,7 +459,7 @@ export const useChatStore = createPersistStore(
: shortTermMemoryStartIndex;
// and if user has cleared history messages, we should exclude the memory too.
const contextStartIndex = Math.max(clearContextIndex, memoryStartIndex);
- const maxTokenThreshold = modelConfig.max_tokens;
+ const maxTokenThreshold = this.getMaxTokens();
// get recent messages as much as possible
const reversedRecentMessages = [];
@@ -546,7 +542,6 @@ export const useChatStore = createPersistStore(
});
}
- const modelConfig = this.getCurrentModelConfig();
const summarizeIndex = Math.max(
session.lastSummarizeIndex,
session.clearContextIndex ?? 0,
@@ -557,7 +552,7 @@ export const useChatStore = createPersistStore(
const historyMsgLength = countMessages(toBeSummarizedMsgs);
- if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
+ if (historyMsgLength > this.getMaxTokens()) {
const n = toBeSummarizedMsgs.length;
toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
Math.max(0, n - chatConfig.historyMessageCount),
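
Besides routing through getMaxTokens(), the new guard fixes a precedence bug in the line it replaces: ">" binds tighter than "??", so the old condition parsed as (historyMsgLength > modelConfig?.max_tokens) ?? 4000. A comparison always yields a boolean, which is never nullish, so the 4000 fallback was dead code. A minimal illustration:

// Why the removed `?? 4000` fallback could never fire.
function overThreshold(len: number, max?: number): boolean {
  // Old form: `len > max ?? 4000` parses as `(len > max) ?? 4000`;
  // `5000 > undefined` is false, and false is not nullish,
  // so the right-hand side of ?? was unreachable.
  // The fallback belongs on the limit itself:
  return len > (max ?? 4000);
}

console.log(overThreshold(5000)); // true  (fallback limit of 4000 applies)
console.log(overThreshold(5000, 8192)); // false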

app/store/config.ts

@@ -2,7 +2,6 @@ import { isMacOS } from "../utils";
import { getClientConfig } from "../config/client";
import {
DEFAULT_INPUT_TEMPLATE,
- DEFAULT_MODELS,
DEFAULT_SIDEBAR_WIDTH,
StoreKey,
} from "../constant";
@@ -10,8 +9,7 @@ import { createPersistStore } from "../utils/store";
import { OpenAIConfig } from "../client/openai/config";
import { api } from "../client";
import { SubmitKey, Theme } from "../typing";
- export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];
+ import { AnthropicConfig } from "../client/anthropic/config";
export const DEFAULT_CHAT_CONFIG = {
enableAutoGenerateTitle: true,
@@ -25,17 +23,13 @@ export type ChatConfig = typeof DEFAULT_CHAT_CONFIG;
export const DEFAULT_PROVIDER_CONFIG = {
openai: OpenAIConfig.provider,
+ anthropic: AnthropicConfig.provider,
// azure: {
// endpoint: "https://api.openai.com",
// apiKey: "",
// version: "",
// ...COMMON_PROVIDER_CONFIG,
// },
- // claude: {
- //   endpoint: "https://api.anthropic.com",
- //   apiKey: "",
- //   ...COMMON_PROVIDER_CONFIG,
- // },
// google: {
// endpoint: "https://api.anthropic.com",
// apiKey: "",
@@ -45,6 +39,7 @@ export const DEFAULT_PROVIDER_CONFIG = {
export const DEFAULT_MODEL_CONFIG = {
openai: OpenAIConfig.model,
+ anthropic: AnthropicConfig.model,
// azure: {
// model: "gpt-3.5-turbo" as string,
// summarizeModel: "gpt-3.5-turbo",
@@ -55,15 +50,6 @@ export const DEFAULT_MODEL_CONFIG = {
// presence_penalty: 0,
// frequency_penalty: 0,
// },
- // claude: {
- //   model: "claude-2",
- //   summarizeModel: "claude-2",
- //
- //   max_tokens_to_sample: 100000,
- //   temperature: 1,
- //   top_p: 0.7,
- //   top_k: 1,
- // },
// google: {
// model: "chat-bison-001",
// summarizeModel: "claude-2",
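
The commented-out claude blocks can go because their contents now live in AnthropicConfig, imported at the top of this file. A plausible shape for that module, inferred from the deleted comments (the actual file is app/client/anthropic/config.ts; values and field names are taken from the comments, not verified against it):

// Hypothetical AnthropicConfig, reconstructed from the deleted comments.
export const AnthropicConfig = {
  provider: {
    endpoint: "https://api.anthropic.com",
    apiKey: "",
  },
  model: {
    model: "claude-2" as string,
    summarizeModel: "claude-2",
    max_tokens_to_sample: 100000,
    temperature: 1,
    top_p: 0.7,
    top_k: 1,
  },
};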
@@ -125,7 +111,7 @@ export function limitNumber(
export const ModalConfigValidator = {
model(x: string) {
- return x as ModelType;
+ return x as string;
},
max_tokens(x: number) {
return limitNumber(x, 0, 100000, 2000);