mirror of https://github.com/Yidadaa/ChatGPT-Next-Web.git
synced 2025-08-08 17:25:52 +08:00
Merge branch 'main' into main
app/client/api.ts
@@ -8,6 +8,7 @@ import {
 import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
 import { ChatGPTApi } from "./platforms/openai";
 import { GeminiProApi } from "./platforms/google";
+import { ClaudeApi } from "./platforms/anthropic";
 
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
@@ -94,11 +95,16 @@ export class ClientApi {
   public llm: LLMApi;
 
   constructor(provider: ModelProvider = ModelProvider.GPT) {
-    if (provider === ModelProvider.GeminiPro) {
-      this.llm = new GeminiProApi();
-      return;
-    }
-    this.llm = new ChatGPTApi();
+    switch (provider) {
+      case ModelProvider.GeminiPro:
+        this.llm = new GeminiProApi();
+        break;
+      case ModelProvider.Claude:
+        this.llm = new ClaudeApi();
+        break;
+      default:
+        this.llm = new ChatGPTApi();
+    }
   }
 
   config() {}
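
A quick usage sketch of the updated constructor (illustrative only; the ModelProvider values are the ones referenced in the hunk above):

  // The provider argument now selects among three backends; GPT stays the default.
  const claudeClient = new ClientApi(ModelProvider.Claude); // claudeClient.llm is a ClaudeApi
  const defaultClient = new ClientApi();                    // defaultClient.llm is a ChatGPTApi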

app/client/platforms/anthropic.ts (new file, 404 lines)
@@ -0,0 +1,404 @@
import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant";
import { ChatOptions, LLMApi, MultimodalContent } from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { getClientConfig } from "@/app/config/client";
import { DEFAULT_API_HOST } from "@/app/constant";
import { RequestMessage } from "@/app/typing";
import {
  EventStreamContentType,
  fetchEventSource,
} from "@fortaine/fetch-event-source";

import Locale from "../../locales";
import { prettyObject } from "@/app/utils/format";
import { getMessageTextContent, isVisionModel } from "@/app/utils";

export type MultiBlockContent = {
  type: "image" | "text";
  source?: {
    type: string;
    media_type: string;
    data: string;
  };
  text?: string;
};

export type AnthropicMessage = {
  role: (typeof ClaudeMapper)[keyof typeof ClaudeMapper];
  content: string | MultiBlockContent[];
};

export interface AnthropicChatRequest {
  model: string; // The model that will complete your prompt.
  messages: AnthropicMessage[]; // The prompt that you want Claude to complete.
  max_tokens: number; // The maximum number of tokens to generate before stopping.
  stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
  temperature?: number; // Amount of randomness injected into the response.
  top_p?: number; // Use nucleus sampling.
  top_k?: number; // Only sample from the top K options for each subsequent token.
  metadata?: object; // An object describing metadata about the request.
  stream?: boolean; // Whether to incrementally stream the response using server-sent events.
}

export interface ChatRequest {
  model: string; // The model that will complete your prompt.
  prompt: string; // The prompt that you want Claude to complete.
  max_tokens_to_sample: number; // The maximum number of tokens to generate before stopping.
  stop_sequences?: string[]; // Sequences that will cause the model to stop generating completion text.
  temperature?: number; // Amount of randomness injected into the response.
  top_p?: number; // Use nucleus sampling.
  top_k?: number; // Only sample from the top K options for each subsequent token.
  metadata?: object; // An object describing metadata about the request.
  stream?: boolean; // Whether to incrementally stream the response using server-sent events.
}

export interface ChatResponse {
  completion: string;
  stop_reason: "stop_sequence" | "max_tokens";
  model: string;
}

export type ChatStreamResponse = ChatResponse & {
  stop?: string;
  log_id: string;
};

const ClaudeMapper = {
  assistant: "assistant",
  user: "user",
  system: "user",
} as const;
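
// For orientation, a minimal request built from these types could look like
// the following sketch (the model id and token limit are placeholder values,
// not defaults from this file):
//
//   const exampleRequest: AnthropicChatRequest = {
//     model: "claude-3-sonnet-20240229",
//     messages: [{ role: "user", content: "Hello, Claude" }],
//     max_tokens: 1024,
//     stream: true,
//   };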

const keys = ["claude-2", "claude-instant-1"];

export class ClaudeApi implements LLMApi {
  extractMessage(res: any) {
    console.log("[Response] claude response: ", res);

    return res?.content?.[0]?.text;
  }
  async chat(options: ChatOptions): Promise<void> {
    const visionModel = isVisionModel(options.config.model);

    const accessStore = useAccessStore.getState();

    const shouldStream = !!options.config.stream;

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
      },
    };

    const messages = [...options.messages];

    const keys = ["system", "user"];

    // roles must alternate between "user" and "assistant" in claude, so add a fake assistant message between two user messages
    for (let i = 0; i < messages.length - 1; i++) {
      const message = messages[i];
      const nextMessage = messages[i + 1];

      if (keys.includes(message.role) && keys.includes(nextMessage.role)) {
        messages[i] = [
          message,
          {
            role: "assistant",
            content: ";",
          },
        ] as any;
      }
    }
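    // e.g. [user, user] becomes [[user, assistant(";")], user] after this pass;
    // the .flat() below restores a single, alternating message list.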

    const prompt = messages
      .flat()
      .filter((v) => {
        if (!v.content) return false;
        if (typeof v.content === "string" && !v.content.trim()) return false;
        return true;
      })
      .map((v) => {
        const { role, content } = v;
        const insideRole = ClaudeMapper[role] ?? "user";

        if (!visionModel || typeof content === "string") {
          return {
            role: insideRole,
            content: getMessageTextContent(v),
          };
        }
        return {
          role: insideRole,
          content: content
            .filter((v) => v.image_url || v.text)
            .map(({ type, text, image_url }) => {
              if (type === "text") {
                return {
                  type,
                  text: text!,
                };
              }
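              // image_url is expected to be a data URL of the form
              // "data:<media_type>;<encoding>,<payload>"
              // (e.g. "data:image/png;base64,iVBORw0..."); the three slices
              // below extract the pieces for Anthropic's image source block.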
              const { url = "" } = image_url || {};
              const colonIndex = url.indexOf(":");
              const semicolonIndex = url.indexOf(";");
              const comma = url.indexOf(",");

              const mimeType = url.slice(colonIndex + 1, semicolonIndex);
              const encodeType = url.slice(semicolonIndex + 1, comma);
              const data = url.slice(comma + 1);

              return {
                type: "image" as const,
                source: {
                  type: encodeType,
                  media_type: mimeType,
                  data,
                },
              };
            }),
        };
      });

    const requestBody: AnthropicChatRequest = {
      messages: prompt,
      stream: shouldStream,

      model: modelConfig.model,
      max_tokens: modelConfig.max_tokens,
      temperature: modelConfig.temperature,
      top_p: modelConfig.top_p,
      // top_k: modelConfig.top_k,
      top_k: 5,
    };

    const path = this.path(Anthropic.ChatPath);

    const controller = new AbortController();
    options.onController?.(controller);

    const payload = {
      method: "POST",
      body: JSON.stringify(requestBody),
      signal: controller.signal,
      headers: {
        "Content-Type": "application/json",
        Accept: "application/json",
        "x-api-key": accessStore.anthropicApiKey,
        "anthropic-version": accessStore.anthropicApiVersion,
        Authorization: getAuthKey(accessStore.anthropicApiKey),
      },
    };
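    // Note that both Anthropic's native "x-api-key" header and an Authorization
    // bearer (user API key or app access code, via getAuthKey below) are attached.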

    if (shouldStream) {
      try {
        const context = {
          text: "",
          finished: false,
        };

        const finish = () => {
          if (!context.finished) {
            options.onFinish(context.text);
            context.finished = true;
          }
        };

        controller.signal.onabort = finish;
        fetchEventSource(path, {
          ...payload,
          async onopen(res) {
            const contentType = res.headers.get("content-type");
            console.log("response content type: ", contentType);

            if (contentType?.startsWith("text/plain")) {
              context.text = await res.clone().text();
              return finish();
            }

            if (
              !res.ok ||
              !res.headers
                .get("content-type")
                ?.startsWith(EventStreamContentType) ||
              res.status !== 200
            ) {
              const responseTexts = [context.text];
              let extraInfo = await res.clone().text();
              try {
                const resJson = await res.clone().json();
                extraInfo = prettyObject(resJson);
              } catch {}

              if (res.status === 401) {
                responseTexts.push(Locale.Error.Unauthorized);
              }

              if (extraInfo) {
                responseTexts.push(extraInfo);
              }

              context.text = responseTexts.join("\n\n");

              return finish();
            }
          },
          onmessage(msg) {
            let chunkJson:
              | undefined
              | {
                  type: "content_block_delta" | "content_block_stop";
                  delta?: {
                    type: "text_delta";
                    text: string;
                  };
                  index: number;
                };
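            // A typical streamed event body from Anthropic's SSE API looks like:
            // {"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":"Hi"}}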
            try {
              chunkJson = JSON.parse(msg.data);
            } catch (e) {
              console.error("[Response] parse error", msg.data);
            }

            if (!chunkJson || chunkJson.type === "content_block_stop") {
              return finish();
            }

            const { delta } = chunkJson;
            if (delta?.text) {
              context.text += delta.text;
              options.onUpdate?.(context.text, delta.text);
            }
          },
          onclose() {
            finish();
          },
          onerror(e) {
            options.onError?.(e);
            throw e;
          },
          openWhenHidden: true,
        });
      } catch (e) {
        console.error("failed to chat", e);
        options.onError?.(e as Error);
      }
    } else {
      try {
        controller.signal.onabort = () => options.onFinish("");

        const res = await fetch(path, payload);
        const resJson = await res.json();

        const message = this.extractMessage(resJson);
        options.onFinish(message);
      } catch (e) {
        console.error("failed to chat", e);
        options.onError?.(e as Error);
      }
    }
  }
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
  async models() {
    // const provider = {
    //   id: "anthropic",
    //   providerName: "Anthropic",
    //   providerType: "anthropic",
    // };

    return [
      // {
      //   name: "claude-instant-1.2",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-2.0",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-2.1",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-opus-20240229",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-sonnet-20240229",
      //   available: true,
      //   provider,
      // },
      // {
      //   name: "claude-3-haiku-20240307",
      //   available: true,
      //   provider,
      // },
    ];
  }
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl: string = accessStore.anthropicUrl;

    // if endpoint is empty, use default endpoint
    if (baseUrl.trim().length === 0) {
      const isApp = !!getClientConfig()?.isApp;

      baseUrl = isApp
        ? DEFAULT_API_HOST + "/api/proxy/anthropic"
        : ApiPath.Anthropic;
    }

    if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) {
      baseUrl = "https://" + baseUrl;
    }

    baseUrl = trimEnd(baseUrl, "/");

    return `${baseUrl}/${path}`;
  }
}

function trimEnd(s: string, end = " ") {
  if (end.length === 0) return s;

  while (s.endsWith(end)) {
    s = s.slice(0, -end.length);
  }

  return s;
}

function bearer(value: string) {
  return `Bearer ${value.trim()}`;
}

function getAuthKey(apiKey = "") {
  const accessStore = useAccessStore.getState();
  const isApp = !!getClientConfig()?.isApp;
  let authKey = "";

  if (apiKey) {
    // use user's api key first
    authKey = bearer(apiKey);
  } else if (
    accessStore.enabledAccessControl() &&
    !isApp &&
    !!accessStore.accessCode
  ) {
    // or use access code
    authKey = bearer(ACCESS_CODE_PREFIX + accessStore.accessCode);
  }

  return authKey;
}
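
As a quick smoke test, the new client can be driven directly like this (a sketch only; it assumes the surrounding app stores are initialized, uses a placeholder model id, and follows the ChatOptions fields exercised above):

  const claude = new ClaudeApi();
  await claude.chat({
    messages: [{ role: "user", content: "Say hi" }],
    config: { model: "claude-3-haiku-20240307", stream: true }, // placeholder model
    onUpdate: (fullText, delta) => console.log(delta),
    onFinish: (fullText) => console.log("done:", fullText),
    onError: (e) => console.error(e),
  });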

app/client/platforms/openai.ts
@@ -40,6 +40,20 @@ export interface OpenAIListModelResponse {
   }>;
 }
 
+interface RequestPayload {
+  messages: {
+    role: "system" | "user" | "assistant";
+    content: string | MultimodalContent[];
+  }[];
+  stream?: boolean;
+  model: string;
+  temperature: number;
+  presence_penalty: number;
+  frequency_penalty: number;
+  top_p: number;
+  max_tokens?: number;
+}
+
 export class ChatGPTApi implements LLMApi {
   private disableListModels = true;
@@ -98,7 +112,7 @@ export class ChatGPTApi implements LLMApi {
       },
     };
 
-    const requestPayload = {
+    const requestPayload: RequestPayload = {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
@@ -112,12 +126,7 @@ export class ChatGPTApi implements LLMApi {
 
     // add max_tokens to vision model
     if (visionModel) {
-      Object.defineProperty(requestPayload, "max_tokens", {
-        enumerable: true,
-        configurable: true,
-        writable: true,
-        value: modelConfig.max_tokens,
-      });
+      requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
     }
 
     console.log("[Request] openai payload: ", requestPayload);
@@ -229,7 +238,9 @@ export class ChatGPTApi implements LLMApi {
           const text = msg.data;
           try {
             const json = JSON.parse(text);
-            const choices = json.choices as Array<{ delta: { content: string } }>;
+            const choices = json.choices as Array<{
+              delta: { content: string };
+            }>;
             const delta = choices[0]?.delta?.content;
             const textmoderation = json?.prompt_filter_results;
 
@@ -237,9 +248,17 @@ export class ChatGPTApi implements LLMApi {
               remainText += delta;
             }
 
-            if (textmoderation && textmoderation.length > 0 && ServiceProvider.Azure) {
-              const contentFilterResults = textmoderation[0]?.content_filter_results;
-              console.log(`[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`, contentFilterResults);
+            if (
+              textmoderation &&
+              textmoderation.length > 0 &&
+              ServiceProvider.Azure
+            ) {
+              const contentFilterResults =
+                textmoderation[0]?.content_filter_results;
+              console.log(
+                `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`,
+                contentFilterResults,
+              );
             }
           } catch (e) {
             console.error("[Request] parse error", text, msg);