diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts
index 095e239ec..c59f76dd3 100644
--- a/app/client/platforms/anthropic.ts
+++ b/app/client/platforms/anthropic.ts
@@ -1,4 +1,9 @@
-import { Anthropic, ApiPath } from "@/app/constant";
+import {
+  Anthropic,
+  ApiPath,
+  REQUEST_TIMEOUT_MS,
+  ServiceProvider,
+} from "@/app/constant";
 import {
   AgentChatOptions,
   ChatOptions,
@@ -22,6 +27,11 @@ import { preProcessImageContent, stream } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
+import {
+  EventStreamContentType,
+  fetchEventSource,
+} from "@fortaine/fetch-event-source";
+import Locale from "../../locales";
 
 export type MultiBlockContent = {
   type: "image" | "text";
@@ -85,9 +95,163 @@ export class ClaudeApi implements LLMApi {
   transcription(options: TranscriptionOptions): Promise<string> {
     throw new Error("Method not implemented.");
   }
-  toolAgentChat(options: AgentChatOptions): Promise<void> {
-    throw new Error("Method not implemented.");
+  async toolAgentChat(options: AgentChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
+    const messages: AgentChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = visionModel
+        ? await preProcessImageContent(v.content)
+        : getMessageTextContent(v);
+      messages.push({ role: v.role, content });
+    }
+
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+      },
+    };
+    const accessStore = useAccessStore.getState();
+    let baseUrl = accessStore.anthropicUrl;
+    const requestPayload = {
+      chatSessionId: options.chatSessionId,
+      messages,
+      isAzure: false,
+      azureApiVersion: accessStore.azureApiVersion,
+      stream: options.config.stream,
+      model: modelConfig.model,
+      temperature: modelConfig.temperature,
+      presence_penalty: modelConfig.presence_penalty,
+      frequency_penalty: modelConfig.frequency_penalty,
+      top_p: modelConfig.top_p,
+      baseUrl: baseUrl,
+      maxIterations: options.agentConfig.maxIterations,
+      returnIntermediateSteps: options.agentConfig.returnIntermediateSteps,
+      useTools: options.agentConfig.useTools,
+      provider: ServiceProvider.Anthropic,
+    };
+
+    console.log("[Request] anthropic payload: ", requestPayload);
+
+    const shouldStream = true;
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      let path = "/api/langchain/tool/agent/";
+      const enableNodeJSPlugin = !!process.env.NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN;
+      path = enableNodeJSPlugin ? path + "nodejs" : path + "edge";
path + "nodejs" : path + "edge"; + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + // console.log("shouldStream", shouldStream); + + if (shouldStream) { + let responseText = ""; + let finished = false; + + const finish = () => { + if (!finished) { + options.onFinish(responseText); + finished = true; + } + }; + + controller.signal.onabort = finish; + + fetchEventSource(path, { + ...chatPayload, + async onopen(res) { + clearTimeout(requestTimeoutId); + const contentType = res.headers.get("content-type"); + console.log( + "[OpenAI] request response content type: ", + contentType, + ); + + if (contentType?.startsWith("text/plain")) { + responseText = await res.clone().text(); + return finish(); + } + + if ( + !res.ok || + !res.headers + .get("content-type") + ?.startsWith(EventStreamContentType) || + res.status !== 200 + ) { + const responseTexts = [responseText]; + let extraInfo = await res.clone().text(); + console.warn(`extraInfo: ${extraInfo}`); + + if (res.status === 401) { + responseTexts.push(Locale.Error.Unauthorized); + } + + if (extraInfo) { + responseTexts.push(extraInfo); + } + + responseText = responseTexts.join("\n\n"); + + return finish(); + } + }, + onmessage(msg) { + let response = JSON.parse(msg.data); + if (!response.isSuccess) { + console.error("[Request]", msg.data); + responseText = msg.data; + throw Error(response.message); + } + if (msg.data === "[DONE]" || finished) { + return finish(); + } + try { + if (response && !response.isToolMessage) { + responseText += response.message; + options.onUpdate?.(responseText, response.message); + } else { + options.onToolUpdate?.(response.toolName!, response.message); + } + } catch (e) { + console.error("[Request] parse error", response, msg); + } + }, + onclose() { + finish(); + }, + onerror(e) { + options.onError?.(e); + throw e; + }, + openWhenHidden: true, + }); + } else { + const res = await fetch(path, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat reqeust", e); + options.onError?.(e as Error); + } } + createRAGStore(options: CreateRAGStoreOptions): Promise { throw new Error("Method not implemented."); } diff --git a/app/utils.ts b/app/utils.ts index 95d9b315d..2a698ca7f 100644 --- a/app/utils.ts +++ b/app/utils.ts @@ -318,6 +318,9 @@ export function isFunctionCallModel(modelName: string) { "claude-3-opus-20240229", "claude-3-haiku-20240307", "claude-3-5-sonnet-20240620", + "claude-3-5-sonnet-20241022", + "claude-3-5-sonnet-latest", + "claude-3-5-haiku-latest", ]; if (specialModels.some((keyword) => modelName === keyword)) return true; return DEFAULT_MODELS.filter(