diff --git a/README.md b/README.md index da1adb6a5..c8b158956 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,8 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) +[](https://monica.im/?utm=nxcrp) + ## Enterprise Edition @@ -89,13 +91,13 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] Desktop App with tauri - [x] Self-host Model: Fully compatible with [RWKV-Runner](https://github.com/josStorer/RWKV-Runner), as well as server deployment of [LocalAI](https://github.com/go-skynet/LocalAI): llama/gpt4all/rwkv/vicuna/koala/gpt4all-j/cerebras/falcon/dolly etc. - [x] Artifacts: Easily preview, copy and share generated content/webpages through a separate window [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) -- [x] Plugins: support artifacts, network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) - - [x] artifacts - - [ ] network search, calculator, any other apis etc. [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] Plugins: supports network search, calculator, and other APIs [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [x] network search, calculator, and other APIs [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) - [ ] local knowledge base ## What's New +- 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 Now supports Artifacts & SD - 🚀 v2.10.1 support Google Gemini Pro model. - 🚀 v2.9.11 you can use azure endpoint now.
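The plugin support announced above is wired in by the client hunks later in this diff: each platform client now pulls tool definitions and their local implementations from the plugin store and hands them, together with an SSE parser and a tool-result callback, to a shared `stream()` helper. The TypeScript sketch below condenses that call shape from the `openai.ts` and `moonshot.ts` hunks; `stream()` itself lives in `app/utils/chat` and is not part of this section, so its signature here is inferred from the call sites rather than taken from its definition.

```ts
// Condensed sketch of the call shape added in app/client/platforms/openai.ts and
// moonshot.ts below. The exact stream() signature is an inference from those call sites.
import { stream } from "@/app/utils/chat";
import { getHeaders, ChatOptions } from "@/app/client/api";
import { usePluginStore, useChatStore, ChatMessageTool } from "@/app/store";

function streamChatWithPlugins(
  chatPath: string,
  requestPayload: { messages: any[] },
  controller: AbortController,
  options: ChatOptions, // now also carries the new onBeforeTool/onAfterTool hooks
) {
  // Tool definitions plus their local implementations, for the plugins enabled
  // on the current session's mask.
  const [tools, funcs] = usePluginStore
    .getState()
    .getAsTools(useChatStore.getState().currentSession().mask?.plugin || []);

  return stream(
    chatPath,
    requestPayload,
    getHeaders(),
    tools as any,
    funcs,
    controller,
    // parseSSE: return each chunk's text delta and accumulate streamed tool calls.
    (text: string, runTools: ChatMessageTool[]) => {
      const json = JSON.parse(text);
      const choices = json.choices as Array<{
        delta: { content: string; tool_calls: ChatMessageTool[] };
      }>;
      const tool_calls = choices[0]?.delta?.tool_calls;
      if (tool_calls?.length > 0) {
        const index = tool_calls[0]?.index;
        const id = tool_calls[0]?.id;
        const args = tool_calls[0]?.function?.arguments;
        if (id) {
          // first chunk of a tool call: register it
          runTools.push({
            id,
            type: tool_calls[0]?.type,
            function: { name: tool_calls[0]?.function?.name as string, arguments: args },
          });
        } else {
          // follow-up chunks: append the partial JSON arguments
          // @ts-ignore
          runTools[index]["function"]["arguments"] += args;
        }
      }
      return choices[0]?.delta?.content;
    },
    // processToolMessage: append the assistant tool_calls message and the tool
    // results to the payload before the follow-up request is sent.
    (payload: any, toolCallMessage: any, toolCallResult: any[]) => {
      payload.messages?.splice(payload.messages?.length, 0, toolCallMessage, ...toolCallResult);
    },
    options,
  );
}
```

The Anthropic client follows the same pattern, but first maps each tool definition onto Claude's `tool_use` schema (`name`, `description`, `input_schema`) and rebuilds the tool-call exchange using Claude's `tool_use`/`tool_result` message blocks.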
@@ -126,13 +128,13 @@ For enterprise inquiries, please contact: **business@nextchat.dev** - [x] 使用 tauri 打包桌面应用 - [x] 支持自部署的大语言模型:开箱即用 [RWKV-Runner](https://github.com/josStorer/RWKV-Runner) ,服务端部署 [LocalAI 项目](https://github.com/go-skynet/LocalAI) llama / gpt4all / rwkv / vicuna / koala / gpt4all-j / cerebras / falcon / dolly 等等,或者使用 [api-for-open-llm](https://github.com/xusenlinzy/api-for-open-llm) - [x] Artifacts: 通过独立窗口,轻松预览、复制和分享生成的内容/可交互网页 [#5092](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/pull/5092) -- [x] 插件机制,支持 artifacts,联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) - - [x] artifacts - - [ ] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) +- [x] 插件机制,支持`联网搜索`、`计算器`、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) + - [x] 支持联网搜索、计算器、调用其他平台 api [#165](https://github.com/Yidadaa/ChatGPT-Next-Web/issues/165) [#5353](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5353) - [ ] 本地知识库 ## 最新动态 +- 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 现在支持 Artifacts & SD 了。 - 🚀 v2.10.1 现在支持 Gemini Pro 模型。 - 🚀 v2.9.11 现在可以使用自定义 Azure 服务了。 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index 06e3e5160..24aa5ec04 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -10,6 +10,8 @@ import { handle as alibabaHandler } from "../../alibaba"; import { handle as moonshotHandler } from "../../moonshot"; import { handle as stabilityHandler } from "../../stability"; import { handle as iflytekHandler } from "../../iflytek"; +import { handle as proxyHandler } from "../../proxy"; + async function handle( req: NextRequest, { params }: { params: { provider: string; path: string[] } }, @@ -36,8 +38,10 @@ async function handle( return stabilityHandler(req, { params }); case ApiPath.Iflytek: return iflytekHandler(req, { params }); - default: + case ApiPath.OpenAI: return openaiHandler(req, { params }); + default: + return proxyHandler(req, { params }); } } diff --git a/app/api/common.ts b/app/api/common.ts index 24453dd96..25decbf62 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -32,10 +32,7 @@ export async function requestOpenai(req: NextRequest) { authHeaderName = "Authorization"; } - let path = `${req.nextUrl.pathname}${req.nextUrl.search}`.replaceAll( - "/api/openai/", - "", - ); + let path = `${req.nextUrl.pathname}`.replaceAll("/api/openai/", ""); let baseUrl = (isAzure ? 
serverConfig.azureUrl : serverConfig.baseUrl) || OPENAI_BASE_URL; diff --git a/app/api/openai.ts b/app/api/openai.ts index 0059ff8b4..7dfd84e17 100644 --- a/app/api/openai.ts +++ b/app/api/openai.ts @@ -13,7 +13,9 @@ function getModels(remoteModelRes: OpenAIListModelResponse) { if (config.disableGPT4) { remoteModelRes.data = remoteModelRes.data.filter( - (m) => !m.id.startsWith("gpt-4") || m.id.startsWith("gpt-4o-mini"), + (m) => + !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o")) || + m.id.startsWith("gpt-4o-mini"), ); } diff --git a/app/api/proxy.ts b/app/api/proxy.ts new file mode 100644 index 000000000..731003aa1 --- /dev/null +++ b/app/api/proxy.ts @@ -0,0 +1,75 @@ +import { NextRequest, NextResponse } from "next/server"; + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[Proxy Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + // remove path params from searchParams + req.nextUrl.searchParams.delete("path"); + req.nextUrl.searchParams.delete("provider"); + + const subpath = params.path.join("/"); + const fetchUrl = `${req.headers.get( + "x-base-url", + )}/${subpath}?${req.nextUrl.searchParams.toString()}`; + const skipHeaders = ["connection", "host", "origin", "referer", "cookie"]; + const headers = new Headers( + Array.from(req.headers.entries()).filter((item) => { + if ( + item[0].indexOf("x-") > -1 || + item[0].indexOf("sec-") > -1 || + skipHeaders.includes(item[0]) + ) { + return false; + } + return true; + }), + ); + const controller = new AbortController(); + const fetchOptions: RequestInit = { + headers, + method: req.method, + body: req.body, + // to fix #2485: https://stackoverflow.com/questions/55920957/cloudflare-worker-typeerror-one-time-use-body + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + try { + const res = await fetch(fetchUrl, fetchOptions); + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + // The latest version of the OpenAI API forced the content-encoding to be "br" in json response + // So if the streaming is disabled, we need to remove the content-encoding header + // Because Vercel uses gzip to compress the response, if we don't remove the content-encoding header + // The browser will try to decode the response with brotli and fail + newHeaders.delete("content-encoding"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/client/api.ts b/app/client/api.ts index d7fb023a2..cecc453ba 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -5,7 +5,13 @@ import { ModelProvider, ServiceProvider, } from "../constant"; -import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store"; +import { + ChatMessageTool, + ChatMessage, + ModelType, + useAccessStore, + useChatStore, +} from "../store"; import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai"; import { GeminiProApi } from "./platforms/google"; import { ClaudeApi } from "./platforms/anthropic"; @@ -56,6 +62,8 @@ export interface ChatOptions { onFinish: (message: string) => void; onError?: (err: 
Error) => void; onController?: (controller: AbortController) => void; + onBeforeTool?: (tool: ChatMessageTool) => void; + onAfterTool?: (tool: ChatMessageTool) => void; } export interface LLMUsage { diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index b079ba1ad..7dd39c9cd 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -1,6 +1,12 @@ import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { + useAccessStore, + useAppConfig, + useChatStore, + usePluginStore, + ChatMessageTool, +} from "@/app/store"; import { getClientConfig } from "@/app/config/client"; import { DEFAULT_API_HOST } from "@/app/constant"; import { @@ -11,8 +17,9 @@ import { import Locale from "../../locales"; import { prettyObject } from "@/app/utils/format"; import { getMessageTextContent, isVisionModel } from "@/app/utils"; -import { preProcessImageContent } from "@/app/utils/chat"; +import { preProcessImageContent, stream } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; +import { RequestPayload } from "./openai"; export type MultiBlockContent = { type: "image" | "text"; @@ -191,112 +198,126 @@ export class ClaudeApi implements LLMApi { const controller = new AbortController(); options.onController?.(controller); - const payload = { - method: "POST", - body: JSON.stringify(requestBody), - signal: controller.signal, - headers: { - ...getHeaders(), // get common headers - "anthropic-version": accessStore.anthropicApiVersion, - // do not send `anthropicApiKey` in browser!!! - // Authorization: getAuthKey(accessStore.anthropicApiKey), - }, - }; - if (shouldStream) { - try { - const context = { - text: "", - finished: false, - }; - - const finish = () => { - if (!context.finished) { - options.onFinish(context.text); - context.finished = true; - } - }; - - controller.signal.onabort = finish; - fetchEventSource(path, { - ...payload, - async onopen(res) { - const contentType = res.headers.get("content-type"); - console.log("response content type: ", contentType); - - if (contentType?.startsWith("text/plain")) { - context.text = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [context.text]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - context.text = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - let chunkJson: - | undefined - | { - type: "content_block_delta" | "content_block_stop"; - delta?: { - type: "text_delta"; - text: string; - }; - index: number; + let index = -1; + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + path, + requestBody, + { + ...getHeaders(), + "anthropic-version": accessStore.anthropicApiVersion, + }, + // @ts-ignore + tools.map((tool) => ({ + name: tool?.function?.name, + description: tool?.function?.description, + input_schema: tool?.function?.parameters, + })), + funcs, + controller, + // 
parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + let chunkJson: + | undefined + | { + type: "content_block_delta" | "content_block_stop"; + content_block?: { + type: "tool_use"; + id: string; + name: string; }; - try { - chunkJson = JSON.parse(msg.data); - } catch (e) { - console.error("[Response] parse error", msg.data); - } + delta?: { + type: "text_delta" | "input_json_delta"; + text?: string; + partial_json?: string; + }; + index: number; + }; + chunkJson = JSON.parse(text); - if (!chunkJson || chunkJson.type === "content_block_stop") { - return finish(); - } - - const { delta } = chunkJson; - if (delta?.text) { - context.text += delta.text; - options.onUpdate?.(context.text, delta.text); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); - } catch (e) { - console.error("failed to chat", e); - options.onError?.(e as Error); - } + if (chunkJson?.content_block?.type == "tool_use") { + index += 1; + const id = chunkJson?.content_block.id; + const name = chunkJson?.content_block.name; + runTools.push({ + id, + type: "function", + function: { + name, + arguments: "", + }, + }); + } + if ( + chunkJson?.delta?.type == "input_json_delta" && + chunkJson?.delta?.partial_json + ) { + // @ts-ignore + runTools[index]["function"]["arguments"] += + chunkJson?.delta?.partial_json; + } + return chunkJson?.delta?.text; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // reset index value + index = -1; + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + { + role: "assistant", + content: toolCallMessage.tool_calls.map( + (tool: ChatMessageTool) => ({ + type: "tool_use", + id: tool.id, + name: tool?.function?.name, + input: tool?.function?.arguments + ? JSON.parse(tool?.function?.arguments) + : {}, + }), + ), + }, + // @ts-ignore + ...toolCallResult.map((result) => ({ + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: result.tool_call_id, + content: result.content, + }, + ], + })), + ); + }, + options, + ); } else { + const payload = { + method: "POST", + body: JSON.stringify(requestBody), + signal: controller.signal, + headers: { + ...getHeaders(), // get common headers + "anthropic-version": accessStore.anthropicApiVersion, + // do not send `anthropicApiKey` in browser!!! 
+ // Authorization: getAuthKey(accessStore.anthropicApiKey), + }, + }; + try { controller.signal.onabort = () => options.onFinish(""); diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index 7d257ccb2..cd10d2f6c 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -8,9 +8,15 @@ import { REQUEST_TIMEOUT_MS, ServiceProvider, } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; import { collectModelsWithDefaultModel } from "@/app/utils/model"; -import { preProcessImageContent } from "@/app/utils/chat"; +import { preProcessImageContent, stream } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; import { @@ -116,115 +122,66 @@ export class MoonshotApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } - }; - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return 
finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const choices = json.choices as Array<{ - delta: { content: string }; - }>; - const delta = choices[0]?.delta?.content; - const textmoderation = json?.prompt_filter_results; - - if (delta) { - remainText += delta; - } - } catch (e) { - console.error("[Request] parse error", text, msg); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); + options, + ); } else { const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index d4e262c16..4d6f28845 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -9,12 +9,19 @@ import { REQUEST_TIMEOUT_MS, ServiceProvider, } from "@/app/constant"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; +import { + ChatMessageTool, + useAccessStore, + useAppConfig, + useChatStore, + usePluginStore, +} from "@/app/store"; import { collectModelsWithDefaultModel } from "@/app/utils/model"; import { preProcessImageContent, uploadImage, base64Image2Blob, + stream, } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; import { DalleSize, DalleQuality, DalleStyle } from "@/app/typing"; @@ -233,143 +240,82 @@ export class ChatGPTApi implements LLMApi { isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath, ); } - const chatPayload = { - method: "POST", - body: JSON.stringify(requestPayload), - signal: controller.signal, - headers: getHeaders(), - }; - - // make a fetch request - const requestTimeoutId = setTimeout( - () => controller.abort(), - isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. 
- ); - if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - console.log("[Response Animation] finished"); - if (responseText?.length === 0) { - options.onError?.(new Error("empty response from server")); + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + // console.log("getAsTools", tools, funcs); + stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } } - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), }; - controller.signal.onabort = finish; + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. 
+ ); - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[OpenAI] request response content type: ", - contentType, - ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const choices = json.choices as Array<{ - delta: { content: string }; - }>; - const delta = choices[0]?.delta?.content; - const textmoderation = json?.prompt_filter_results; - - if (delta) { - remainText += delta; - } - - if ( - textmoderation && - textmoderation.length > 0 && - ServiceProvider.Azure - ) { - const contentFilterResults = - textmoderation[0]?.content_filter_results; - console.log( - `[${ServiceProvider.Azure}] [Text Moderation] flagged categories result:`, - contentFilterResults, - ); - } - } catch (e) { - console.error("[Request] parse error", text, msg); - } - }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; - }, - openWhenHidden: true, - }); - } else { const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); @@ -461,7 +407,9 @@ export class ChatGPTApi implements LLMApi { }); const resJson = (await res.json()) as OpenAIListModelResponse; - const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-")); + const chatModels = resJson.data?.filter( + (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"), + ); console.log("[Models]", chatModels); if (!chatModels) { diff --git a/app/components/artifacts.tsx b/app/components/artifacts.tsx index 326891e73..ac0d713b3 100644 --- a/app/components/artifacts.tsx +++ b/app/components/artifacts.tsx @@ -1,4 +1,11 @@ -import { useEffect, useState, useRef, useMemo } from "react"; +import { + useEffect, + useState, + useRef, + useMemo, + forwardRef, + useImperativeHandle, +} from "react"; import { useParams } from "react-router"; import { useWindowSize } from "@/app/utils"; import { IconButton } from "./button"; @@ -8,6 +15,7 @@ import CopyIcon from "../icons/copy.svg"; import DownloadIcon from "../icons/download.svg"; import GithubIcon from "../icons/github.svg"; import LoadingButtonIcon from "../icons/loading.svg"; +import ReloadButtonIcon from "../icons/reload.svg"; import Locale from "../locales"; import { Modal, showToast } from "./ui-lib"; import { copyToClipboard, downloadAs } from "../utils"; @@ -15,73 +23,89 @@ import { Path, ApiPath, REPO_URL } from "@/app/constant"; import { Loading } from "./home"; import styles from "./artifacts.module.scss"; -export function HTMLPreview(props: { +type HTMLPreviewProps = { code: string; autoHeight?: boolean; height?: number | string; onLoad?: (title?: string) => void; -}) { - const ref = useRef(null); - const frameId = useRef(nanoid()); - const [iframeHeight, setIframeHeight] = useState(600); - 
const [title, setTitle] = useState(""); - /* - * https://stackoverflow.com/questions/19739001/what-is-the-difference-between-srcdoc-and-src-datatext-html-in-an - * 1. using srcdoc - * 2. using src with dataurl: - * easy to share - * length limit (Data URIs cannot be larger than 32,768 characters.) - */ +}; - useEffect(() => { - const handleMessage = (e: any) => { - const { id, height, title } = e.data; - setTitle(title); - if (id == frameId.current) { - setIframeHeight(height); +export type HTMLPreviewHander = { + reload: () => void; +}; + +export const HTMLPreview = forwardRef( + function HTMLPreview(props, ref) { + const iframeRef = useRef(null); + const [frameId, setFrameId] = useState(nanoid()); + const [iframeHeight, setIframeHeight] = useState(600); + const [title, setTitle] = useState(""); + /* + * https://stackoverflow.com/questions/19739001/what-is-the-difference-between-srcdoc-and-src-datatext-html-in-an + * 1. using srcdoc + * 2. using src with dataurl: + * easy to share + * length limit (Data URIs cannot be larger than 32,768 characters.) + */ + + useEffect(() => { + const handleMessage = (e: any) => { + const { id, height, title } = e.data; + setTitle(title); + if (id == frameId) { + setIframeHeight(height); + } + }; + window.addEventListener("message", handleMessage); + return () => { + window.removeEventListener("message", handleMessage); + }; + }, [frameId]); + + useImperativeHandle(ref, () => ({ + reload: () => { + setFrameId(nanoid()); + }, + })); + + const height = useMemo(() => { + if (!props.autoHeight) return props.height || 600; + if (typeof props.height === "string") { + return props.height; + } + const parentHeight = props.height || 600; + return iframeHeight + 40 > parentHeight + ? parentHeight + : iframeHeight + 40; + }, [props.autoHeight, props.height, iframeHeight]); + + const srcDoc = useMemo(() => { + const script = ``; + if (props.code.includes("")) { + props.code.replace("", "" + script); + } + return script + props.code; + }, [props.code, frameId]); + + const handleOnLoad = () => { + if (props?.onLoad) { + props.onLoad(title); } }; - window.addEventListener("message", handleMessage); - return () => { - window.removeEventListener("message", handleMessage); - }; - }, []); - const height = useMemo(() => { - if (!props.autoHeight) return props.height || 600; - if (typeof props.height === "string") { - return props.height; - } - const parentHeight = props.height || 600; - return iframeHeight + 40 > parentHeight ? parentHeight : iframeHeight + 40; - }, [props.autoHeight, props.height, iframeHeight]); - - const srcDoc = useMemo(() => { - const script = ``; - if (props.code.includes("")) { - props.code.replace("", "" + script); - } - return props.code + script; - }, [props.code]); - - const handleOnLoad = () => { - if (props?.onLoad) { - props.onLoad(title); - } - }; - - return ( -