diff --git a/.env.template b/.env.template
index ff6d3cdbf..6b6021392 100644
--- a/.env.template
+++ b/.env.template
@@ -50,4 +50,22 @@ DISABLE_FAST_LINK=
# (optional)
# Default: 1
# If your project is not deployed on Vercel, set this value to 1.
-NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN=1
\ No newline at end of file
+NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN=1
+
+# (optional)
+# Default: Empty
+# If you want to enable RAG, set this value to 1.
+NEXT_PUBLIC_ENABLE_RAG=
+
+# (optional)
+# Default: text-embedding-3-large
+# Embedding model used when RAG vectorizes data.
+RAG_EMBEDDING_MODEL=text-embedding-ada-002
+
+# Configuration is required when turning on RAG.
+# Default: Empty
+QDRANT_URL=
+
+# Configuration is required when turning on RAG.
+# Default: Empty
+QDRANT_API_KEY=
\ No newline at end of file
diff --git a/README.md b/README.md
index 60ddc1374..7e82c2404 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@
> [!WARNING]
> 本项目插件功能基于 [OpenAI API 函数调用](https://platform.openai.com/docs/guides/function-calling) 功能实现,转发 GitHub Copilot 接口或类似实现的模拟接口并不能正常调用插件功能!
-
+

@@ -35,6 +35,9 @@
## 主要功能
+- RAG 功能 (预览)
+ - 配置请参考文档[RAG 功能配置说明](./docs/rag-cn.md)
+
- 除插件工具外,与原项目保持一致 [ChatGPT-Next-Web 主要功能](https://github.com/Yidadaa/ChatGPT-Next-Web#主要功能)
- 支持 OpenAI TTS(文本转语音)https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/208
@@ -142,7 +145,7 @@
- [x] 支持语音输入 https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/208
-- [ ] 支持其他类型文件上传 https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/77
+- [x] 支持其他类型文件上传 https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/77
- [ ] 支持 Azure Storage https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/217
@@ -295,11 +298,9 @@ docker run -d -p 3000:3000 \
| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Italiano](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md)
-## 贡献者
+## Star History
-
-
-
+[](https://star-history.com/#Hk-Gosuto/ChatGPT-Next-Web-LangChain&Date)
## 捐赠
diff --git a/app/api/langchain-tools/rag_search.ts b/app/api/langchain-tools/rag_search.ts
index eaf86fd60..16755ea39 100644
--- a/app/api/langchain-tools/rag_search.ts
+++ b/app/api/langchain-tools/rag_search.ts
@@ -7,6 +7,8 @@ import { RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { Pinecone } from "@pinecone-database/pinecone";
import { PineconeStore } from "@langchain/pinecone";
+import { getServerSideConfig } from "@/app/config/server";
+import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
export class RAGSearch extends Tool {
static lc_name() {
@@ -34,21 +36,32 @@ export class RAGSearch extends Tool {
/** @ignore */
async _call(inputs: string, runManager?: CallbackManagerForToolRun) {
- const pinecone = new Pinecone();
- const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);
- const vectorStore = await PineconeStore.fromExistingIndex(this.embeddings, {
- pineconeIndex,
- });
+ const serverConfig = getServerSideConfig();
+ // const pinecone = new Pinecone();
+ // const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
+ // const vectorStore = await PineconeStore.fromExistingIndex(this.embeddings, {
+ // pineconeIndex,
+ // });
+ const vectorStore = await QdrantVectorStore.fromExistingCollection(
+ this.embeddings,
+ {
+ url: process.env.QDRANT_URL,
+ apiKey: process.env.QDRANT_API_KEY,
+ collectionName: this.sessionId,
+ },
+ );
let context;
- const returnCunt = process.env.RAG_RETURN_COUNT
- ? parseInt(process.env.RAG_RETURN_COUNT, 10)
+ const returnCunt = serverConfig.ragReturnCount
+ ? parseInt(serverConfig.ragReturnCount, 10)
: 4;
- const results = await vectorStore.similaritySearch(inputs, returnCunt, {
- sessionId: this.sessionId,
- });
+ console.log("[rag-search]", { inputs, returnCunt });
+ // const results = await vectorStore.similaritySearch(inputs, returnCunt, {
+ // sessionId: this.sessionId,
+ // });
+ const results = await vectorStore.similaritySearch(inputs, returnCunt);
context = formatDocumentsAsString(results);
- console.log("[rag-search]", context);
+ console.log("[rag-search]", { context });
return context;
// const input = `Text:${context}\n\nQuestion:${inputs}\n\nI need you to answer the question based on the text.`;
diff --git a/app/api/langchain/rag/search/route.ts b/app/api/langchain/rag/search/route.ts
index 7eb7e0f1b..8c5aae6ea 100644
--- a/app/api/langchain/rag/search/route.ts
+++ b/app/api/langchain/rag/search/route.ts
@@ -4,6 +4,7 @@ import { ACCESS_CODE_PREFIX, ModelProvider } from "@/app/constant";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Pinecone } from "@pinecone-database/pinecone";
import { PineconeStore } from "@langchain/pinecone";
+import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
import { getServerSideConfig } from "@/app/config/server";
interface RequestBody {
@@ -27,26 +28,40 @@ async function handle(req: NextRequest) {
const reqBody: RequestBody = await req.json();
const authToken = req.headers.get("Authorization") ?? "";
const token = authToken.trim().replaceAll("Bearer ", "").trim();
-
- const pinecone = new Pinecone();
- const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);
+ const serverConfig = getServerSideConfig();
+ // const pinecone = new Pinecone();
+ // const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
const apiKey = getOpenAIApiKey(token);
const baseUrl = getOpenAIBaseUrl(reqBody.baseUrl);
const embeddings = new OpenAIEmbeddings(
{
- modelName: process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
+ modelName: serverConfig.ragEmbeddingModel ?? "text-embedding-3-large",
openAIApiKey: apiKey,
},
{ basePath: baseUrl },
);
- const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
- pineconeIndex,
- });
- const results = await vectorStore.similaritySearch(reqBody.query, 1, {
- sessionId: reqBody.sessionId,
- });
- console.log(results);
- return NextResponse.json(results, {
+ // const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
+ // pineconeIndex,
+ // });
+ // const results = await vectorStore.similaritySearch(reqBody.query, 4, {
+ // sessionId: reqBody.sessionId,
+ // });
+ const vectorStore = await QdrantVectorStore.fromExistingCollection(
+ embeddings,
+ {
+ url: process.env.QDRANT_URL,
+ apiKey: process.env.QDRANT_API_KEY,
+ collectionName: reqBody.sessionId,
+ },
+ );
+ const returnCunt = serverConfig.ragReturnCount
+ ? parseInt(serverConfig.ragReturnCount, 10)
+ : 4;
+ const response = await vectorStore.similaritySearch(
+ reqBody.query,
+ returnCunt,
+ );
+ return NextResponse.json(response, {
status: 200,
});
} catch (e) {
diff --git a/app/api/langchain/rag/store/route.ts b/app/api/langchain/rag/store/route.ts
index cbc14a3f7..9ded033d9 100644
--- a/app/api/langchain/rag/store/route.ts
+++ b/app/api/langchain/rag/store/route.ts
@@ -20,6 +20,7 @@ import { FileInfo } from "@/app/client/platforms/utils";
import mime from "mime";
import LocalFileStorage from "@/app/utils/local_file_storage";
import S3FileStorage from "@/app/utils/s3_file_storage";
+import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
interface RequestBody {
sessionId: string;
@@ -80,16 +81,17 @@ async function handle(req: NextRequest) {
const apiKey = getOpenAIApiKey(token);
const baseUrl = getOpenAIBaseUrl(reqBody.baseUrl);
const serverConfig = getServerSideConfig();
- const pinecone = new Pinecone();
- const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);
+ // const pinecone = new Pinecone();
+ // const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
const embeddings = new OpenAIEmbeddings(
{
- modelName: process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
+ modelName: serverConfig.ragEmbeddingModel,
openAIApiKey: apiKey,
},
{ basePath: baseUrl },
);
- //https://js.langchain.com/docs/integrations/vectorstores/pinecone
+ // https://js.langchain.com/docs/integrations/vectorstores/pinecone
+ // https://js.langchain.com/docs/integrations/vectorstores/qdrant
// process files
for (let i = 0; i < reqBody.fileInfos.length; i++) {
const fileInfo = reqBody.fileInfos[i];
@@ -121,22 +123,33 @@ async function handle(req: NextRequest) {
};
});
// split
- const chunkSize = process.env.RAG_CHUNK_SIZE
- ? parseInt(process.env.RAG_CHUNK_SIZE, 10)
+ const chunkSize = serverConfig.ragChunkSize
+ ? parseInt(serverConfig.ragChunkSize, 10)
: 2000;
- const chunkOverlap = process.env.RAG_CHUNK_OVERLAP
- ? parseInt(process.env.RAG_CHUNK_OVERLAP, 10)
+ const chunkOverlap = serverConfig.ragChunkOverlap
+ ? parseInt(serverConfig.ragChunkOverlap, 10)
: 200;
const textSplitter = new RecursiveCharacterTextSplitter({
chunkSize: chunkSize,
chunkOverlap: chunkOverlap,
});
const splits = await textSplitter.splitDocuments(docs);
- // remove history
- await PineconeStore.fromDocuments(splits, embeddings, {
- pineconeIndex,
- maxConcurrency: 5,
- });
+ const vectorStore = await QdrantVectorStore.fromDocuments(
+ splits,
+ embeddings,
+ {
+ url: process.env.QDRANT_URL,
+ apiKey: process.env.QDRANT_API_KEY,
+ collectionName: reqBody.sessionId,
+ },
+ );
+ // await PineconeStore.fromDocuments(splits, embeddings, {
+ // pineconeIndex,
+ // maxConcurrency: 5,
+ // });
+ // const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
+ // pineconeIndex,
+ // });
}
return NextResponse.json(
{
diff --git a/app/client/api.ts b/app/client/api.ts
index 814befd9f..77759b318 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -115,7 +115,7 @@ export abstract class LLMApi {
abstract speech(options: SpeechOptions): Promise;
abstract transcription(options: TranscriptionOptions): Promise;
abstract toolAgentChat(options: AgentChatOptions): Promise;
- abstract createRAGSore(options: CreateRAGStoreOptions): Promise;
+ abstract createRAGStore(options: CreateRAGStoreOptions): Promise;
abstract usage(): Promise;
abstract models(): Promise;
}
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index 4f5f536d5..ffc1e977f 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -20,7 +20,7 @@ import {
} from "@/app/utils";
export class GeminiProApi implements LLMApi {
- createRAGSore(options: CreateRAGStoreOptions): Promise {
+ createRAGStore(options: CreateRAGStoreOptions): Promise {
throw new Error("Method not implemented.");
}
transcription(options: TranscriptionOptions): Promise {
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index ef383fa00..0ff9ad706 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -363,7 +363,7 @@ export class ChatGPTApi implements LLMApi {
}
}
- async createRAGSore(options: CreateRAGStoreOptions): Promise {
+ async createRAGStore(options: CreateRAGStoreOptions): Promise {
try {
const accessStore = useAccessStore.getState();
const isAzure = accessStore.provider === ServiceProvider.Azure;
@@ -373,7 +373,7 @@ export class ChatGPTApi implements LLMApi {
fileInfos: options.fileInfos,
baseUrl: baseUrl,
};
- console.log("[Request] openai payload: ", requestPayload);
+ console.log("[Request] rag store payload: ", requestPayload);
const controller = new AbortController();
options.onController?.(controller);
let path = "/api/langchain/rag/store";
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index 4c63393f8..7ddb929ad 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -509,14 +509,13 @@ export function ChatActions(props: {
const [showUploadImage, setShowUploadImage] = useState(false);
const [showUploadFile, setShowUploadFile] = useState(false);
+ const accessStore = useAccessStore();
useEffect(() => {
const show = isVisionModel(currentModel);
setShowUploadImage(show);
- const serverConfig = getServerSideConfig();
- setShowUploadFile(
- serverConfig.isEnableRAG && !show && isSupportRAGModel(currentModel),
- );
+ const isEnableRAG = !!process.env.NEXT_PUBLIC_ENABLE_RAG;
+ setShowUploadFile(isEnableRAG && !show && isSupportRAGModel(currentModel));
if (!show) {
props.setAttachImages([]);
props.setUploading(false);
@@ -1039,7 +1038,9 @@ function _Chat() {
setIsLoading(true);
const textContent = getMessageTextContent(userMessage);
const images = getMessageImages(userMessage);
- chatStore.onUserInput(textContent, images).then(() => setIsLoading(false));
+ chatStore
+ .onUserInput(textContent, images, userMessage.fileInfos)
+ .then(() => setIsLoading(false));
inputRef.current?.focus();
};
diff --git a/app/config/server.ts b/app/config/server.ts
index 9ee6597b8..7a8228b9f 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -113,5 +113,10 @@ export const getServerSideConfig = () => {
!process.env.S3_ENDPOINT,
isEnableRAG: !!process.env.NEXT_PUBLIC_ENABLE_RAG,
+ ragEmbeddingModel:
+ process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
+ ragChunkSize: process.env.RAG_CHUNK_SIZE ?? "2000",
+ ragChunkOverlap: process.env.RAG_CHUNK_OVERLAP ?? "200",
+ ragReturnCount: process.env.RAG_RETURN_COUNT ?? "4",
};
};
diff --git a/app/store/access.ts b/app/store/access.ts
index 17fb0f94c..75442a6a9 100644
--- a/app/store/access.ts
+++ b/app/store/access.ts
@@ -43,6 +43,7 @@ const DEFAULT_ACCESS_STATE = {
disableGPT4: false,
disableFastLink: false,
customModels: "",
+ isEnableRAG: false,
};
export const useAccessStore = createPersistStore(
@@ -55,6 +56,10 @@ export const useAccessStore = createPersistStore(
return get().needCode;
},
+ isEnableRAG() {
+ return ensure(get(), ["isEnableRAG"]);
+ },
+
isValidOpenAI() {
return ensure(get(), ["openaiApiKey"]);
},
diff --git a/app/store/chat.ts b/app/store/chat.ts
index 48a43e330..b46ef2819 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -376,88 +376,96 @@ export const useChatStore = createPersistStore(
});
var api: ClientApi;
api = new ClientApi(ModelProvider.GPT);
+ const isEnableRAG = !!process.env.NEXT_PUBLIC_ENABLE_RAG;
if (
config.pluginConfig.enable &&
session.mask.usePlugins &&
- (allPlugins.length > 0 || !!process.env.NEXT_PUBLIC_ENABLE_RAG) &&
+ (allPlugins.length > 0 || isEnableRAG) &&
modelConfig.model.startsWith("gpt") &&
modelConfig.model != "gpt-4-vision-preview"
) {
console.log("[ToolAgent] start");
const pluginToolNames = allPlugins.map((m) => m.toolName);
- if (!!process.env.NEXT_PUBLIC_ENABLE_RAG)
- pluginToolNames.push("rag-search");
- if (attachFiles && attachFiles.length > 0) {
- console.log("crete rag store");
- await api.llm.createRAGSore({
+ if (isEnableRAG) pluginToolNames.push("rag-search");
+ const agentCall = () => {
+ api.llm.toolAgentChat({
chatSessionId: session.id,
- fileInfos: attachFiles,
- });
- }
- api.llm.toolAgentChat({
- chatSessionId: session.id,
- messages: sendMessages,
- config: { ...modelConfig, stream: true },
- agentConfig: { ...pluginConfig, useTools: pluginToolNames },
- onUpdate(message) {
- botMessage.streaming = true;
- if (message) {
- botMessage.content = message;
- }
- get().updateCurrentSession((session) => {
- session.messages = session.messages.concat();
- });
- },
- onToolUpdate(toolName, toolInput) {
- botMessage.streaming = true;
- if (toolName && toolInput) {
- botMessage.toolMessages!.push({
- toolName,
- toolInput,
+ messages: sendMessages,
+ config: { ...modelConfig, stream: true },
+ agentConfig: { ...pluginConfig, useTools: pluginToolNames },
+ onUpdate(message) {
+ botMessage.streaming = true;
+ if (message) {
+ botMessage.content = message;
+ }
+ get().updateCurrentSession((session) => {
+ session.messages = session.messages.concat();
});
- }
- get().updateCurrentSession((session) => {
- session.messages = session.messages.concat();
- });
- },
- onFinish(message) {
- botMessage.streaming = false;
- if (message) {
- botMessage.content = message;
- get().onNewMessage(botMessage);
- }
- ChatControllerPool.remove(session.id, botMessage.id);
- },
- onError(error) {
- const isAborted = error.message.includes("aborted");
- botMessage.content +=
- "\n\n" +
- prettyObject({
- error: true,
- message: error.message,
+ },
+ onToolUpdate(toolName, toolInput) {
+ botMessage.streaming = true;
+ if (toolName && toolInput) {
+ botMessage.toolMessages!.push({
+ toolName,
+ toolInput,
+ });
+ }
+ get().updateCurrentSession((session) => {
+ session.messages = session.messages.concat();
});
- botMessage.streaming = false;
- userMessage.isError = !isAborted;
- botMessage.isError = !isAborted;
- get().updateCurrentSession((session) => {
- session.messages = session.messages.concat();
- });
- ChatControllerPool.remove(
- session.id,
- botMessage.id ?? messageIndex,
- );
+ },
+ onFinish(message) {
+ botMessage.streaming = false;
+ if (message) {
+ botMessage.content = message;
+ get().onNewMessage(botMessage);
+ }
+ ChatControllerPool.remove(session.id, botMessage.id);
+ },
+ onError(error) {
+ const isAborted = error.message.includes("aborted");
+ botMessage.content +=
+ "\n\n" +
+ prettyObject({
+ error: true,
+ message: error.message,
+ });
+ botMessage.streaming = false;
+ userMessage.isError = !isAborted;
+ botMessage.isError = !isAborted;
+ get().updateCurrentSession((session) => {
+ session.messages = session.messages.concat();
+ });
+ ChatControllerPool.remove(
+ session.id,
+ botMessage.id ?? messageIndex,
+ );
- console.error("[Chat] failed ", error);
- },
- onController(controller) {
- // collect controller for stop/retry
- ChatControllerPool.addController(
- session.id,
- botMessage.id ?? messageIndex,
- controller,
- );
- },
- });
+ console.error("[Chat] failed ", error);
+ },
+ onController(controller) {
+ // collect controller for stop/retry
+ ChatControllerPool.addController(
+ session.id,
+ botMessage.id ?? messageIndex,
+ controller,
+ );
+ },
+ });
+ };
+ if (attachFiles && attachFiles.length > 0) {
+ await api.llm
+ .createRAGStore({
+ chatSessionId: session.id,
+ fileInfos: attachFiles,
+ })
+ .then(() => {
+ console.log("[RAG]", "Vector db created");
+ agentCall();
+ });
+ } else {
+ agentCall();
+ }
} else {
if (modelConfig.model.startsWith("gemini")) {
api = new ClientApi(ModelProvider.GeminiPro);
diff --git a/docs/images/rag-example.jpg b/docs/images/rag-example.jpg
new file mode 100644
index 000000000..920b81733
Binary files /dev/null and b/docs/images/rag-example.jpg differ
diff --git a/docs/images/rag.png b/docs/images/rag.png
new file mode 100644
index 000000000..a28904e55
Binary files /dev/null and b/docs/images/rag.png differ
diff --git a/docs/rag-cn.md b/docs/rag-cn.md
new file mode 100644
index 000000000..2183f2495
--- /dev/null
+++ b/docs/rag-cn.md
@@ -0,0 +1,74 @@
+# RAG 功能配置说明
+
+> [!WARNING]
+> 该功能目前在预览阶段,可能会有较多的问题,请在仔细阅读本文档后再使用。
+
+## 效果图
+
+
+
+## 原理
+
+
+
+## 已知问题
+
+- 由于接口中使用 nodejs 运行时,在 vercel 环境下接口可能会超时,建议使用 docker 部署
+- 已开启的插件可能会影响到数据检索,可以关闭部分插件后再使用
+- 已创建的向量数据不会删除
+- 同一聊天窗口内即使“清除聊天”也可以访问已经上传的文件内容
+- RAG 插件需要一定的话术来让模型触发查询
+- 上传文件部分的 UI 交互可能会变更
+- 暂不支持文档总结
+
+## 支持的文件类型
+
+- txt
+- md
+- pdf
+- docx
+- csv
+- json
+- srt
+- mp3 (基于 OpenAI Whisper)
+
+## 配置
+
+1. 登录 https://cloud.qdrant.io 并创建一个账户
+2. 在控制面板中创建一个 Cluster
+3. 获取 Cluster 的 Cluster URL 和 API Key
+4. 完善下面的环境变量配置后即可使用
+
+## 环境变量
+
+### `NEXT_PUBLIC_ENABLE_RAG`
+
+如果你想启用 RAG 功能,将此环境变量设置为 1 即可。
+
+### `QDRANT_URL`
+
+qdrant 服务的 Cluster URL。
+
+### `QDRANT_API_KEY`
+
+qdrant 服务的 ApiKey。
+
+### `RAG_CHUNK_SIZE` (可选)
+
+分割后文档的最大大小(按字符数计算),默认:2000。
+
+### `RAG_CHUNK_OVERLAP` (可选)
+
+分割文档时块重叠数量,默认:200。
+
+### `RAG_RETURN_COUNT` (可选)
+
+检索时返回的文档数量,默认:4。
+
+### `RAG_EMBEDDING_MODEL` (可选)
+
+向量化时使用的向量模型,默认:text-embedding-3-large。
+可选项:
+- text-embedding-3-small
+- text-embedding-3-large
+- text-embedding-ada-002
\ No newline at end of file
diff --git a/package.json b/package.json
index 5a9e85b7a..eea27d300 100644
--- a/package.json
+++ b/package.json
@@ -27,6 +27,7 @@
"@langchain/pinecone": "^0.0.4",
"@next/third-parties": "^14.1.0",
"@pinecone-database/pinecone": "^2.2.0",
+ "@qdrant/js-client-rest": "^1.8.2",
"@svgr/webpack": "^6.5.1",
"@vercel/analytics": "^0.1.11",
"@vercel/speed-insights": "^1.0.2",
diff --git a/yarn.lock b/yarn.lock
index 49b84a520..93529d3a1 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1717,6 +1717,11 @@
resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.0.tgz#a5417ae8427873f1dd08b70b3574b453e67b5f7f"
integrity sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==
+"@fastify/busboy@^2.0.0":
+ version "2.1.1"
+ resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.1.1.tgz#b9da6a878a371829a0502c9b6c1c143ef6663f4d"
+ integrity sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==
+
"@fortaine/fetch-event-source@^3.0.6":
version "3.0.6"
resolved "https://registry.yarnpkg.com/@fortaine/fetch-event-source/-/fetch-event-source-3.0.6.tgz#b8552a2ca2c5202f5699b93a92be0188d422b06e"
@@ -1977,6 +1982,20 @@
resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.1.tgz#1ec17e2edbec25c8306d424ecfbf13c7de1aaa31"
integrity sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==
+"@qdrant/js-client-rest@^1.8.2":
+ version "1.8.2"
+ resolved "https://registry.yarnpkg.com/@qdrant/js-client-rest/-/js-client-rest-1.8.2.tgz#7ea149c791e6c89da931c5a8fd043f61a97aca56"
+ integrity sha512-BCGC4YRcqjRxXVo500CxjhluPpGO0XpOwojauT8675Duv24YTlkhvDRmc1c9k/df2+yH/typtkecK3VOi3CD7A==
+ dependencies:
+ "@qdrant/openapi-typescript-fetch" "1.2.6"
+ "@sevinf/maybe" "0.5.0"
+ undici "~5.28.4"
+
+"@qdrant/openapi-typescript-fetch@1.2.6":
+ version "1.2.6"
+ resolved "https://registry.yarnpkg.com/@qdrant/openapi-typescript-fetch/-/openapi-typescript-fetch-1.2.6.tgz#c2682a9fa26ded86384f421c991f6c461785af7e"
+ integrity sha512-oQG/FejNpItrxRHoyctYvT3rwGZOnK4jr3JdppO/c78ktDvkWiPXPHNsrDf33K9sZdRb6PR7gi4noIapu5q4HA==
+
"@remix-run/router@1.15.3":
version "1.15.3"
resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.15.3.tgz#d2509048d69dbb72d5389a14945339f1430b2d3c"
@@ -1995,6 +2014,11 @@
domhandler "^5.0.3"
selderee "^0.11.0"
+"@sevinf/maybe@0.5.0":
+ version "0.5.0"
+ resolved "https://registry.yarnpkg.com/@sevinf/maybe/-/maybe-0.5.0.tgz#e59fcea028df615fe87d708bb30e1f338e46bb44"
+ integrity sha512-ARhyoYDnY1LES3vYI0fiG6e9esWfTNcXcO6+MPJJXcnyMV3bim4lnFt45VXouV7y82F4x3YH8nOQ6VztuvUiWg==
+
"@sinclair/typebox@^0.29.0":
version "0.29.6"
resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.29.6.tgz#4cd8372f9247372edd5fc5af44f67e2032c46e2f"
@@ -8263,6 +8287,13 @@ undici-types@~5.26.4:
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
+undici@~5.28.4:
+ version "5.28.4"
+ resolved "https://registry.yarnpkg.com/undici/-/undici-5.28.4.tgz#6b280408edb6a1a604a9b20340f45b422e373068"
+ integrity sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==
+ dependencies:
+ "@fastify/busboy" "^2.0.0"
+
unicode-canonical-property-names-ecmascript@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz#301acdc525631670d39f6146e0e77ff6bbdebddc"