diff --git a/app/api/anthropic/[...path]/route.ts b/app/api/anthropic/[...path]/route.ts
index 4264893d9..274d7d7c4 100644
--- a/app/api/anthropic/[...path]/route.ts
+++ b/app/api/anthropic/[...path]/route.ts
@@ -9,7 +9,7 @@ import {
 import { prettyObject } from "@/app/utils/format";
 import { NextRequest, NextResponse } from "next/server";
 import { auth } from "../../auth";
-import { collectModelTable } from "@/app/utils/model";
+import { isModelAvailableInServer } from "@/app/utils/model";
 
 const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
 
@@ -136,17 +136,15 @@ async function request(req: NextRequest) {
   // #1815 try to refuse some request to some models
   if (serverConfig.customModels && req.body) {
     try {
-      const modelTable = collectModelTable(
-        DEFAULT_MODELS,
-        serverConfig.customModels,
-      );
       const clonedBody = await req.text();
       fetchOptions.body = clonedBody;
 
       const jsonBody = JSON.parse(clonedBody) as { model?: string };
 
-      // not undefined and is false
+      // refuse the request if the model is explicitly disabled on the server
-      if (modelTable[jsonBody?.model ?? ""].available === false) {
+      if (
+        isModelAvailableInServer(serverConfig.customModels, jsonBody?.model)
+      ) {
         return NextResponse.json(
           {
             error: true,
diff --git a/app/api/common.ts b/app/api/common.ts
index a75f2de5c..3e0156569 100644
--- a/app/api/common.ts
+++ b/app/api/common.ts
@@ -1,7 +1,7 @@
 import { NextRequest, NextResponse } from "next/server";
 import { getServerSideConfig } from "../config/server";
 import { DEFAULT_MODELS, OPENAI_BASE_URL, GEMINI_BASE_URL } from "../constant";
-import { collectModelTable } from "../utils/model";
+import { isModelAvailableInServer } from "../utils/model";
 import { makeAzurePath } from "../azure";
 
 const serverConfig = getServerSideConfig();
@@ -83,17 +83,15 @@ export async function requestOpenai(req: NextRequest) {
   // #1815 try to refuse gpt4 request
   if (serverConfig.customModels && req.body) {
     try {
-      const modelTable = collectModelTable(
-        DEFAULT_MODELS,
-        serverConfig.customModels,
-      );
       const clonedBody = await req.text();
       fetchOptions.body = clonedBody;
 
       const jsonBody = JSON.parse(clonedBody) as { model?: string };
 
-      // not undefined and is false
+      // refuse the request if the model is explicitly disabled on the server
-      if (modelTable[jsonBody?.model ?? ""].available === false) {
+      if (
+        isModelAvailableInServer(serverConfig.customModels, jsonBody?.model)
+      ) {
         return NextResponse.json(
           {
             error: true,
@@ -112,16 +110,16 @@ export async function requestOpenai(req: NextRequest) {
   try {
     const res = await fetch(fetchUrl, fetchOptions);
 
-  // Extract the OpenAI-Organization header from the response
-  const openaiOrganizationHeader = res.headers.get("OpenAI-Organization");
+    // Extract the OpenAI-Organization header from the response
+    const openaiOrganizationHeader = res.headers.get("OpenAI-Organization");
 
-  // Check if serverConfig.openaiOrgId is defined and not an empty string
-  if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") {
-    // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present
-    console.log("[Org ID]", openaiOrganizationHeader);
-  } else {
-    console.log("[Org ID] is not set up.");
-  }
+    // Check if serverConfig.openaiOrgId is defined and not an empty string
+    if (serverConfig.openaiOrgId && serverConfig.openaiOrgId.trim() !== "") {
+      // If openaiOrganizationHeader is present, log it; otherwise, log that the header is not present
+      console.log("[Org ID]", openaiOrganizationHeader);
+    } else {
+      console.log("[Org ID] is not set up.");
+    }
 
     // to prevent browser prompt for credentials
     const newHeaders = new Headers(res.headers);
@@ -129,7 +127,6 @@ export async function requestOpenai(req: NextRequest) {
     // to disable nginx buffering
     newHeaders.set("X-Accel-Buffering", "no");
 
-
     // Conditionally delete the OpenAI-Organization header from the response if [Org ID] is undefined or empty (not setup in ENV)
     // Also, this is to prevent the header from being sent to the client
     if (!serverConfig.openaiOrgId || serverConfig.openaiOrgId.trim() === "") {
@@ -142,7 +139,6 @@ export async function requestOpenai(req: NextRequest) {
     // The browser will try to decode the response with brotli and fail
     newHeaders.delete("content-encoding");
 
-
     return new Response(res.body, {
       status: res.status,
       statusText: res.statusText,
diff --git a/app/utils/model.ts b/app/utils/model.ts
index 056fff2e9..970c4ea1c 100644
--- a/app/utils/model.ts
+++ b/app/utils/model.ts
@@ -1,3 +1,4 @@
+import { DEFAULT_MODELS } from "../constant";
 import { LLMModel } from "../client/api";
 
 const customProvider = (modelName: string) => ({
@@ -100,3 +101,12 @@ export function collectModelsWithDefaultModel(
   const allModels = Object.values(modelTable);
   return allModels;
 }
+
+// Returns true when the model is explicitly marked unavailable via CUSTOM_MODELS.
+export function isModelAvailableInServer(
+  customModels: string,
+  modelName?: string,
+) {
+  const modelTable = collectModelTable(DEFAULT_MODELS, customModels);
+  return modelTable[modelName ?? ""]?.available === false;
+}
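
For reference, a minimal sketch of how the extracted helper behaves, assuming the existing `collectModelTable` semantics where a `CUSTOM_MODELS` entry such as `-gpt-4` marks that model as unavailable (the values below are illustrative, not part of this diff):

```ts
import { isModelAvailableInServer } from "@/app/utils/model";

// "-gpt-4" disables gpt-4 on the server; other models keep their defaults.
isModelAvailableInServer("-gpt-4", "gpt-4"); // true  -> the proxy refuses the request
isModelAvailableInServer("-gpt-4", "gpt-3.5-turbo"); // false -> the request passes through
isModelAvailableInServer("-gpt-4", undefined); // false -> no model name, nothing to block
```

Note the inverted naming: the helper returns `true` when the model is *not* available, which is why the call sites above use it directly as the refusal condition.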