ChatGPT-Next-Web/app/api/openai.ts

import type { OpenAIListModelResponse } from '@/app/client/platforms/openai';
import type { NextRequest } from 'next/server';
import { getServerSideConfig } from '@/app/config/server';
import { ModelProvider, OpenaiPath } from '@/app/constant';
import { prettyObject } from '@/app/utils/format';
import { NextResponse } from 'next/server';
import { auth } from './auth';
import { requestOpenai } from './common';
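
// Allow-list of OpenAI API sub-paths this route is willing to proxy.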
const ALLOWED_PATH = new Set(Object.values(OpenaiPath));
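
// getModels() trims the model list returned by the upstream list-models call.
// OpenAIListModelResponse (imported above) is assumed to follow the standard OpenAI
// list shape, roughly { object: 'list', data: Array<{ id: string }> }; only `id` is
// inspected here. When config.disableGPT4 is set, gpt-4*, chatgpt-4o* and o1* models
// are dropped, with gpt-4o-mini kept as an explicit exception.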
function getModels(remoteModelRes: OpenAIListModelResponse) {
  const config = getServerSideConfig();

  if (config.disableGPT4) {
    remoteModelRes.data = remoteModelRes.data.filter(
      m =>
        !(m.id.startsWith('gpt-4') || m.id.startsWith('chatgpt-4o') || m.id.startsWith('o1'))
        || m.id.startsWith('gpt-4o-mini'),
    );
  }

  return remoteModelRes;
}
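
// Route handler: rejects unknown sub-paths, authenticates the request, then proxies it
// to the OpenAI API via requestOpenai().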
export async function handle(
  req: NextRequest,
  { params }: { params: { path: string[] } },
) {
  console.log('[OpenAI Route] params ', params);

  // CORS preflight requests are answered directly, without contacting the upstream API.
  if (req.method === 'OPTIONS') {
    return NextResponse.json({ body: 'OK' }, { status: 200 });
  }

  const subpath = params.path.join('/');

  if (!ALLOWED_PATH.has(subpath)) {
    console.log('[OpenAI Route] forbidden path ', subpath);
    return NextResponse.json(
      {
        error: true,
        msg: `you are not allowed to request ${subpath}`,
      },
      {
        status: 403,
      },
    );
  }

  // Authenticate the request before proxying it upstream.
  const authResult = auth(req, ModelProvider.GPT);
  if (authResult.error) {
    return NextResponse.json(authResult, {
      status: 401,
    });
  }

  try {
    const response = await requestOpenai(req);

    // list models: filter the upstream response before returning it to the client
    if (subpath === OpenaiPath.ListModelPath && response.status === 200) {
      const resJson = (await response.json()) as OpenAIListModelResponse;
      const availableModels = getModels(resJson);
      return NextResponse.json(availableModels, {
        status: response.status,
      });
    }

    return response;
  } catch (e) {
    console.error('[OpenAI] ', e);
    return NextResponse.json(prettyObject(e));
  }
}
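
For context, handle() is not a complete route module on its own; it is meant to be re-exported by a catch-all route file for each HTTP method. The sketch below is a guess at that wiring — the route path app/api/openai/[...path]/route.ts is an assumption about the project layout, not something shown in this file.

// app/api/openai/[...path]/route.ts (hypothetical wiring, for illustration only)
import { handle } from '@/app/api/openai';

export const GET = handle;
export const POST = handle;
// OPTIONS is short-circuited inside handle(), so it can share the same handler.
export const OPTIONS = handle;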