commit 7deb36ee1f
GH Action - Upstream Sync 2024-08-06 01:33:38 +00:00
24 changed files with 542 additions and 255 deletions

View File

@ -0,0 +1,64 @@
import { ApiPath } from "@/app/constant";
import { NextRequest, NextResponse } from "next/server";
import { handle as openaiHandler } from "../../openai";
import { handle as azureHandler } from "../../azure";
import { handle as googleHandler } from "../../google";
import { handle as anthropicHandler } from "../../anthropic";
import { handle as baiduHandler } from "../../baidu";
import { handle as bytedanceHandler } from "../../bytedance";
import { handle as alibabaHandler } from "../../alibaba";
import { handle as moonshotHandler } from "../../moonshot";
import { handle as stabilityHandler } from "../../stability";
async function handle(
req: NextRequest,
{ params }: { params: { provider: string; path: string[] } },
) {
const apiPath = `/api/${params.provider}`;
console.log(`[${params.provider} Route] params `, params);
switch (apiPath) {
case ApiPath.Azure:
return azureHandler(req, { params });
case ApiPath.Google:
return googleHandler(req, { params });
case ApiPath.Anthropic:
return anthropicHandler(req, { params });
case ApiPath.Baidu:
return baiduHandler(req, { params });
case ApiPath.ByteDance:
return bytedanceHandler(req, { params });
case ApiPath.Alibaba:
return alibabaHandler(req, { params });
// ApiPath.Tencent is served by its own dedicated "/api/tencent" route
case ApiPath.Moonshot:
return moonshotHandler(req, { params });
case ApiPath.Stability:
return stabilityHandler(req, { params });
default:
return openaiHandler(req, { params });
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];
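A note on the dispatch above (a sketch; the exact route file path is not shown in this diff, but a Next.js catch-all segment such as app/api/[provider]/[...path]/route.ts would produce these params):

// Illustrative mapping (hypothetical URLs, not part of this commit):
// GET /api/anthropic/v1/messages
//   params  = { provider: "anthropic", path: ["v1", "messages"] }
//   apiPath = "/api/anthropic" -> ApiPath.Anthropic -> anthropicHandler(req, { params })
// GET /api/some-unknown-provider/x
//   no ApiPath case matches -> openaiHandler(req, { params })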

View File

@ -14,7 +14,7 @@ import type { RequestPayload } from "@/app/client/platforms/openai";
const serverConfig = getServerSideConfig();
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -40,30 +40,6 @@ async function handle(
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];
async function request(req: NextRequest) {
const controller = new AbortController();

View File

@ -9,13 +9,13 @@ import {
} from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "../../auth";
import { auth } from "./auth";
import { isModelAvailableInServer } from "@/app/utils/model";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
const ALLOWD_PATH = new Set([Anthropic.ChatPath, Anthropic.ChatPath1]);
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -56,30 +56,6 @@ async function handle(
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];
const serverConfig = getServerSideConfig();
async function request(req: NextRequest) {

View File

@ -2,10 +2,10 @@ import { getServerSideConfig } from "@/app/config/server";
import { ModelProvider } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "../../auth";
import { requestOpenai } from "../../common";
import { auth } from "./auth";
import { requestOpenai } from "./common";
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -31,27 +31,3 @@ async function handle(
return NextResponse.json(prettyObject(e));
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];

View File

@ -14,7 +14,7 @@ import { getAccessToken } from "@/app/utils/baidu";
const serverConfig = getServerSideConfig();
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -52,30 +52,6 @@ async function handle(
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];
async function request(req: NextRequest) {
const controller = new AbortController();

View File

@ -12,7 +12,7 @@ import { isModelAvailableInServer } from "@/app/utils/model";
const serverConfig = getServerSideConfig();
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -38,30 +38,6 @@ async function handle(
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];
async function request(req: NextRequest) {
const controller = new AbortController();

View File

@ -1,5 +1,5 @@
import { NextRequest, NextResponse } from "next/server";
import { auth } from "../../auth";
import { auth } from "./auth";
import { getServerSideConfig } from "@/app/config/server";
import {
ApiPath,
@ -11,9 +11,9 @@ import { prettyObject } from "@/app/utils/format";
const serverConfig = getServerSideConfig();
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
{ params }: { params: { provider: string; path: string[] } },
) {
console.log("[Google Route] params ", params);

View File

@ -14,7 +14,7 @@ import type { RequestPayload } from "@/app/client/platforms/openai";
const serverConfig = getServerSideConfig();
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -40,30 +40,6 @@ async function handle(
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];
async function request(req: NextRequest) {
const controller = new AbortController();

View File

@ -3,8 +3,8 @@ import { getServerSideConfig } from "@/app/config/server";
import { ModelProvider, OpenaiPath } from "@/app/constant";
import { prettyObject } from "@/app/utils/format";
import { NextRequest, NextResponse } from "next/server";
import { auth } from "../../auth";
import { requestOpenai } from "../../common";
import { auth } from "./auth";
import { requestOpenai } from "./common";
const ALLOWD_PATH = new Set(Object.values(OpenaiPath));
@ -20,7 +20,7 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
return remoteModelRes;
}
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -70,27 +70,3 @@ async function handle(
return NextResponse.json(prettyObject(e));
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";
export const preferredRegion = [
"arn1",
"bom1",
"cdg1",
"cle1",
"cpt1",
"dub1",
"fra1",
"gru1",
"hnd1",
"iad1",
"icn1",
"kix1",
"lhr1",
"pdx1",
"sfo1",
"sin1",
"syd1",
];

View File

@ -3,7 +3,7 @@ import { getServerSideConfig } from "@/app/config/server";
import { ModelProvider, STABILITY_BASE_URL } from "@/app/constant";
import { auth } from "@/app/api/auth";
async function handle(
export async function handle(
req: NextRequest,
{ params }: { params: { path: string[] } },
) {
@ -97,8 +97,3 @@ async function handle(
clearTimeout(timeoutId);
}
}
export const GET = handle;
export const POST = handle;
export const runtime = "edge";

View File

@ -6,7 +6,7 @@ import {
ServiceProvider,
} from "../constant";
import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
import { ChatGPTApi } from "./platforms/openai";
import { ChatGPTApi, DalleRequestPayload } from "./platforms/openai";
import { GeminiProApi } from "./platforms/google";
import { ClaudeApi } from "./platforms/anthropic";
import { ErnieApi } from "./platforms/baidu";
@ -42,6 +42,7 @@ export interface LLMConfig {
stream?: boolean;
presence_penalty?: number;
frequency_penalty?: number;
size?: DalleRequestPayload["size"];
}
export interface ChatOptions {
@ -64,12 +65,14 @@ export interface LLMModel {
displayName?: string;
available: boolean;
provider: LLMModelProvider;
sorted: number;
}
export interface LLMModelProvider {
id: string;
providerName: string;
providerType: string;
sorted: number;
}
export abstract class LLMApi {
@ -118,6 +121,7 @@ export class ClientApi {
break;
case ModelProvider.Qwen:
this.llm = new QwenApi();
break;
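// the added break prevents fallthrough into the Hunyuan case, which
// previously overwrote this.llm with a HunyuanApi instance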
case ModelProvider.Hunyuan:
this.llm = new HunyuanApi();
break;

View File

@ -11,8 +11,13 @@ import {
} from "@/app/constant";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import { collectModelsWithDefaultModel } from "@/app/utils/model";
import { preProcessImageContent } from "@/app/utils/chat";
import {
preProcessImageContent,
uploadImage,
base64Image2Blob,
} from "@/app/utils/chat";
import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
import { DalleSize } from "@/app/typing";
import {
ChatOptions,
@ -33,6 +38,7 @@ import {
getMessageTextContent,
getMessageImages,
isVisionModel,
isDalle3 as _isDalle3,
} from "@/app/utils";
export interface OpenAIListModelResponse {
@ -58,6 +64,14 @@ export interface RequestPayload {
max_tokens?: number;
}
export interface DalleRequestPayload {
model: string;
prompt: string;
response_format: "url" | "b64_json";
n: number;
size: DalleSize;
}
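// An illustrative payload for the interface above (hypothetical values,
// not part of this commit):
//
//   const payload: DalleRequestPayload = {
//     model: "dall-e-3",
//     prompt: "a watercolor fox in the snow",
//     response_format: "b64_json", // kept local; hosted URLs expire quickly
//     n: 1,
//     size: "1024x1024",
//   };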
export class ChatGPTApi implements LLMApi {
private disableListModels = true;
@ -100,20 +114,31 @@ export class ChatGPTApi implements LLMApi {
return cloudflareAIGatewayUrl([baseUrl, path].join("/"));
}
extractMessage(res: any) {
return res.choices?.at(0)?.message?.content ?? "";
async extractMessage(res: any) {
if (res.error) {
return "```\n" + JSON.stringify(res, null, 4) + "\n```";
}
// the dall-e-3 endpoint returns image data (a URL or base64); build an image message from it
if (res.data) {
let url = res.data?.at(0)?.url ?? "";
const b64_json = res.data?.at(0)?.b64_json ?? "";
if (!url && b64_json) {
// upload the base64 image (see uploadImage in utils/chat) and use the returned URL
url = await uploadImage(base64Image2Blob(b64_json, "image/png"));
}
return [
{
type: "image_url",
image_url: {
url,
},
},
];
}
return res.choices?.at(0)?.message?.content ?? res;
}
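// extractMessage now distinguishes three response shapes (a sketch, with
// hypothetical field values):
//   an error object                 -> the JSON, stringified inside a code fence
//   an image response (res.data)    -> [{ type: "image_url", image_url: { url } }],
//                                      uploading b64_json to CacheStorage first
//                                      when no hosted URL is present
//   a chat completion (res.choices) -> res.choices[0].message.content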
async chat(options: ChatOptions) {
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
messages.push({ role: v.role, content });
}
const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
@ -123,26 +148,52 @@ export class ChatGPTApi implements LLMApi {
},
};
const requestPayload: RequestPayload = {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// max_tokens is deliberately omitted from chat requests.
};
let requestPayload: RequestPayload | DalleRequestPayload;
// add max_tokens for vision models
if (visionModel && modelConfig.model.includes("preview")) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
const isDalle3 = _isDalle3(options.config.model);
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
);
requestPayload = {
model: options.config.model,
prompt,
// URLs are only valid for 60 minutes after the image has been generated.
response_format: "b64_json", // using b64_json, and save image in CacheStorage
n: 1,
size: options.config?.size ?? "1024x1024",
};
} else {
const visionModel = isVisionModel(options.config.model);
const messages: ChatOptions["messages"] = [];
for (const v of options.messages) {
const content = visionModel
? await preProcessImageContent(v.content)
: getMessageTextContent(v);
messages.push({ role: v.role, content });
}
requestPayload = {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// max_tokens is deliberately omitted from chat requests.
};
// add max_tokens for vision models
if (visionModel && modelConfig.model.includes("preview")) {
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
}
}
console.log("[Request] openai payload: ", requestPayload);
const shouldStream = !!options.config.stream;
const shouldStream = !isDalle3 && !!options.config.stream;
const controller = new AbortController();
options.onController?.(controller);
@ -168,13 +219,15 @@ export class ChatGPTApi implements LLMApi {
model?.provider?.providerName === ServiceProvider.Azure,
);
chatPath = this.path(
Azure.ChatPath(
(isDalle3 ? Azure.ImagePath : Azure.ChatPath)(
(model?.displayName ?? model?.name) as string,
useCustomConfig ? useAccessStore.getState().azureApiVersion : "",
),
);
} else {
chatPath = this.path(OpenaiPath.ChatPath);
chatPath = this.path(
isDalle3 ? OpenaiPath.ImagePath : OpenaiPath.ChatPath,
);
}
const chatPayload = {
method: "POST",
@ -186,7 +239,7 @@ export class ChatGPTApi implements LLMApi {
// make a fetch request
const requestTimeoutId = setTimeout(
() => controller.abort(),
REQUEST_TIMEOUT_MS,
isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dall-e-3 b64_json responses are slow, so double the timeout
);
if (shouldStream) {
@ -317,7 +370,7 @@ export class ChatGPTApi implements LLMApi {
clearTimeout(requestTimeoutId);
const resJson = await res.json();
const message = this.extractMessage(resJson);
const message = await this.extractMessage(resJson);
options.onFinish(message);
}
} catch (e) {
@ -411,13 +464,17 @@ export class ChatGPTApi implements LLMApi {
return [];
}
// Since OpenAI's disableListModels currently defaults to true, this branch is never reached in practice
let seq = 1000; // keep consistent with the ordering in Constant.ts
return chatModels.map((m) => ({
name: m.id,
available: true,
sorted: seq++,
provider: {
id: "openai",
providerName: "OpenAI",
providerType: "openai",
sorted: 1,
},
}));
}
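With the two sorted fields added above, each entry returned by models() carries a per-model and a per-provider sort key; a sketch of the shape (model name hypothetical):

{
  name: "gpt-4o",
  available: true,
  sorted: 1000, // per-model, increments for each entry
  provider: { id: "openai", providerName: "OpenAI", providerType: "openai", sorted: 1 },
}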

View File

@ -37,6 +37,7 @@ import AutoIcon from "../icons/auto.svg";
import BottomIcon from "../icons/bottom.svg";
import StopIcon from "../icons/pause.svg";
import RobotIcon from "../icons/robot.svg";
import SizeIcon from "../icons/size.svg";
import PluginIcon from "../icons/plugin.svg";
import {
@ -60,6 +61,7 @@ import {
getMessageTextContent,
getMessageImages,
isVisionModel,
isDalle3,
} from "../utils";
import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
@ -67,6 +69,7 @@ import { uploadImage as uploadImageRemote } from "@/app/utils/chat";
import dynamic from "next/dynamic";
import { ChatControllerPool } from "../client/controller";
import { DalleSize } from "../typing";
import { Prompt, usePromptStore } from "../store/prompt";
import Locale from "../locales";
@ -481,6 +484,11 @@ export function ChatActions(props: {
const [showPluginSelector, setShowPluginSelector] = useState(false);
const [showUploadImage, setShowUploadImage] = useState(false);
const [showSizeSelector, setShowSizeSelector] = useState(false);
const dalle3Sizes: DalleSize[] = ["1024x1024", "1792x1024", "1024x1792"];
const currentSize =
chatStore.currentSession().mask.modelConfig?.size ?? "1024x1024";
useEffect(() => {
const show = isVisionModel(currentModel);
setShowUploadImage(show);
@ -624,6 +632,33 @@ export function ChatActions(props: {
/>
)}
{isDalle3(currentModel) && (
<ChatAction
onClick={() => setShowSizeSelector(true)}
text={currentSize}
icon={<SizeIcon />}
/>
)}
{showSizeSelector && (
<Selector
defaultSelectedValue={currentSize}
items={dalle3Sizes.map((m) => ({
title: m,
value: m,
}))}
onClose={() => setShowSizeSelector(false)}
onSelection={(s) => {
if (s.length === 0) return;
const size = s[0];
chatStore.updateCurrentSession((session) => {
session.mask.modelConfig.size = size;
});
showToast(size);
}}
/>
)}
<ChatAction
onClick={() => setShowPluginSelector(true)}
text={Locale.Plugin.Name}

View File

@ -146,6 +146,7 @@ export const Anthropic = {
export const OpenaiPath = {
ChatPath: "v1/chat/completions",
ImagePath: "v1/images/generations",
UsagePath: "dashboard/billing/usage",
SubsPath: "dashboard/billing/subscription",
ListModelPath: "v1/models",
@ -154,7 +155,10 @@ export const OpenaiPath = {
export const Azure = {
ChatPath: (deployName: string, apiVersion: string) =>
`deployments/${deployName}/chat/completions?api-version=${apiVersion}`,
ExampleEndpoint: "https://{resource-url}/openai/deployments/{deploy-id}",
// https://<your_resource_name>.openai.azure.com/openai/deployments/<your_deployment_name>/images/generations?api-version=<api_version>
ImagePath: (deployName: string, apiVersion: string) =>
`deployments/${deployName}/images/generations?api-version=${apiVersion}`,
ExampleEndpoint: "https://{resource-url}/openai",
};
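// Illustrative expansion of the new helper (deployment name and API
// version are hypothetical):
//   Azure.ImagePath("my-dalle", "2024-02-01")
//     -> "deployments/my-dalle/images/generations?api-version=2024-02-01"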
export const Google = {
@ -256,6 +260,7 @@ const openaiModels = [
"gpt-4-vision-preview",
"gpt-4-turbo-2024-04-09",
"gpt-4-1106-preview",
"dall-e-3",
];
const googleModels = [
@ -320,86 +325,105 @@ const tencentModels = [
const moonshotModes = ["moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"];
let seq = 1000; // built-in model sequence generator, starting at 1000
export const DEFAULT_MODELS = [
...openaiModels.map((name) => ({
name,
available: true,
sorted: seq++, // global sequence sort index
provider: {
id: "openai",
providerName: "OpenAI",
providerType: "openai",
sorted: 1, // fixed, to keep the order consistent with the earlier built-in list
},
})),
...openaiModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "azure",
providerName: "Azure",
providerType: "azure",
sorted: 2,
},
})),
...googleModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "google",
providerName: "Google",
providerType: "google",
sorted: 3,
},
})),
...anthropicModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "anthropic",
providerName: "Anthropic",
providerType: "anthropic",
sorted: 4,
},
})),
...baiduModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "baidu",
providerName: "Baidu",
providerType: "baidu",
sorted: 5,
},
})),
...bytedanceModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "bytedance",
providerName: "ByteDance",
providerType: "bytedance",
sorted: 6,
},
})),
...alibabaModes.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "alibaba",
providerName: "Alibaba",
providerType: "alibaba",
sorted: 7,
},
})),
...tencentModels.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "tencent",
providerName: "Tencent",
providerType: "tencent",
sorted: 8,
},
})),
...moonshotModes.map((name) => ({
name,
available: true,
sorted: seq++,
provider: {
id: "moonshot",
providerName: "Moonshot",
providerType: "moonshot",
sorted: 9,
},
})),
] as const;
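A quick check of the generated ordering (a sketch; only the counters are asserted, the model names themselves come from the arrays above):

const first = DEFAULT_MODELS[0];
console.log(first.sorted);          // 1000 (the global counter starts here)
console.log(first.provider.sorted); // 1 (the OpenAI block is pinned first)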

app/icons/size.svg Normal file
View File

@ -0,0 +1 @@
<?xml version="1.0" encoding="UTF-8"?><svg width="16" height="16" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M42 7H6C4.89543 7 4 7.89543 4 9V39C4 40.1046 4.89543 41 6 41H42C43.1046 41 44 40.1046 44 39V9C44 7.89543 43.1046 7 42 7Z" fill="none" stroke="#333" stroke-width="4"/><path d="M30 30V18L38 30V18" stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round"/><path d="M10 30V18L18 30V18" stroke="#333" stroke-width="4" stroke-linecap="round" stroke-linejoin="round"/><path d="M24 20V21" stroke="#333" stroke-width="4" stroke-linecap="round"/><path d="M24 27V28" stroke="#333" stroke-width="4" stroke-linecap="round"/></svg>


View File

@ -26,6 +26,7 @@ import { nanoid } from "nanoid";
import { createPersistStore } from "../utils/store";
import { collectModelsWithDefaultModel } from "../utils/model";
import { useAccessStore } from "./access";
import { isDalle3 } from "../utils";
export type ChatMessage = RequestMessage & {
date: string;
@ -541,6 +542,10 @@ export const useChatStore = createPersistStore(
const config = useAppConfig.getState();
const session = get().currentSession();
const modelConfig = session.mask.modelConfig;
// skip summarization when using dall-e-3
if (isDalle3(modelConfig.model)) {
return;
}
const api: ClientApi = getClientApi(modelConfig.providerName);

View File

@ -1,4 +1,5 @@
import { LLMModel } from "../client/api";
import { DalleSize } from "../typing";
import { getClientConfig } from "../config/client";
import {
DEFAULT_INPUT_TEMPLATE,
@ -61,6 +62,7 @@ export const DEFAULT_CONFIG = {
compressMessageLengthThreshold: 1000,
enableInjectSystemPrompts: true,
template: config?.template ?? DEFAULT_INPUT_TEMPLATE,
size: "1024x1024" as DalleSize,
},
};

View File

@ -7,3 +7,5 @@ export interface RequestMessage {
role: MessageRole;
content: string;
}
export type DalleSize = "1024x1024" | "1792x1024" | "1024x1792";

View File

@ -266,3 +266,7 @@ export function isVisionModel(model: string) {
visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo
);
}
export function isDalle3(model: string) {
return "dall-e-3" === model;
}
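A usage sketch; note that the check is an exact string match:

isDalle3("dall-e-3"); // true
isDalle3("dall-e-2"); // false
isDalle3("gpt-4o");   // false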

app/utils/hmac.ts Normal file
View File

@ -0,0 +1,246 @@
// From https://gist.github.com/guillermodlpa/f6d955f838e9b10d1ef95b8e259b2c58
// From https://gist.github.com/stevendesu/2d52f7b5e1f1184af3b667c0b5e054b8
// To ensure cross-browser support even without a proper SubtleCrypto
// implementation (or without access to the implementation, as is the case with
// Chrome loaded over HTTP instead of HTTPS), this library can create SHA-256
// HMAC signatures using nothing but raw JavaScript
/* eslint-disable no-magic-numbers, id-length, no-param-reassign, new-cap */
// By giving internal functions names that we can mangle, future calls to
// them are reduced to a single byte (minor space savings in minified file)
const uint8Array = Uint8Array;
const uint32Array = Uint32Array;
const pow = Math.pow;
// Will be initialized below
// Using a Uint32Array instead of a simple array makes the minified code
// a bit bigger (we lose our `unshift()` hack), but comes with huge
// performance gains
const DEFAULT_STATE = new uint32Array(8);
const ROUND_CONSTANTS: number[] = [];
// Reusable object for expanded message
// Using a Uint32Array instead of a simple array makes the minified code
// 7 bytes larger, but comes with huge performance gains
const M = new uint32Array(64);
// After minification the code to compute the default state and round
// constants is smaller than the output. More importantly, this serves as a
// good educational aide for anyone wondering where the magic numbers come
// from. No magic numbers FTW!
function getFractionalBits(n: number) {
return ((n - (n | 0)) * pow(2, 32)) | 0;
}
let n = 2;
let nPrime = 0;
while (nPrime < 64) {
// isPrime() was in-lined from its original function form to save
// a few bytes
let isPrime = true;
// Math.sqrt() was replaced with pow(n, 1/2) to save a few bytes
// var sqrtN = pow(n, 1 / 2);
// So technically to determine if a number is prime you only need to
// check numbers up to the square root. However this function only runs
// once and we're only computing the first 64 primes (up to 311), so on
// any modern CPU this whole function runs in a couple milliseconds.
// By going to n / 2 instead of sqrt(n) we net 8 byte savings and no
// scaling performance cost
for (let factor = 2; factor <= n / 2; factor++) {
if (n % factor === 0) {
isPrime = false;
}
}
if (isPrime) {
if (nPrime < 8) {
DEFAULT_STATE[nPrime] = getFractionalBits(pow(n, 1 / 2));
}
ROUND_CONSTANTS[nPrime] = getFractionalBits(pow(n, 1 / 3));
nPrime++;
}
n++;
}
// For cross-platform support we need to ensure that all 32-bit words are
// in the same endianness. A UTF-8 TextEncoder will return BigEndian data,
// so upon reading or writing to our ArrayBuffer we'll only swap the bytes
// if our system is LittleEndian (which is about 99% of CPUs)
const LittleEndian = !!new uint8Array(new uint32Array([1]).buffer)[0];
function convertEndian(word: number) {
if (LittleEndian) {
return (
// byte 1 -> byte 4
(word >>> 24) |
// byte 2 -> byte 3
(((word >>> 16) & 0xff) << 8) |
// byte 3 -> byte 2
((word & 0xff00) << 8) |
// byte 4 -> byte 1
(word << 24)
);
} else {
return word;
}
}
function rightRotate(word: number, bits: number) {
return (word >>> bits) | (word << (32 - bits));
}
function sha256(data: Uint8Array) {
// Copy default state
const STATE = DEFAULT_STATE.slice();
// Caching this reduces occurrences of ".length" in minified JavaScript
// 3 more byte savings! :D
const length = data.length;
// Pad data
const bitLength = length * 8;
const newBitLength = 512 - ((bitLength + 64) % 512) - 1 + bitLength + 65;
// "bytes" and "words" are stored BigEndian
const bytes = new uint8Array(newBitLength / 8);
const words = new uint32Array(bytes.buffer);
bytes.set(data, 0);
// Append a 1
bytes[length] = 0b10000000;
// Store length in BigEndian
words[words.length - 1] = convertEndian(bitLength);
// Loop iterator (avoid two instances of "var") -- saves 2 bytes
let round;
// Process blocks (512 bits / 64 bytes / 16 words at a time)
for (let block = 0; block < newBitLength / 32; block += 16) {
const workingState = STATE.slice();
// Rounds
for (round = 0; round < 64; round++) {
let MRound;
// Expand message
if (round < 16) {
// Convert to platform Endianness for later math
MRound = convertEndian(words[block + round]);
} else {
const gamma0x = M[round - 15];
const gamma1x = M[round - 2];
MRound =
M[round - 7] +
M[round - 16] +
(rightRotate(gamma0x, 7) ^
rightRotate(gamma0x, 18) ^
(gamma0x >>> 3)) +
(rightRotate(gamma1x, 17) ^
rightRotate(gamma1x, 19) ^
(gamma1x >>> 10));
}
// M array matches platform endianness
M[round] = MRound |= 0;
// Computation
const t1 =
(rightRotate(workingState[4], 6) ^
rightRotate(workingState[4], 11) ^
rightRotate(workingState[4], 25)) +
((workingState[4] & workingState[5]) ^
(~workingState[4] & workingState[6])) +
workingState[7] +
MRound +
ROUND_CONSTANTS[round];
const t2 =
(rightRotate(workingState[0], 2) ^
rightRotate(workingState[0], 13) ^
rightRotate(workingState[0], 22)) +
((workingState[0] & workingState[1]) ^
(workingState[2] & (workingState[0] ^ workingState[1])));
for (let i = 7; i > 0; i--) {
workingState[i] = workingState[i - 1];
}
workingState[0] = (t1 + t2) | 0;
workingState[4] = (workingState[4] + t1) | 0;
}
// Update state
for (round = 0; round < 8; round++) {
STATE[round] = (STATE[round] + workingState[round]) | 0;
}
}
// Finally the state needs to be converted to BigEndian for output
// And we want to return a Uint8Array, not a Uint32Array
return new uint8Array(
new uint32Array(
STATE.map(function (val) {
return convertEndian(val);
}),
).buffer,
);
}
function hmac(key: Uint8Array, data: ArrayLike<number>) {
if (key.length > 64) key = sha256(key);
if (key.length < 64) {
const tmp = new Uint8Array(64);
tmp.set(key, 0);
key = tmp;
}
// Generate inner and outer keys
const innerKey = new Uint8Array(64);
const outerKey = new Uint8Array(64);
for (let i = 0; i < 64; i++) {
innerKey[i] = 0x36 ^ key[i];
outerKey[i] = 0x5c ^ key[i];
}
// Append the innerKey
const msg = new Uint8Array(data.length + 64);
msg.set(innerKey, 0);
msg.set(data, 64);
// Hash the previous message and append it to the outerKey
const result = new Uint8Array(64 + 32);
result.set(outerKey, 0);
result.set(sha256(msg), 64);
// Hash the previous message
return sha256(result);
}
// Convert a string to a Uint8Array, SHA-256 it, and convert back to string
const encoder = new TextEncoder();
export function sign(
inputKey: string | Uint8Array,
inputData: string | Uint8Array,
) {
const key =
typeof inputKey === "string" ? encoder.encode(inputKey) : inputKey;
const data =
typeof inputData === "string" ? encoder.encode(inputData) : inputData;
return hmac(key, data);
}
export function hex(bin: Uint8Array) {
return bin.reduce((acc, val) => {
const hexVal = "00" + val.toString(16);
return acc + hexVal.substring(hexVal.length - 2);
}, "");
}
export function hash(str: string) {
return hex(sha256(encoder.encode(str)));
}
export function hashWithSecret(str: string, secret: string) {
return hex(sign(secret, str)).toString();
}
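A usage sketch for the exported helpers (works anywhere TextEncoder is available):

import { sign, hex, hash, hashWithSecret } from "./hmac";

hash("hello");                          // hex-encoded SHA-256 digest
hex(sign("my-secret", "payload"));      // hex-encoded HMAC-SHA-256
hashWithSecret("payload", "my-secret"); // same digest as the line above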

View File

@ -1,12 +1,42 @@
import { DEFAULT_MODELS } from "../constant";
import { LLMModel } from "../client/api";
const CustomSeq = {
val: -1000, // to sort custom models to the front, start from -1000; see constant.ts
cache: new Map<string, number>(),
next: (id: string) => {
if (CustomSeq.cache.has(id)) {
return CustomSeq.cache.get(id) as number;
} else {
let seq = CustomSeq.val++;
CustomSeq.cache.set(id, seq);
return seq;
}
},
};
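// CustomSeq.next(id) hands out -1000, -999, ... and memoizes per id, so the
// same custom model or provider keeps a stable sort key across calls.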
const customProvider = (providerName: string) => ({
id: providerName.toLowerCase(),
providerName: providerName,
providerType: "custom",
sorted: CustomSeq.next(providerName),
});
/**
 * Sorts an array of models: first by provider order; if the providers
 * match, then by model order.
 */
const sortModelTable = (models: ReturnType<typeof collectModels>) =>
models.sort((a, b) => {
if (a.provider && b.provider) {
let cmp = a.provider.sorted - b.provider.sorted;
return cmp === 0 ? a.sorted - b.sorted : cmp;
} else {
return a.sorted - b.sorted;
}
});
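// Illustrative ordering (hypothetical data): every OpenAI entry
// (provider.sorted = 1) sorts before every Azure entry (provider.sorted = 2);
// within one provider, model.sorted breaks the tie, and custom entries with
// negative CustomSeq keys float to the very front.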
export function collectModelTable(
models: readonly LLMModel[],
customModels: string,
@ -17,6 +47,7 @@ export function collectModelTable(
available: boolean;
name: string;
displayName: string;
sorted: number;
provider?: LLMModel["provider"]; // Marked as optional
isDefault?: boolean;
}
@ -84,6 +115,7 @@ export function collectModelTable(
displayName: displayName || customModelName,
available,
provider, // optional; custom models may not resolve to a known provider
sorted: CustomSeq.next(`${customModelName}@${provider?.id}`),
};
}
}
@ -99,13 +131,16 @@ export function collectModelTableWithDefaultModel(
) {
let modelTable = collectModelTable(models, customModels);
if (defaultModel && defaultModel !== "") {
if (defaultModel.includes('@')) {
if (defaultModel.includes("@")) {
if (defaultModel in modelTable) {
modelTable[defaultModel].isDefault = true;
}
} else {
for (const key of Object.keys(modelTable)) {
if (modelTable[key].available && key.split('@').shift() == defaultModel) {
if (
modelTable[key].available &&
key.split("@").shift() == defaultModel
) {
modelTable[key].isDefault = true;
break;
}
@ -123,7 +158,9 @@ export function collectModels(
customModels: string,
) {
const modelTable = collectModelTable(models, customModels);
const allModels = Object.values(modelTable);
let allModels = Object.values(modelTable);
allModels = sortModelTable(allModels);
return allModels;
}
@ -138,7 +175,10 @@ export function collectModelsWithDefaultModel(
customModels,
defaultModel,
);
const allModels = Object.values(modelTable);
let allModels = Object.values(modelTable);
allModels = sortModelTable(allModels);
return allModels;
}

View File

@ -1,19 +1,9 @@
import hash from "hash.js";
import { sign, hash as getHash, hex } from "./hmac";
// HMAC signing with SHA-256 and a secret
function sha256(message: any, secret = "", encoding?: string) {
return hash
.hmac(hash.sha256 as any, secret)
.update(message)
.digest(encoding as any);
}
// plain SHA-256 hashing
function getHash(message: any, encoding = "hex") {
return hash
.sha256()
.update(message)
.digest(encoding as any);
function sha256(message: any, secret: any, encoding?: string) {
const result = sign(secret, message);
return encoding == "hex" ? hex(result).toString() : result;
}
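// Call sites keep the same shape as with the removed hash.js version (sketch):
//   sha256(payload, secret, "hex") -> hex string of the HMAC-SHA-256 digest
//   sha256(payload, secret)        -> raw Uint8Array digest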
function getDate(timestamp: number) {

View File

@ -26,7 +26,6 @@
"@vercel/speed-insights": "^1.0.2",
"emoji-picker-react": "^4.9.2",
"fuse.js": "^7.0.0",
"hash.js": "^1.1.7",
"heic2any": "^0.0.4",
"html-to-image": "^1.11.11",
"lodash-es": "^4.17.21",

View File

@ -3799,14 +3799,6 @@ has@^1.0.3:
dependencies:
function-bind "^1.1.1"
hash.js@^1.1.7:
version "1.1.7"
resolved "https://registry.npmjs.org/hash.js/-/hash.js-1.1.7.tgz#0babca538e8d4ee4a0f8988d68866537a003cf42"
integrity sha512-taOaskGt4z4SOANNseOviYDvjEJinIkRgmp7LbKP2YTTmVxWBl87s/uzK9r+44BclBSp2X7K1hqeNfz9JbBeXA==
dependencies:
inherits "^2.0.3"
minimalistic-assert "^1.0.1"
hast-util-from-dom@^4.0.0:
version "4.2.0"
resolved "https://registry.yarnpkg.com/hast-util-from-dom/-/hast-util-from-dom-4.2.0.tgz#25836ddecc3cc0849d32749c2a7aec03e94b59a7"
@ -3970,7 +3962,7 @@ inflight@^1.0.4:
once "^1.3.0"
wrappy "1"
inherits@2, inherits@^2.0.3:
inherits@2:
version "2.0.4"
resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c"
integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==
@ -4962,11 +4954,6 @@ mimic-fn@^4.0.0:
resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-4.0.0.tgz#60a90550d5cb0b239cca65d893b1a53b29871ecc"
integrity sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==
minimalistic-assert@^1.0.1:
version "1.0.1"
resolved "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz#2e194de044626d4a10e7f7fbc00ce73e83e4d5c7"
integrity sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A==
minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2:
version "3.1.2"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b"