ChatGPT-Next-Web/app/client/platforms/moonshot.ts

'use client';
import type { ChatMessageTool } from '@/app/store';
import type {
  ChatOptions,
  LLMApi,
  LLMModel,
  SpeechOptions,
} from '../api';
import type { RequestPayload } from './openai';
import { getClientConfig } from '@/app/config/client';
// Azure and OpenAI use the same models, so they share the same LLMApi.
import {
  ApiPath,
  Moonshot,
  MOONSHOT_BASE_URL,
  REQUEST_TIMEOUT_MS,
} from '@/app/constant';
import {
  useAccessStore,
  useAppConfig,
  useChatStore,
  usePluginStore,
} from '@/app/store';
import { getMessageTextContent } from '@/app/utils';
import { stream } from '@/app/utils/chat';
import { fetch } from '@/app/utils/stream';
import { getHeaders } from '../api';

export class MoonshotApi implements LLMApi {
  private disableListModels = true;
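
  // Resolves the request URL. Order of precedence, as implemented below:
  // (1) a user-supplied custom Moonshot endpoint, (2) the official
  // MOONSHOT_BASE_URL when running as a desktop app, (3) the built-in
  // proxy path (ApiPath.Moonshot) when running in the browser.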
  path(path: string): string {
    const accessStore = useAccessStore.getState();

    let baseUrl = '';
    if (accessStore.useCustomConfig) {
      baseUrl = accessStore.moonshotUrl;
    }
    if (baseUrl.length === 0) {
      const isApp = !!getClientConfig()?.isApp;
      const apiPath = ApiPath.Moonshot;
      baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath;
    }
    if (baseUrl.endsWith('/')) {
      baseUrl = baseUrl.slice(0, baseUrl.length - 1);
    }
    if (!baseUrl.startsWith('http') && !baseUrl.startsWith(ApiPath.Moonshot)) {
      baseUrl = `https://${baseUrl}`;
    }

    console.log('[Proxy Endpoint] ', baseUrl, path);
    return [baseUrl, path].join('/');
  }
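
  // Extracts the assistant's text from a non-streaming completion response.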
  extractMessage(res: any) {
    return res.choices?.at(0)?.message?.content ?? '';
  }

  speech(options: SpeechOptions): Promise<ArrayBuffer> {
    throw new Error('Method not implemented.');
  }
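
  // Sends a chat completion request. Streaming responses go through the shared
  // stream() helper (which also wires up plugin tool calls); non-streaming
  // responses are fetched once and parsed with extractMessage().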
  async chat(options: ChatOptions) {
    const messages: ChatOptions['messages'] = [];
    for (const v of options.messages) {
      const content = getMessageTextContent(v);
      messages.push({ role: v.role, content });
    }

    const modelConfig = {
      ...useAppConfig.getState().modelConfig,
      ...useChatStore.getState().currentSession().mask.modelConfig,
      ...{
        model: options.config.model,
        providerName: options.config.providerName,
      },
    };

    const requestPayload: RequestPayload = {
      messages,
      stream: options.config.stream,
      model: modelConfig.model,
      temperature: modelConfig.temperature,
      presence_penalty: modelConfig.presence_penalty,
      frequency_penalty: modelConfig.frequency_penalty,
      top_p: modelConfig.top_p,
      // max_tokens is deliberately omitted; the upstream author chose not to
      // send it (previously `max_tokens: Math.max(modelConfig.max_tokens, 1024)`).
    };

    console.log('[Request] openai payload: ', requestPayload);

    const shouldStream = !!options.config.stream;
    const controller = new AbortController();
    options.onController?.(controller);

    try {
      const chatPath = this.path(Moonshot.ChatPath);
      const chatPayload = {
        method: 'POST',
        body: JSON.stringify(requestPayload),
        signal: controller.signal,
        headers: getHeaders(),
      };

      // Abort the request if it exceeds the configured timeout.
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
        REQUEST_TIMEOUT_MS,
      );

      if (shouldStream) {
        const [tools, funcs] = usePluginStore
          .getState()
          .getAsTools(
            useChatStore.getState().currentSession().mask?.plugin || [],
          );
        return stream(
          chatPath,
          requestPayload,
          getHeaders(),
          tools as any,
          funcs,
          controller,
          // parseSSE
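          // How the SSE parser below handles tool calls: a new call arrives
          // with an `id` and is pushed onto runTools; subsequent chunks carry
          // only an `index` plus an `arguments` fragment, which is appended to
          // the matching entry until the JSON argument string is complete.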
          (text: string, runTools: ChatMessageTool[]) => {
            // console.log("parseSSE", text, runTools);
            const json = JSON.parse(text);
            const choices = json.choices as Array<{
              delta: {
                content: string;
                tool_calls: ChatMessageTool[];
              };
            }>;
            const tool_calls = choices[0]?.delta?.tool_calls;
            if (tool_calls && tool_calls.length > 0) {
              const index = tool_calls[0]?.index;
              const id = tool_calls[0]?.id;
              const args = tool_calls[0]?.function?.arguments;
              if (id) {
                // First chunk of a tool call: record it.
                runTools.push({
                  id,
                  type: tool_calls[0]?.type,
                  function: {
                    name: tool_calls[0]?.function?.name as string,
                    arguments: args,
                  },
                });
              } else {
                // Continuation chunk: append the argument fragment.
                // @ts-ignore
                runTools[index].function.arguments += args;
              }
            }
            return choices[0]?.delta?.content;
          },
          // processToolMessage: appends the tool_call message and the tool
          // call results to the conversation before the follow-up request.
          (
            requestPayload: RequestPayload,
            toolCallMessage: any,
            toolCallResult: any[],
          ) => {
            // @ts-ignore
            requestPayload?.messages?.splice(
              // @ts-ignore
              requestPayload?.messages?.length,
              0,
              toolCallMessage,
              ...toolCallResult,
            );
          },
          options,
        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

        const resJson = await res.json();
        const message = this.extractMessage(resJson);
        options.onFinish(message, res);
      }
    } catch (e) {
      console.log('[Request] failed to make a chat request', e);
      options.onError?.(e as Error);
    }
  }
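
  // Usage reporting is not implemented for Moonshot; zeros are returned as placeholders.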
  async usage() {
    return {
      used: 0,
      total: 0,
    };
  }
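
  // Model listing is disabled for this provider (see disableListModels above),
  // so an empty list is returned.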
  async models(): Promise<LLMModel[]> {
    return [];
  }
}
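
// Usage sketch (illustrative; not part of the upstream file): how a caller
// might drive MoonshotApi.chat(). The exact ChatOptions shape comes from
// '../api'; the message content, model name, and callback bodies below are
// assumptions for demonstration only, hence the hedging cast.
export async function demoMoonshotChat() {
  const api = new MoonshotApi();
  await api.chat({
    messages: [{ role: 'user', content: 'Hello, Kimi!' }],
    config: { model: 'moonshot-v1-8k', stream: true },
    // Called with each accumulated partial message while streaming.
    onUpdate: (message: string) => console.log('[partial]', message),
    // Called once with the final message and the underlying Response.
    onFinish: (message: string, res: Response) =>
      console.log('[final]', message, res.status),
    onError: (e: Error) => console.error('[error]', e),
  } as unknown as ChatOptions);
}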