Support reasoning for OpenRouter using OpenAI provider
parent b966107117
commit 6e082ad7ac
app/client/platforms/openai.ts

@@ -2,10 +2,10 @@
 // azure and openai, using same models. so using same LLMApi.
 import {
   ApiPath,
-  OPENAI_BASE_URL,
-  DEFAULT_MODELS,
-  OpenaiPath,
   Azure,
+  DEFAULT_MODELS,
+  OPENAI_BASE_URL,
+  OpenaiPath,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
 } from "@/app/constant";
@@ -18,13 +18,13 @@ import {
 } from "@/app/store";
 import { collectModelsWithDefaultModel } from "@/app/utils/model";
 import {
-  preProcessImageContent,
-  uploadImage,
   base64Image2Blob,
+  preProcessImageContent,
   streamWithThink,
+  uploadImage,
 } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
-import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
+import { DalleQuality, DalleStyle, ModelSize } from "@/app/typing";
 
 import {
   ChatOptions,
@@ -39,9 +39,9 @@ import Locale from "../../locales";
 import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
-  isVisionModel,
-  isDalle3 as _isDalle3,
   getTimeoutMSByModel,
+  isDalle3 as _isDalle3,
+  isVisionModel,
 } from "@/app/utils";
 
 import { fetch } from "@/app/utils/stream";
@@ -294,6 +294,14 @@ export class ChatGPTApi implements LLMApi {
       useChatStore.getState().currentSession().mask?.plugin || [],
     );
     // console.log("getAsTools", tools, funcs);
+
+    // Add "include_reasoning" for OpenRouter: https://openrouter.ai/announcements/reasoning-tokens-for-thinking-models
+    const isOpenRouter = chatPath.includes("openrouter.ai");
+    if (isOpenRouter) {
+      // @ts-ignore
+      requestPayload["include_reasoning"] = true;
+    }
+
     streamWithThink(
       chatPath,
       requestPayload,
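Note: the hunk above carries the request-side half of the commit. OpenRouter's reasoning-token support is opt-in through a non-standard include_reasoning field, so the provider is detected by URL and the flag is injected into the payload; the // @ts-ignore is needed because the existing RequestPayload type does not declare the field. A minimal standalone sketch of the same idea, using a simplified, hypothetical payload type rather than the project's real RequestPayload:

// Sketch only: SketchPayload is a stand-in, not the project's RequestPayload.
interface SketchPayload {
  model: string;
  messages: { role: string; content: string }[];
  stream: boolean;
  include_reasoning?: boolean; // OpenRouter extension, absent from the stock OpenAI schema
}

function withOpenRouterReasoning(
  chatPath: string,
  payload: SketchPayload,
): SketchPayload {
  // Detect OpenRouter purely by hostname, mirroring the diff above.
  const isOpenRouter = chatPath.includes("openrouter.ai");
  return isOpenRouter ? { ...payload, include_reasoning: true } : payload;
}

Declaring the field as optional is what lets the sketch skip the @ts-ignore; the commit instead writes into a payload whose type does not know the field.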
@@ -310,6 +318,7 @@ export class ChatGPTApi implements LLMApi {
                 content: string;
                 tool_calls: ChatMessageTool[];
                 reasoning_content: string | null;
+                reasoning: string | null;
               };
             }>;
 
@@ -335,7 +344,9 @@ export class ChatGPTApi implements LLMApi {
               }
             }
 
-            const reasoning = choices[0]?.delta?.reasoning_content;
+            const reasoning = isOpenRouter
+              ? choices[0]?.delta?.reasoning
+              : choices[0]?.delta?.reasoning_content;
             const content = choices[0]?.delta?.content;
 
             // Skip if both content and reasoning_content are empty or null
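Note: with include_reasoning set, OpenRouter streams thinking tokens under delta.reasoning, while the pre-existing path reads the DeepSeek-style delta.reasoning_content; the two hunks above add the new field to the delta type and select between the two per provider. A rough sketch of that selection feeding the streaming renderer, with a Delta type trimmed down for illustration (the real parseSSE also handles tool calls):

// Sketch only: trimmed-down delta shape.
interface Delta {
  content?: string | null;
  reasoning?: string | null; // OpenRouter
  reasoning_content?: string | null; // DeepSeek-style providers
}

function toChunk(
  delta: Delta,
  isOpenRouter: boolean,
): { isThinking: boolean; content: string } {
  const reasoning = isOpenRouter ? delta.reasoning : delta.reasoning_content;
  if (reasoning && reasoning.length > 0) {
    return { isThinking: true, content: reasoning };
  }
  return { isThinking: false, content: delta.content ?? "" };
}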
@@ -411,6 +422,7 @@ export class ChatGPTApi implements LLMApi {
       options.onError?.(e as Error);
     }
   }
+
   async usage() {
     const formatDate = (d: Date) =>
       `${d.getFullYear()}-${(d.getMonth() + 1).toString().padStart(2, "0")}-${d
@@ -514,4 +526,5 @@ export class ChatGPTApi implements LLMApi {
     }));
   }
 }
+
 export { OpenaiPath };
app/utils/chat.ts

@@ -1,7 +1,7 @@
 import {
   CACHE_URL_PREFIX,
-  UPLOAD_URL,
   REQUEST_TIMEOUT_MS,
+  UPLOAD_URL,
 } from "@/app/constant";
 import { RequestMessage } from "@/app/client/api";
 import Locale from "@/app/locales";
@@ -93,6 +93,7 @@ export async function preProcessImageContent(
 }
 
 const imageCaches: Record<string, string> = {};
+
 export function cacheImageToBase64Image(imageUrl: string) {
   if (imageUrl.includes(CACHE_URL_PREFIX)) {
     if (!imageCaches[imageUrl]) {
@@ -367,6 +368,7 @@ export function stream(
       openWhenHidden: true,
     });
   }
+
   console.debug("[ChatAPI] start");
   chatApi(chatPath, headers, requestPayload, tools); // call fetchEventSource
 }
@@ -609,16 +611,9 @@ export function streamWithThink(
             if (remainText.length > 0) {
               remainText += "\n";
             }
-            remainText += "> " + chunk.content;
-          } else {
-            // Handle newlines in thinking content
-            if (chunk.content.includes("\n\n")) {
-              const lines = chunk.content.split("\n\n");
-              remainText += lines.join("\n\n> ");
-            } else {
-              remainText += chunk.content;
-            }
+            remainText += "> ";
           }
+          remainText += chunk.content.replaceAll("\n", "\n> ");
         } else {
           // If in normal mode
           if (isInThinkingMode || isThinkingChanged) {
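Note: the hunk above simplifies the markdown-blockquote rendering of thinking content. The old code only re-quoted after double newlines, so thinking text containing a single \n escaped the "> " quote; the new code writes the "> " prefix once when a thinking block opens and keeps every later line quoted by rewriting each newline as "\n> ". A standalone sketch of the rule (renderThinking is a hypothetical helper, not part of the codebase):

// Sketch only: accumulate thinking chunks into one markdown blockquote.
function renderThinking(chunks: string[]): string {
  let out = "> "; // open the quote once per thinking block
  for (const chunk of chunks) {
    // every newline inside the quote must be followed by "> "
    out += chunk.replaceAll("\n", "\n> ");
  }
  return out;
}

// renderThinking(["first\nsecond", "\nthird"]) === "> first\n> second\n> third"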
@@ -644,6 +639,7 @@ export function streamWithThink(
       openWhenHidden: true,
     });
   }
+
   console.debug("[ChatAPI] start");
   chatApi(chatPath, headers, requestPayload, tools); // call fetchEventSource
 }