diff --git a/app/client/platforms/deepseek.ts b/app/client/platforms/deepseek.ts
index b21d24cef..9dc0ffa87 100644
--- a/app/client/platforms/deepseek.ts
+++ b/app/client/platforms/deepseek.ts
@@ -65,13 +65,24 @@ export class DeepSeekApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
-    for (const v of options.messages) {
-      if (v.role === "assistant") {
-        const content = getMessageTextContentWithoutThinking(v);
-        messages.push({ role: v.role, content });
-      } else {
-        const content = getMessageTextContent(v);
-        messages.push({ role: v.role, content });
+    if (options.config.model === "deepseek-reasoner") {
+      // Only take the last message
+      const lastMessage = options.messages[options.messages.length - 1];
+      const content =
+        lastMessage.role === "assistant"
+          ? getMessageTextContentWithoutThinking(lastMessage)
+          : getMessageTextContent(lastMessage);
+      messages.push({ role: lastMessage.role, content });
+    } else {
+      // Process all messages
+      for (const v of options.messages) {
+        if (v.role === "assistant") {
+          const content = getMessageTextContentWithoutThinking(v);
+          messages.push({ role: v.role, content });
+        } else {
+          const content = getMessageTextContent(v);
+          messages.push({ role: v.role, content });
+        }
       }
     }