From 1a15e09da386b787fd0eb8ba66a20a163457a24d Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Fri, 11 Apr 2025 18:32:27 -0700
Subject: [PATCH] show reasoning content on chat ui

---
 .../src/components/chat_ui.tsx                     | 47 +++++++++++++-
 .../components/chat_ui/ReasoningContent.tsx        | 64 +++++++++++++++++++
 .../chat_ui/llm_calls/chat_completion.tsx          | 14 ++--
 .../chat_ui/llm_calls/process_stream.tsx           | 28 ++++
 .../src/components/chat_ui/types.ts                | 37 +++++
 5 files changed, 182 insertions(+), 8 deletions(-)
 create mode 100644 ui/litellm-dashboard/src/components/chat_ui/ReasoningContent.tsx
 create mode 100644 ui/litellm-dashboard/src/components/chat_ui/llm_calls/process_stream.tsx
 create mode 100644 ui/litellm-dashboard/src/components/chat_ui/types.ts

diff --git a/ui/litellm-dashboard/src/components/chat_ui.tsx b/ui/litellm-dashboard/src/components/chat_ui.tsx
index 7c52d4a357..2da516891a 100644
--- a/ui/litellm-dashboard/src/components/chat_ui.tsx
+++ b/ui/litellm-dashboard/src/components/chat_ui.tsx
@@ -33,6 +33,8 @@ import { coy } from 'react-syntax-highlighter/dist/esm/styles/prism';
 import EndpointSelector from "./chat_ui/EndpointSelector";
 import TagSelector from "./tag_management/TagSelector";
 import { determineEndpointType } from "./chat_ui/EndpointUtils";
+import { MessageType } from "./chat_ui/types";
+import ReasoningContent from "./chat_ui/ReasoningContent";
 import {
   SendOutlined,
   ApiOutlined,
@@ -65,7 +67,7 @@ const ChatUI: React.FC = ({
   );
   const [apiKey, setApiKey] = useState("");
   const [inputMessage, setInputMessage] = useState("");
-  const [chatHistory, setChatHistory] = useState<{ role: string; content: string; model?: string; isImage?: boolean }[]>([]);
+  const [chatHistory, setChatHistory] = useState<MessageType[]>([]);
   const [selectedModel, setSelectedModel] = useState<string | undefined>(
     undefined
   );
@@ -138,7 +140,11 @@ const ChatUI: React.FC = ({
     if (lastMessage && lastMessage.role === role && !lastMessage.isImage) {
       return [
         ...prevHistory.slice(0, prevHistory.length - 1),
-        { role, content: lastMessage.content + chunk, model },
+        {
+          ...lastMessage,
+          content: lastMessage.content + chunk,
+          model
+        },
       ];
     } else {
       return [...prevHistory, { role, content: chunk, model }];
@@ -146,6 +152,37 @@
     });
   };
 
+  const updateReasoningContent = (chunk: string) => {
+    setChatHistory((prevHistory) => {
+      const lastMessage = prevHistory[prevHistory.length - 1];
+
+      if (lastMessage && lastMessage.role === "assistant" && !lastMessage.isImage) {
+        return [
+          ...prevHistory.slice(0, prevHistory.length - 1),
+          {
+            ...lastMessage,
+            reasoningContent: (lastMessage.reasoningContent || "") + chunk
+          },
+        ];
+      } else {
+        // If there's no assistant message yet, we'll create one with empty content
+        // but with reasoning content
+        if (prevHistory.length > 0 && prevHistory[prevHistory.length - 1].role === "user") {
+          return [
+            ...prevHistory,
+            {
+              role: "assistant",
+              content: "",
+              reasoningContent: chunk
+            }
+          ];
+        }
+
+        return prevHistory;
+      }
+    });
+  };
+
   const updateImageUI = (imageUrl: string, model: string) => {
     setChatHistory((prevHistory) => [
       ...prevHistory,
@@ -206,7 +243,8 @@
           selectedModel,
           effectiveApiKey,
           selectedTags,
-          signal
+          signal,
+          updateReasoningContent
         );
       } else if (endpointType === EndpointType.IMAGE) {
         // For image generation
@@ -410,6 +448,9 @@ const ChatUI: React.FC = ({
                       )}
+                      {message.reasoningContent && (
+                        <ReasoningContent reasoningContent={message.reasoningContent} />
+                      )}
                       {message.isImage ? (
diff --git a/ui/litellm-dashboard/src/components/chat_ui/ReasoningContent.tsx b/ui/litellm-dashboard/src/components/chat_ui/ReasoningContent.tsx
new file mode 100644
--- /dev/null
+++ b/ui/litellm-dashboard/src/components/chat_ui/ReasoningContent.tsx
@@ -0,0 +1,64 @@
+import React, { useState } from "react";
+
+import ReactMarkdown from "react-markdown";
+import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
+import { coy } from "react-syntax-highlighter/dist/esm/styles/prism";
+
+interface ReasoningContentProps {
+  reasoningContent: string;
+}
+
+// Collapsible panel that shows the model's reasoning ("thinking") tokens
+// above the final answer, rendered as markdown with code highlighting.
+const ReasoningContent: React.FC<ReasoningContentProps> = ({ reasoningContent }) => {
+  const [isExpanded, setIsExpanded] = useState(true);
+
+  if (!reasoningContent) return null;
+
+  return (
+    <div className="reasoning-content mt-1 mb-2">
+      <button
+        type="button"
+        onClick={() => setIsExpanded(!isExpanded)}
+        className="flex items-center text-xs text-gray-500 hover:text-gray-700"
+      >
+        <span className="mr-1">{isExpanded ? "▼" : "▶"}</span>
+        <span className="font-medium">Reasoning</span>
+      </button>
+
+      {isExpanded && (
+        <div className="mt-1 p-2 bg-gray-50 border border-gray-200 rounded text-sm text-gray-600">
+          <ReactMarkdown
+            components={{
+              code({node, inline, className, children, ...props}: React.ComponentPropsWithoutRef<'code'> & {
+                inline?: boolean;
+                node?: any;
+              }) {
+                const match = /language-(\w+)/.exec(className || '');
+                return !inline && match ? (
+                  <SyntaxHighlighter
+                    style={coy as any}
+                    language={match[1]}
+                    PreTag="div"
+                    className="rounded-md my-2"
+                    {...props}
+                  >
+                    {String(children).replace(/\n$/, '')}
+                  </SyntaxHighlighter>
+                ) : (
+                  <code className={className} {...props}>
+                    {children}
+                  </code>
+                );
+              }
+            }}
+          >
+            {reasoningContent}
+          </ReactMarkdown>
+        </div>
+      )}
+    </div>
+  );
+};
+
+export default ReasoningContent;
\ No newline at end of file
diff --git a/ui/litellm-dashboard/src/components/chat_ui/llm_calls/chat_completion.tsx b/ui/litellm-dashboard/src/components/chat_ui/llm_calls/chat_completion.tsx
index a5a44e94f6..7d1bdab66f 100644
--- a/ui/litellm-dashboard/src/components/chat_ui/llm_calls/chat_completion.tsx
+++ b/ui/litellm-dashboard/src/components/chat_ui/llm_calls/chat_completion.tsx
@@ -1,14 +1,16 @@
 import openai from "openai";
 import { ChatCompletionMessageParam } from "openai/resources/chat/completions";
 import { message } from "antd";
+import { processStreamingResponse } from "./process_stream";
 
 export async function makeOpenAIChatCompletionRequest(
   chatHistory: { role: string; content: string }[],
-  updateUI: (chunk: string, model: string) => void,
+  updateUI: (chunk: string, model?: string) => void,
   selectedModel: string,
   accessToken: string,
   tags?: string[],
-  signal?: AbortSignal
+  signal?: AbortSignal,
+  onReasoningContent?: (content: string) => void
 ) {
   // base url should be the current base_url
   const isLocal = process.env.NODE_ENV === "development";
@@ -35,9 +37,11 @@ export async function makeOpenAIChatCompletionRequest(
 
     for await (const chunk of response) {
       console.log(chunk);
-      if (chunk.choices[0].delta.content) {
-        updateUI(chunk.choices[0].delta.content, chunk.model);
-      }
+      // Process the chunk using our utility
+      processStreamingResponse(chunk, {
+        onContent: updateUI,
+        onReasoningContent: onReasoningContent || (() => {})
+      });
     }
   } catch (error) {
     if (signal?.aborted) {
diff --git a/ui/litellm-dashboard/src/components/chat_ui/llm_calls/process_stream.tsx b/ui/litellm-dashboard/src/components/chat_ui/llm_calls/process_stream.tsx
new file mode 100644
index 0000000000..3af3fadc01
--- /dev/null
+++ b/ui/litellm-dashboard/src/components/chat_ui/llm_calls/process_stream.tsx
@@ -0,0 +1,28 @@
+import { StreamingResponse } from "../types";
+
+export interface StreamProcessCallbacks {
+  onContent: (content: string, model?: string) => void;
+  onReasoningContent: (content: string) => void;
+}
+
+export const processStreamingResponse = (
+  response: StreamingResponse,
+  callbacks: StreamProcessCallbacks
+) => {
+  // Extract model information if available
+  const model = response.model;
+
+  // Process regular content
+  if (response.choices && response.choices.length > 0) {
+    const choice = response.choices[0];
+
+    if (choice.delta?.content) {
+      callbacks.onContent(choice.delta.content, model);
+    }
+
+    // Process reasoning content if it exists
+    if (choice.delta?.reasoning_content) {
+      callbacks.onReasoningContent(choice.delta.reasoning_content);
+    }
+  }
+};
\ No newline at end of file
diff --git a/ui/litellm-dashboard/src/components/chat_ui/types.ts b/ui/litellm-dashboard/src/components/chat_ui/types.ts
new file mode 100644
index 0000000000..9ddc69af5e
--- /dev/null
+++ b/ui/litellm-dashboard/src/components/chat_ui/types.ts
@@ -0,0 +1,37 @@
+export interface Delta {
+  content?: string;
+  reasoning_content?: string;
+  role?: string;
+  function_call?: any;
+  tool_calls?: any;
+  audio?: any;
+  refusal?: any;
+  provider_specific_fields?: any;
+}
+
+export interface StreamingChoices {
+  finish_reason?: string | null;
+  index: number;
+  delta: Delta;
+  logprobs?: any;
+}
+
+export interface StreamingResponse {
+  id: string;
+  created: number;
+  model: string;
+  object: string;
+  system_fingerprint?: string;
+  choices: StreamingChoices[];
+  provider_specific_fields?: any;
+  stream_options?: any;
+  citations?: any;
+}
+
+export interface MessageType {
+  role: string;
+  content: string;
+  model?: string;
+  isImage?: boolean;
+  reasoningContent?: string;
+}
\ No newline at end of file
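
Usage sketch (illustrative note, not part of the patch): processStreamingResponse takes one streaming chunk shaped like StreamingResponse and routes delta.content to the text callback and delta.reasoning_content to the reasoning callback; ChatUI accumulates the latter into MessageType.reasoningContent, which ReasoningContent then renders. The chunk literal, model name, and import paths below are assumptions made for the example (a sibling module of process_stream.tsx).

    import { processStreamingResponse } from "./process_stream";
    import { StreamingResponse } from "../types";

    // One fake chunk that carries reasoning tokens instead of answer text.
    const chunk: StreamingResponse = {
      id: "chatcmpl-123",
      created: 1744420000,
      model: "gpt-4o",
      object: "chat.completion.chunk",
      choices: [
        {
          index: 0,
          delta: { reasoning_content: "First, compare the two dates..." },
          finish_reason: null,
        },
      ],
    };

    processStreamingResponse(chunk, {
      onContent: (text, model) => console.log("content:", text, model),
      onReasoningContent: (text) => console.log("reasoning:", text),
    });
    // -> reasoning: First, compare the two dates...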