Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00

working image generation on chat ui

This commit is contained in:
  parent e44318c605
  commit 6ffe3f1e46

2 changed files with 138 additions and 47 deletions
File 1 of 2: the ChatUI component.

@@ -25,6 +25,7 @@ import {
 import { message, Select } from "antd";
 import { modelAvailableCall } from "./networking";
 import { makeOpenAIChatCompletionRequest } from "./chat_ui/llm_calls/chat_completion";
+import { makeOpenAIImageGenerationRequest } from "./chat_ui/llm_calls/image_generation";
 import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
 import { Typography } from "antd";
 import { coy } from 'react-syntax-highlighter/dist/esm/styles/prism';
@@ -49,13 +50,14 @@ const ChatUI: React.FC<ChatUIProps> = ({
   );
   const [apiKey, setApiKey] = useState("");
   const [inputMessage, setInputMessage] = useState("");
-  const [chatHistory, setChatHistory] = useState<{ role: string; content: string; model?: string }[]>([]);
+  const [chatHistory, setChatHistory] = useState<{ role: string; content: string; model?: string; isImage?: boolean }[]>([]);
   const [selectedModel, setSelectedModel] = useState<string | undefined>(
     undefined
   );
   const [showCustomModelInput, setShowCustomModelInput] = useState<boolean>(false);
   const [modelInfo, setModelInfo] = useState<any[]>([]);
   const customModelTimeout = useRef<NodeJS.Timeout | null>(null);
+  const [endpointType, setEndpointType] = useState<'chat' | 'image'>('chat');

   const chatEndRef = useRef<HTMLDivElement>(null);
@@ -67,8 +69,6 @@ const ChatUI: React.FC<ChatUIProps> = ({
       return;
     }
-
-

     // Fetch model info and set the default selected model
     const fetchModelInfo = async () => {
       try {
@@ -122,11 +122,11 @@ const ChatUI: React.FC<ChatUIProps> = ({
     }
   }, [chatHistory]);

-  const updateUI = (role: string, chunk: string, model?: string) => {
+  const updateTextUI = (role: string, chunk: string, model?: string) => {
     setChatHistory((prevHistory) => {
       const lastMessage = prevHistory[prevHistory.length - 1];

-      if (lastMessage && lastMessage.role === role) {
+      if (lastMessage && lastMessage.role === role && !lastMessage.isImage) {
         return [
           ...prevHistory.slice(0, prevHistory.length - 1),
           { role, content: lastMessage.content + chunk, model },
@@ -137,6 +137,13 @@ const ChatUI: React.FC<ChatUIProps> = ({
     });
   };

+  const updateImageUI = (imageUrl: string, model: string) => {
+    setChatHistory((prevHistory) => [
+      ...prevHistory,
+      { role: "assistant", content: imageUrl, model, isImage: true }
+    ]);
+  };
+
   const handleKeyDown = (event: React.KeyboardEvent<HTMLInputElement>) => {
     if (event.key === 'Enter') {
       handleSendMessage();
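The two updaters shape chat history differently: updateTextUI merges consecutive assistant chunks into one streamed message, while updateImageUI always appends a fresh entry flagged isImage. An illustrative sketch of the resulting state (the model names here are placeholders, not values from the commit):

// Illustrative only: history after a streamed text reply and an image reply.
const history = [
  { role: "user", content: "draw a cat" },
  // successive updateTextUI("assistant", chunk) calls collapse into one entry:
  { role: "assistant", content: "Sure, generating now.", model: "gpt-4o" },
  // updateImageUI appends a separate entry that the table renders as <img>:
  { role: "assistant", content: "https://example.com/cat.png", model: "dall-e-3", isImage: true },
];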
@@ -160,24 +167,34 @@ const ChatUI: React.FC<ChatUIProps> = ({
     // Create message object without model field for API call
     const newUserMessage = { role: "user", content: inputMessage };

-    // Create chat history for API call - strip out model field
-    const apiChatHistory = [...chatHistory.map(({ role, content }) => ({ role, content })), newUserMessage];
-
-    // Update UI with full message object (including model field for display)
+    // Update UI with full message object
     setChatHistory([...chatHistory, newUserMessage]);

     try {
       if (selectedModel) {
+        if (endpointType === 'chat') {
+          // Create chat history for API call - strip out model field and isImage field
+          const apiChatHistory = [...chatHistory.filter(msg => !msg.isImage).map(({ role, content }) => ({ role, content })), newUserMessage];
+
           await makeOpenAIChatCompletionRequest(
             apiChatHistory,
-            (chunk, model) => updateUI("assistant", chunk, model),
+            (chunk, model) => updateTextUI("assistant", chunk, model),
             selectedModel,
             effectiveApiKey
           );
+        } else {
+          // For image generation
+          await makeOpenAIImageGenerationRequest(
+            inputMessage,
+            (imageUrl, model) => updateImageUI(imageUrl, model),
+            selectedModel,
+            effectiveApiKey
+          );
+        }
       }
     } catch (error) {
-      console.error("Error fetching model response", error);
-      updateUI("assistant", "Error fetching model response");
+      console.error("Error fetching response", error);
+      updateTextUI("assistant", "Error fetching response");
     }

     setInputMessage("");
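The filter in apiChatHistory matters because image turns store a URL or data URL as their content; replaying those into /chat/completions would pollute the prompt. A minimal standalone sketch of the same transformation (the type and function names are illustrative, not from the commit):

type ChatMessage = { role: string; content: string; model?: string; isImage?: boolean };

// Drop image turns and UI-only fields before calling /chat/completions.
function toApiHistory(history: ChatMessage[]): { role: string; content: string }[] {
  return history
    .filter((msg) => !msg.isImage)
    .map(({ role, content }) => ({ role, content }));
}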
@@ -198,12 +215,16 @@ const ChatUI: React.FC<ChatUIProps> = ({
     );
   }

-  const onChange = (value: string) => {
+  const onModelChange = (value: string) => {
     console.log(`selected ${value}`);
     setSelectedModel(value);
     setShowCustomModelInput(value === 'custom');
   };

+  const handleEndpointChange = (value: string) => {
+    setEndpointType(value as 'chat' | 'image');
+  };
+
   return (
     <div style={{ width: "100%", position: "relative" }}>
       <Grid className="gap-2 p-8 h-[80vh] w-full mt-2">
@@ -240,10 +261,21 @@ const ChatUI: React.FC<ChatUIProps> = ({
           )}
         </Col>
         <Col className="mx-2">
+          <Text>Endpoint Type:</Text>
+          <Select
+            defaultValue="chat"
+            style={{ width: "350px", marginBottom: "12px" }}
+            onChange={handleEndpointChange}
+            options={[
+              { value: 'chat', label: '/chat/completions' },
+              { value: 'image', label: '/images/generations' }
+            ]}
+          />
+
           <Text>Select Model:</Text>
           <Select
             placeholder="Select a Model"
-            onChange={onChange}
+            onChange={onModelChange}
             options={[
               ...modelInfo,
               { value: 'custom', label: 'Enter custom model' }
@@ -322,6 +354,13 @@ const ChatUI: React.FC<ChatUIProps> = ({
             wordBreak: "break-word",
             maxWidth: "100%"
           }}>
+            {message.isImage ? (
+              <img
+                src={message.content}
+                alt="Generated image"
+                style={{ maxWidth: '100%', maxHeight: '500px' }}
+              />
+            ) : (
             <ReactMarkdown
               components={{
                 code({node, inline, className, children, ...props}: React.ComponentPropsWithoutRef<'code'> & {
@@ -348,6 +387,7 @@ const ChatUI: React.FC<ChatUIProps> = ({
             >
               {message.content}
             </ReactMarkdown>
+            )}
           </div>
         </TableCell>
       </TableRow>
@@ -369,13 +409,13 @@ const ChatUI: React.FC<ChatUIProps> = ({
           value={inputMessage}
           onChange={(e) => setInputMessage(e.target.value)}
           onKeyDown={handleKeyDown}
-          placeholder="Type your message..."
+          placeholder={endpointType === 'chat' ? "Type your message..." : "Describe the image you want to generate..."}
         />
         <Button
           onClick={handleSendMessage}
           className="ml-2"
         >
-          Send
+          {endpointType === 'chat' ? "Send" : "Generate"}
         </Button>
       </div>
     </div>
File 2 of 2: new file, imported above as "./chat_ui/llm_calls/image_generation".

@@ -0,0 +1,51 @@
+import openai from "openai";
+import { message } from "antd";
+
+export async function makeOpenAIImageGenerationRequest(
+  prompt: string,
+  updateUI: (imageUrl: string, model: string) => void,
+  selectedModel: string,
+  accessToken: string
+) {
+  // base url should be the current base_url
+  const isLocal = process.env.NODE_ENV === "development";
+  if (isLocal !== true) {
+    console.log = function () {};
+  }
+  console.log("isLocal:", isLocal);
+  const proxyBaseUrl = isLocal
+    ? "http://localhost:4000"
+    : window.location.origin;
+  const client = new openai.OpenAI({
+    apiKey: accessToken,
+    baseURL: proxyBaseUrl,
+    dangerouslyAllowBrowser: true,
+  });
+
+  try {
+    const response = await client.images.generate({
+      model: selectedModel,
+      prompt: prompt,
+    });
+
+    console.log(response.data);
+
+    if (response.data && response.data[0]) {
+      // Handle either URL or base64 data from response
+      if (response.data[0].url) {
+        // Use the URL directly
+        updateUI(response.data[0].url, selectedModel);
+      } else if (response.data[0].b64_json) {
+        // Convert base64 to data URL format
+        const base64Data = response.data[0].b64_json;
+        updateUI(`data:image/png;base64,${base64Data}`, selectedModel);
+      } else {
+        throw new Error("No image data found in response");
+      }
+    } else {
+      throw new Error("Invalid response format");
+    }
+  } catch (error) {
+    message.error(`Error occurred while generating image. Please try again. Error: ${error}`, 20);
+  }
+}
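For context, a minimal sketch of how the new helper might be exercised in a browser against a locally running LiteLLM proxy; the prompt, model alias, and key below are placeholders, not values from the commit:

import { makeOpenAIImageGenerationRequest } from "./chat_ui/llm_calls/image_generation";

(async () => {
  // Placeholder inputs: substitute a model alias and key configured on the proxy.
  await makeOpenAIImageGenerationRequest(
    "a watercolor lighthouse at dusk",
    (imageUrl, model) => console.log(`image from ${model}:`, imageUrl),
    "dall-e-3",
    "sk-1234"
  );
})();

Because the helper normalizes both response shapes, the callback's imageUrl is either a hosted URL or a data:image/png;base64 URL, and either one can be assigned directly to an <img> src, which is what the rendering change in ChatUI relies on.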