diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 30843173c..b6aacb83b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -145,6 +145,20 @@ repos:
         pass_filenames: false
         require_serial: true
         files: ^.github/workflows/.*$
+      - id: ui-prettier
+        name: Format UI code with Prettier
+        entry: bash -c 'cd llama_stack/ui && npm run format'
+        language: system
+        files: ^llama_stack/ui/.*\.(ts|tsx)$
+        pass_filenames: false
+        require_serial: true
+      - id: ui-eslint
+        name: Lint UI code with ESLint
+        entry: bash -c 'cd llama_stack/ui && npm run lint -- --fix --quiet'
+        language: system
+        files: ^llama_stack/ui/.*\.(ts|tsx)$
+        pass_filenames: false
+        require_serial: true
 
 ci:
   autofix_commit_msg: 🎨 [pre-commit.ci] Auto format from pre-commit.com hooks
diff --git a/llama_stack/ui/.prettierignore b/llama_stack/ui/.prettierignore
index 1b8ac8894..b737ae6ed 100644
--- a/llama_stack/ui/.prettierignore
+++ b/llama_stack/ui/.prettierignore
@@ -1,3 +1,12 @@
 # Ignore artifacts:
 build
 coverage
+.next
+node_modules
+dist
+*.lock
+*.log
+
+# Generated files
+*.min.js
+*.min.css
diff --git a/llama_stack/ui/.prettierrc b/llama_stack/ui/.prettierrc
index 0967ef424..059475a24 100644
--- a/llama_stack/ui/.prettierrc
+++ b/llama_stack/ui/.prettierrc
@@ -1 +1,10 @@
-{}
+{
+  "semi": true,
+  "trailingComma": "es5",
+  "singleQuote": false,
+  "printWidth": 80,
+  "tabWidth": 2,
+  "useTabs": false,
+  "bracketSpacing": true,
+  "arrowParens": "avoid"
+}
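Note: the `.prettierrc` options that most visibly change output are `arrowParens: "avoid"`, `trailingComma: "es5"`, and `singleQuote: false`. A small illustrative snippet (not from the repo) as Prettier would emit it under this config:

```typescript
// arrowParens: "avoid" — a single untyped parameter loses its parentheses.
[1, 2, 3].map(n => n * 2);

// trailingComma: "es5" — trailing commas in multi-line object/array literals,
// but none after the last argument of a call.
// singleQuote: false — strings stay double-quoted.
const config = {
  endpoint: "/api/v1",
  retries: 3,
};
```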
diff --git a/llama_stack/ui/app/api/fetch-url/route.ts b/llama_stack/ui/app/api/fetch-url/route.ts
index dac6fecac..7a237ab82 100644
--- a/llama_stack/ui/app/api/fetch-url/route.ts
+++ b/llama_stack/ui/app/api/fetch-url/route.ts
@@ -1,47 +1,45 @@
-import { NextRequest, NextResponse } from 'next/server';
+import { NextRequest, NextResponse } from "next/server";
 
 export async function POST(request: NextRequest) {
   try {
     const { url } = await request.json();
 
-    if (!url || typeof url !== 'string') {
-      return NextResponse.json(
-        { error: 'URL is required' },
-        { status: 400 }
-      );
+    if (!url || typeof url !== "string") {
+      return NextResponse.json({ error: "URL is required" }, { status: 400 });
     }
 
     // Fetch the URL content
     const response = await fetch(url, {
       headers: {
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
-      }
+        "User-Agent":
+          "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
+      },
     });
 
     if (!response.ok) {
       throw new Error(`HTTP error! status: ${response.status}`);
     }
 
-    const contentType = response.headers.get('content-type') || '';
+    const contentType = response.headers.get("content-type") || "";
     let content: string;
 
-    if (contentType.includes('application/json')) {
+    if (contentType.includes("application/json")) {
       const json = await response.json();
       content = JSON.stringify(json, null, 2);
-    } else if (contentType.includes('text/html')) {
+    } else if (contentType.includes("text/html")) {
       const html = await response.text();
       // Basic HTML to text conversion - remove tags and decode entities
       content = html
-        .replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, '')
-        .replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, '')
-        .replace(/<[^>]*>/g, '')
-        .replace(/&nbsp;/g, ' ')
-        .replace(/&amp;/g, '&')
-        .replace(/&lt;/g, '<')
-        .replace(/&gt;/g, '>')
+        .replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi, "")
+        .replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi, "")
+        .replace(/<[^>]*>/g, "")
+        .replace(/&nbsp;/g, " ")
+        .replace(/&amp;/g, "&")
+        .replace(/&lt;/g, "<")
+        .replace(/&gt;/g, ">")
         .replace(/&quot;/g, '"')
         .replace(/&#39;/g, "'")
-        .replace(/\s+/g, ' ')
+        .replace(/\s+/g, " ")
         .trim();
     } else {
       content = await response.text();
@@ -49,9 +47,9 @@ export async function POST(request: NextRequest) {
 
     return NextResponse.json({ content });
   } catch (error) {
-    console.error('Error fetching URL:', error);
+    console.error("Error fetching URL:", error);
     return NextResponse.json(
-      { error: 'Failed to fetch URL content' },
+      { error: "Failed to fetch URL content" },
       { status: 500 }
     );
   }
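Note: a minimal client-side sketch of calling the route above; the `{ url }` request body and the `{ content }` / `{ error }` response shapes come from the handler, while the helper name is illustrative:

```typescript
// Hypothetical caller for POST /api/fetch-url (shapes taken from the handler above).
async function fetchUrlContent(url: string): Promise<string> {
  const res = await fetch("/api/fetch-url", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ url }),
  });
  const data = await res.json();
  if (!res.ok) throw new Error(data.error ?? "Failed to fetch URL content");
  return data.content as string;
}
```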
diff --git a/llama_stack/ui/app/api/upload-document/route.ts b/llama_stack/ui/app/api/upload-document/route.ts
new file mode 100644
index 000000000..d1295dbb3
--- /dev/null
+++ b/llama_stack/ui/app/api/upload-document/route.ts
@@ -0,0 +1,51 @@
+import { NextRequest, NextResponse } from "next/server";
+
+export async function POST(request: NextRequest) {
+  try {
+    const formData = await request.formData();
+    const file = formData.get("file") as File;
+    const vectorDbId = formData.get("vectorDbId") as string;
+
+    if (!file || !vectorDbId) {
+      return NextResponse.json(
+        { error: "File and vectorDbId are required" },
+        { status: 400 }
+      );
+    }
+
+    // Read file content based on type
+    let content: string;
+    const mimeType = file.type || "application/octet-stream";
+
+    if (mimeType === "text/plain" || mimeType === "text/markdown") {
+      content = await file.text();
+    } else if (mimeType === "application/pdf") {
+      // For PDFs, convert to base64 on the server side
+      const arrayBuffer = await file.arrayBuffer();
+      const bytes = new Uint8Array(arrayBuffer);
+      let binary = "";
+      for (let i = 0; i < bytes.byteLength; i++) {
+        binary += String.fromCharCode(bytes[i]);
+      }
+      const base64 = btoa(binary);
+      content = `data:${mimeType};base64,${base64}`;
+    } else {
+      // Try to read as text
+      content = await file.text();
+    }
+
+    // Return the processed content for the client to send to RagTool
+    return NextResponse.json({
+      content,
+      mimeType,
+      fileName: file.name,
+      fileSize: file.size,
+    });
+  } catch (error) {
+    console.error("Error processing file upload:", error);
+    return NextResponse.json(
+      { error: "Failed to process file upload" },
+      { status: 500 }
+    );
+  }
+}
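Note: the new route expects multipart form data with `file` and `vectorDbId` fields and echoes back `{ content, mimeType, fileName, fileSize }`. A hedged sketch of a caller:

```typescript
// Hypothetical caller for POST /api/upload-document; field names and the
// response shape mirror the handler above, everything else is illustrative.
async function processDocument(file: File, vectorDbId: string) {
  const formData = new FormData();
  formData.append("file", file);
  formData.append("vectorDbId", vectorDbId);

  const res = await fetch("/api/upload-document", {
    method: "POST",
    body: formData,
  });
  if (!res.ok) throw new Error("Failed to process file upload");
  return res.json() as Promise<{
    content: string;
    mimeType: string;
    fileName: string;
    fileSize: number;
  }>;
}
```

On the Node runtime, `Buffer.from(arrayBuffer).toString("base64")` would do the PDF conversion in one step; the manual byte loop in the handler keeps it portable to edge runtimes where `Buffer` is unavailable.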
diff --git a/llama_stack/ui/app/api/v1/[...path]/route.ts b/llama_stack/ui/app/api/v1/[...path]/route.ts
index 1959f9099..51c1f8004 100644
--- a/llama_stack/ui/app/api/v1/[...path]/route.ts
+++ b/llama_stack/ui/app/api/v1/[...path]/route.ts
@@ -47,7 +47,7 @@ async function proxyRequest(request: NextRequest, method: string) {
     const responseText = await response.text();
 
     console.log(
-      `Response from FastAPI: ${response.status} ${response.statusText}`,
+      `Response from FastAPI: ${response.status} ${response.statusText}`
     );
 
     // Create response with same status and headers
@@ -74,7 +74,7 @@ async function proxyRequest(request: NextRequest, method: string) {
         backend_url: BACKEND_URL,
         timestamp: new Date().toISOString(),
       },
-      { status: 500 },
+      { status: 500 }
     );
   }
 }
diff --git a/llama_stack/ui/app/auth/signin/page.tsx b/llama_stack/ui/app/auth/signin/page.tsx
index c9510fd6b..0ccb4a397 100644
--- a/llama_stack/ui/app/auth/signin/page.tsx
+++ b/llama_stack/ui/app/auth/signin/page.tsx
@@ -51,9 +51,9 @@ export default function SignInPage() {
               onClick={() => {
                 console.log("Signing in with GitHub...");
                 signIn("github", { callbackUrl: "/auth/signin" }).catch(
-                  (error) => {
+                  error => {
                     console.error("Sign in error:", error);
-                  },
+                  }
                 );
               }}
               className="w-full"
diff --git a/llama_stack/ui/app/chat-playground/page.tsx b/llama_stack/ui/app/chat-playground/page.tsx
index 86efe3d5c..c86702125 100644
--- a/llama_stack/ui/app/chat-playground/page.tsx
+++ b/llama_stack/ui/app/chat-playground/page.tsx
@@ -16,67 +16,96 @@ import { useAuthClient } from "@/hooks/use-auth-client";
 import type { CompletionCreateParams } from "llama-stack-client/resources/chat/completions";
 import type { Model } from "llama-stack-client/resources/models";
 import type { VectorDBListResponse } from "llama-stack-client/resources/vector-dbs";
-import { VectorDbManager } from "@/components/vector-db/vector-db-manager";
-import { SessionManager, SessionUtils } from "@/components/chat-playground/session-manager";
+import { VectorDbManager } from "@/components/vector-db/vector-db-manager-simple";
+import {
+  SessionManager,
+  SessionUtils,
+} from "@/components/chat-playground/session-manager";
+import { DocumentUploader } from "@/components/chat-playground/document-uploader";
+
+/**
+ * Unified Chat Playground
+ * - Keeps session + system message + VectorDB/RAG & document upload from version B
+ * - Preserves simple message flow & suggestions/append helpers from version A
+ * - Uses a single state source of truth: currentSession
+ */
 
 interface ChatSession {
   id: string;
   name: string;
   messages: Message[];
   selectedModel: string;
-  selectedVectorDb: string;
+  selectedVectorDb: string; // "none" disables RAG
+  systemMessage: string;
   createdAt: number;
   updatedAt: number;
 }
 
 export default function ChatPlaygroundPage() {
-  const [currentSession, setCurrentSession] = useState<ChatSession | null>(null);
+  const [currentSession, setCurrentSession] = useState<ChatSession | null>(
+    null
+  );
   const [input, setInput] = useState("");
   const [isGenerating, setIsGenerating] = useState(false);
   const [error, setError] = useState<string | null>(null);
+
   const [models, setModels] = useState<Model[]>([]);
   const [modelsLoading, setModelsLoading] = useState(true);
   const [modelsError, setModelsError] = useState<string | null>(null);
+
   const [vectorDbs, setVectorDbs] = useState<VectorDBListResponse>([]);
   const [vectorDbsLoading, setVectorDbsLoading] = useState(true);
   const [vectorDbsError, setVectorDbsError] = useState<string | null>(null);
-  const client = useAuthClient();
+
+  const client = useAuthClient();
   const isModelsLoading = modelsLoading ?? true;
 
-  // Load current session on mount
+  // --- Session bootstrapping ---
   useEffect(() => {
-    const savedSession = SessionUtils.loadCurrentSession();
-    if (savedSession) {
-      setCurrentSession(savedSession);
+    const saved = SessionUtils.loadCurrentSession();
+    if (saved) {
+      setCurrentSession(saved);
     } else {
-      // Create default session if none exists - will be updated with model when models load
-      const defaultSession = SessionUtils.createDefaultSession();
+      const def = SessionUtils.createDefaultSession();
+      // ensure defaults align with our fields
+      const defaultSession: ChatSession = {
+        ...def,
+        selectedModel: "",
+        selectedVectorDb: "none",
+        systemMessage: def.systemMessage || "You are a helpful assistant.",
+      };
       setCurrentSession(defaultSession);
       SessionUtils.saveCurrentSession(defaultSession);
     }
  }, []);
 
-  // Save session when it changes
+  // Persist session on change
   useEffect(() => {
-    if (currentSession) {
-      SessionUtils.saveCurrentSession(currentSession);
-    }
+    if (currentSession) SessionUtils.saveCurrentSession(currentSession);
   }, [currentSession]);
 
+  // --- Fetch models & vector DBs ---
   useEffect(() => {
     const fetchModels = async () => {
       try {
         setModelsLoading(true);
         setModelsError(null);
-        const modelList = await client.models.list();
-        const llmModels = modelList.filter(model => model.model_type === 'llm');
-        setModels(llmModels);
-        if (llmModels.length > 0 && currentSession && !currentSession.selectedModel) {
-          setCurrentSession(prev => prev ? { ...prev, selectedModel: llmModels[0].identifier } : null);
+        const list = await client.models.list();
+        const llms = list.filter(m => m.model_type === "llm");
+        setModels(llms);
+        if (llms.length > 0) {
+          setCurrentSession(prev =>
+            prev && !prev.selectedModel
+              ? {
+                  ...prev,
+                  selectedModel: llms[0].identifier,
+                  updatedAt: Date.now(),
+                }
+              : prev
+          );
         }
-      } catch (err) {
-        console.error("Error fetching models:", err);
+      } catch (e) {
+        console.error("Error fetching models:", e);
         setModelsError("Failed to fetch available models");
       } finally {
         setModelsLoading(false);
@@ -87,10 +116,16 @@ export default function ChatPlaygroundPage() {
       try {
         setVectorDbsLoading(true);
         setVectorDbsError(null);
-        const vectorDbList = await client.vectorDBs.list();
-        setVectorDbs(vectorDbList);
-      } catch (err) {
-        console.error("Error fetching vector DBs:", err);
+        const list = await client.vectorDBs.list();
+        setVectorDbs(list);
+        // default to "none" if not set
+        setCurrentSession(prev =>
+          prev && !prev.selectedVectorDb
+            ? { ...prev, selectedVectorDb: "none", updatedAt: Date.now() }
+            : prev
+        );
+      } catch (e) {
+        console.error("Error fetching vector DBs:", e);
         setVectorDbsError("Failed to fetch available vector databases");
       } finally {
         setVectorDbsLoading(false);
@@ -101,147 +136,178 @@ export default function ChatPlaygroundPage() {
     fetchVectorDbs();
   }, [client]);
 
+  // --- Utilities ---
   const extractTextContent = (content: unknown): string => {
-    if (typeof content === 'string') {
-      return content;
-    }
+    if (typeof content === "string") return content;
     if (Array.isArray(content)) {
       return content
-        .filter(item => item && typeof item === 'object' && 'type' in item && item.type === 'text')
-        .map(item => (item && typeof item === 'object' && 'text' in item) ? String(item.text) : '')
-        .join('');
+        .filter(
+          item =>
+            item &&
+            typeof item === "object" &&
+            "type" in item &&
+            (item as { type: string }).type === "text"
+        )
+        .map(item =>
+          item && typeof item === "object" && "text" in item
+            ? String((item as { text: unknown }).text)
+            : ""
+        )
+        .join("");
     }
-    if (content && typeof content === 'object' && 'type' in content && content.type === 'text' && 'text' in content) {
-      return String(content.text) || '';
+    if (
+      content &&
+      typeof content === "object" &&
+      "type" in content &&
+      (content as { type: string }).type === "text" &&
+      "text" in content
+    ) {
+      return String((content as { text: unknown }).text) || "";
     }
-    return '';
+    return "";
   };
 
-  const handleInputChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
+  // --- Handlers ---
+  const handleInputChange = (e: React.ChangeEvent<HTMLTextAreaElement>) =>
     setInput(e.target.value);
-  };
 
-const handleSubmit = async (event?: { preventDefault?: () => void }) => {
-  event?.preventDefault?.();
-  if (!input.trim() || !currentSession || !currentSession.selectedModel) return;
+  const handleSubmit = async (event?: { preventDefault?: () => void }) => {
+    event?.preventDefault?.();
+    if (!input.trim() || !currentSession || !currentSession.selectedModel)
+      return;
 
-  // Add user message to chat
-  const userMessage: Message = {
-    id: Date.now().toString(),
-    role: "user",
-    content: input.trim(),
-    createdAt: new Date(),
-  };
-
-  setCurrentSession(prev => prev ? {
-    ...prev,
-    messages: [...prev.messages, userMessage],
-    updatedAt: Date.now()
-  } : null);
-  setInput("");
-
-  // Use the helper function with the content
-  await handleSubmitWithContent(userMessage.content);
-};
-
-const handleSubmitWithContent = async (content: string) => {
-  setIsGenerating(true);
-  setError(null);
-
-  try {
-    let enhancedContent = content;
-
-    // If a vector DB is selected, query for relevant context
-    if (currentSession?.selectedVectorDb && currentSession.selectedVectorDb !== "none") {
-      try {
-        const vectorResponse = await client.vectorIo.query({
-          query: content,
-          vector_db_id: currentSession.selectedVectorDb,
-        });
-
-        if (vectorResponse.chunks && vectorResponse.chunks.length > 0) {
-          const context = vectorResponse.chunks
-            .map(chunk => {
-              // Extract text content from the chunk
-              const chunkContent = typeof chunk.content === 'string'
-                ? chunk.content
-                : extractTextContent(chunk.content);
-              return chunkContent;
-            })
-            .join('\n\n');
-
-          enhancedContent = `Please answer the following query using the context below.\n\nCONTEXT:\n${context}\n\nQUERY:\n${content}`;
-        }
-      } catch (vectorErr) {
-        console.error("Error querying vector DB:", vectorErr);
-        // Continue with original content if vector query fails
-      }
-    }
-
-    const messageParams: CompletionCreateParams["messages"] = [
-      ...(currentSession?.messages || []).map(msg => {
-        const msgContent = typeof msg.content === 'string' ? msg.content : extractTextContent(msg.content);
-        if (msg.role === "user") {
-          return { role: "user" as const, content: msgContent };
-        } else if (msg.role === "assistant") {
-          return { role: "assistant" as const, content: msgContent };
-        } else {
-          return { role: "system" as const, content: msgContent };
-        }
-      }),
-      { role: "user" as const, content: enhancedContent }
-    ];
-
-    const response = await client.chat.completions.create({
-      model: currentSession?.selectedModel || "",
-      messages: messageParams,
-      stream: true,
-    });
-
-    const assistantMessage: Message = {
-      id: (Date.now() + 1).toString(),
-      role: "assistant",
-      content: "",
+    const userMessage: Message = {
+      id: Date.now().toString(),
+      role: "user",
+      content: input.trim(),
       createdAt: new Date(),
     };
 
-    setCurrentSession(prev => prev ? {
-      ...prev,
-      messages: [...prev.messages, assistantMessage],
-      updatedAt: Date.now()
-    } : null);
+    setCurrentSession(prev =>
+      prev
+        ? {
+            ...prev,
+            messages: [...prev.messages, userMessage],
+            updatedAt: Date.now(),
+          }
+        : prev
+    );
+    setInput("");
 
-    let fullContent = "";
-    for await (const chunk of response) {
-      if (chunk.choices && chunk.choices[0]?.delta?.content) {
-        const deltaContent = chunk.choices[0].delta.content;
-        fullContent += deltaContent;
+    await handleSubmitWithContent(userMessage.content);
+  };
 
-        flushSync(() => {
-          setCurrentSession(prev => {
-            if (!prev) return null;
-            const newMessages = [...prev.messages];
-            const lastMessage = newMessages[newMessages.length - 1];
-            if (lastMessage.role === "assistant") {
-              lastMessage.content = fullContent;
-            }
-            return { ...prev, messages: newMessages, updatedAt: Date.now() };
-          });
-        });
-      }
-    }
-  } catch (err) {
-    console.error("Error sending message:", err);
-    setError("Failed to send message. Please try again.");
-    setCurrentSession(prev => prev ? {
-      ...prev,
-      messages: prev.messages.slice(0, -1),
-      updatedAt: Date.now()
-    } : null);
-  } finally {
-    setIsGenerating(false);
-  }
-};
+  const handleSubmitWithContent = async (content: string) => {
+    setIsGenerating(true);
+    setError(null);
+
+    try {
+      let enhancedContent = content;
+
+      // --- RAG augmentation (optional) ---
+      if (
+        currentSession?.selectedVectorDb &&
+        currentSession.selectedVectorDb !== "none"
+      ) {
+        try {
+          const vectorResponse = await client.vectorIo.query({
+            query: content,
+            vector_db_id: currentSession.selectedVectorDb,
+          });
+
+          if (vectorResponse.chunks && vectorResponse.chunks.length > 0) {
+            const context = vectorResponse.chunks
+              .map(chunk =>
+                typeof chunk.content === "string"
+                  ? chunk.content
+                  : extractTextContent(chunk.content)
+              )
+              .join("\n\n");
+
+            enhancedContent = `Please answer the following query using the context below.\n\nCONTEXT:\n${context}\n\nQUERY:\n${content}`;
+          }
+        } catch (vectorErr) {
+          console.error("Error querying vector DB:", vectorErr);
+          // proceed without augmentation
+        }
      }
+
+      const messageParams: CompletionCreateParams["messages"] = [
+        ...(currentSession?.systemMessage
+          ? [{ role: "system" as const, content: currentSession.systemMessage }]
+          : []),
+        ...(currentSession?.messages || []).map(msg => {
+          const msgContent =
+            typeof msg.content === "string"
+              ? msg.content
+              : extractTextContent(msg.content);
+          if (msg.role === "user")
+            return { role: "user" as const, content: msgContent };
+          if (msg.role === "assistant")
+            return { role: "assistant" as const, content: msgContent };
+          return { role: "system" as const, content: msgContent };
+        }),
+        { role: "user" as const, content: enhancedContent },
+      ];
+
+      const response = await client.chat.completions.create({
+        model: currentSession?.selectedModel || "",
+        messages: messageParams,
+        stream: true,
+      });
+
+      const assistantMessage: Message = {
+        id: (Date.now() + 1).toString(),
+        role: "assistant",
+        content: "",
+        createdAt: new Date(),
+      };
+
+      setCurrentSession(prev =>
+        prev
+          ? {
+              ...prev,
+              messages: [...prev.messages, assistantMessage],
+              updatedAt: Date.now(),
+            }
+          : prev
+      );
+
+      let fullContent = "";
+      for await (const chunk of response) {
+        if (chunk.choices && chunk.choices[0]?.delta?.content) {
+          const deltaContent = chunk.choices[0].delta.content;
+          fullContent += deltaContent;
+
+          flushSync(() => {
+            setCurrentSession(prev => {
+              if (!prev) return null;
+              const newMessages = [...prev.messages];
+              const last = newMessages[newMessages.length - 1];
+              if (last.role === "assistant") last.content = fullContent;
+              return { ...prev, messages: newMessages, updatedAt: Date.now() };
+            });
+          });
+        }
+      }
+    } catch (err) {
+      console.error("Error sending message:", err);
+      setError("Failed to send message. Please try again.");
+      setCurrentSession(prev =>
+        prev
+          ? {
+              ...prev,
+              messages: prev.messages.slice(0, -1),
+              updatedAt: Date.now(),
+            }
+          : prev
+      );
+    } finally {
+      setIsGenerating(false);
+    }
+  };
 
+  // --- UX helpers ---
   const suggestions = [
     "Write a Python function that prints 'Hello, World!'",
     "Explain step-by-step how to solve this math problem: If x² + 6x + 9 = 25, what is x?",
@@ -255,20 +321,22 @@ const handleSubmitWithContent = async (content: string) => {
       content: message.content,
       createdAt: new Date(),
     };
-    setCurrentSession(prev => prev ? {
-      ...prev,
-      messages: [...prev.messages, newMessage],
-      updatedAt: Date.now()
-    } : null);
+    setCurrentSession(prev =>
+      prev
+        ? {
+            ...prev,
+            messages: [...prev.messages, newMessage],
+            updatedAt: Date.now(),
+          }
+        : prev
+    );
     handleSubmitWithContent(newMessage.content);
   };
 
   const clearChat = () => {
-    setCurrentSession(prev => prev ? {
-      ...prev,
-      messages: [],
-      updatedAt: Date.now()
-    } : null);
+    setCurrentSession(prev =>
+      prev ? { ...prev, messages: [], updatedAt: Date.now() } : prev
+    );
     setError(null);
   };
 
@@ -278,13 +346,20 @@ const handleSubmitWithContent = async (content: string) => {
   };
 
   const handleNewSession = () => {
-    const defaultModel = currentSession?.selectedModel || (models.length > 0 ? models[0].identifier : "");
-    const defaultVectorDb = currentSession?.selectedVectorDb || "";
+    const defaultModel =
+      currentSession?.selectedModel ||
+      (models.length > 0 ? models[0].identifier : "");
+    const defaultVectorDb = currentSession?.selectedVectorDb || "none";
 
-    const newSession = {
+    const newSession: ChatSession = {
       ...SessionUtils.createDefaultSession(),
       selectedModel: defaultModel,
      selectedVectorDb: defaultVectorDb,
+      systemMessage:
+        currentSession?.systemMessage || "You are a helpful assistant.",
+      messages: [],
+      updatedAt: Date.now(),
+      createdAt: Date.now(),
     };
     setCurrentSession(newSession);
     SessionUtils.saveCurrentSession(newSession);
@@ -305,101 +380,200 @@ const handleSubmitWithContent = async (content: string) => {
   };
 
   return (
-    [render JSX stripped in extraction — removed single-column layout: "Chat Playground" heading, inline "Model:" and "Vector DB:" selects, and the modelsError / vectorDbsError / error banners]
+    [render JSX stripped in extraction — added two-column layout: a "Clear Chat" button, {/* Main Two-Column Layout */} with a {/* Left Column - Configuration Panel */} "Settings" panel, a {/* Model Configuration */} card holding the model select and modelsError banner, the vector DB select with its vectorDbsError banner, and the main chat column]
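Note: the playground relies on `SessionUtils` from `session-manager`, whose implementation is not part of this diff. A purely hypothetical localStorage-backed sketch, consistent with the three calls made above (`loadCurrentSession()`, `saveCurrentSession(session)`, `createDefaultSession()`):

```typescript
// Hypothetical sketch only — the real SessionUtils lives in
// components/chat-playground/session-manager and may differ.
const STORAGE_KEY = "chat-playground-current-session"; // assumed key

export const SessionUtils = {
  loadCurrentSession(): ChatSession | null {
    const raw = localStorage.getItem(STORAGE_KEY);
    return raw ? (JSON.parse(raw) as ChatSession) : null;
  },
  saveCurrentSession(session: ChatSession): void {
    localStorage.setItem(STORAGE_KEY, JSON.stringify(session));
  },
  createDefaultSession(): ChatSession {
    const now = Date.now();
    return {
      id: now.toString(),
      name: "New Session",
      messages: [],
      selectedModel: "",
      selectedVectorDb: "none",
      systemMessage: "You are a helpful assistant.",
      createdAt: now,
      updatedAt: now,
    };
  },
};
```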