From 2708312168e8182e4aa3ffb2ee8959a37458fb29 Mon Sep 17 00:00:00 2001
From: ehhuang
Date: Thu, 22 May 2025 22:05:54 -0700
Subject: [PATCH] feat(ui): implement chat completion views (#2201)

# What does this PR do?

Implements table and detail views for chat completions.

## Test Plan

npm run test
---
 llama_stack/distribution/server/server.py    |   12 +
 llama_stack/ui/.prettierignore               |    3 +
 llama_stack/ui/.prettierrc                   |    1 +
 llama_stack/ui/README.md                     |    3 +-
 llama_stack/ui/app/layout.tsx                |    2 +-
 .../app/logs/chat-completions/[id]/page.tsx  |   62 +
 .../ui/app/logs/chat-completions/layout.tsx  |   45 +
 .../ui/app/logs/chat-completions/page.tsx    |   55 +-
 .../chat-completion-detail.test.tsx          |  193 +
 .../chat-completion-detail.tsx               |  198 +
 .../chat-completion-table.test.tsx           |  340 ++
 .../chat-completion-table.tsx                |  120 +
 .../chat-completions/chat-messasge-item.tsx  |  107 +
 .../components/{ => layout}/app-sidebar.tsx  |   41 +-
 .../ui/components/layout/page-breadcrumb.tsx |   49 +
 llama_stack/ui/components/ui/breadcrumb.tsx  |  109 +
 llama_stack/ui/components/ui/card.tsx        |   92 +
 llama_stack/ui/components/ui/table.tsx       |  116 +
 llama_stack/ui/jest.config.ts                |  210 +
 .../ui/lib/format-message-content.test.ts    |  193 +
 llama_stack/ui/lib/format-message-content.ts |   61 +
 llama_stack/ui/lib/format-tool-call.tsx      |   33 +
 llama_stack/ui/lib/truncate-text.ts          |    8 +
 llama_stack/ui/lib/types.ts                  |   44 +
 llama_stack/ui/lib/{utils.ts => utils.tsx}   |    0
 llama_stack/ui/package-lock.json             | 4655 ++++++++++++++++-
 llama_stack/ui/package.json                  |   15 +-
 27 files changed, 6729 insertions(+), 38 deletions(-)
 create mode 100644 llama_stack/ui/.prettierignore
 create mode 100644 llama_stack/ui/.prettierrc
 create mode 100644 llama_stack/ui/app/logs/chat-completions/[id]/page.tsx
 create mode 100644 llama_stack/ui/app/logs/chat-completions/layout.tsx
 create mode 100644 llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx
 create mode 100644 llama_stack/ui/components/chat-completions/chat-completion-detail.tsx
 create mode 100644 llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx
 create mode 100644 llama_stack/ui/components/chat-completions/chat-completion-table.tsx
 create mode 100644 llama_stack/ui/components/chat-completions/chat-messasge-item.tsx
 rename llama_stack/ui/components/{ => layout}/app-sidebar.tsx (50%)
 create mode 100644 llama_stack/ui/components/layout/page-breadcrumb.tsx
 create mode 100644 llama_stack/ui/components/ui/breadcrumb.tsx
 create mode 100644 llama_stack/ui/components/ui/card.tsx
 create mode 100644 llama_stack/ui/components/ui/table.tsx
 create mode 100644 llama_stack/ui/jest.config.ts
 create mode 100644 llama_stack/ui/lib/format-message-content.test.ts
 create mode 100644 llama_stack/ui/lib/format-message-content.ts
 create mode 100644 llama_stack/ui/lib/format-tool-call.tsx
 create mode 100644 llama_stack/ui/lib/truncate-text.ts
 create mode 100644 llama_stack/ui/lib/types.ts
 rename llama_stack/ui/lib/{utils.ts => utils.tsx} (100%)

diff --git a/llama_stack/distribution/server/server.py b/llama_stack/distribution/server/server.py
index 52f2b71b0..7069390cf 100644
--- a/llama_stack/distribution/server/server.py
+++ b/llama_stack/distribution/server/server.py
@@ -23,6 +23,7 @@ import yaml
 from fastapi import Body, FastAPI, HTTPException, Request
 from fastapi import Path as FastapiPath
 from fastapi.exceptions import RequestValidationError
+from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, StreamingResponse
 from openai import BadRequestError
 from pydantic import BaseModel, ValidationError
@@ -465,6 +466,17 @@ def main(args: argparse.Namespace | None = None):
             window_seconds=window_seconds,
         )
 
+    # --- CORS middleware for local development ---
+    # TODO: move to reverse proxy
+    ui_port = os.environ.get("LLAMA_STACK_UI_PORT", 8322)
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=[f"http://localhost:{ui_port}"],
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
     try:
         impls = asyncio.run(construct_stack(config))
     except InvalidProviderError as e:
diff --git a/llama_stack/ui/.prettierignore b/llama_stack/ui/.prettierignore
new file mode 100644
index 000000000..1b8ac8894
--- /dev/null
+++ b/llama_stack/ui/.prettierignore
@@ -0,0 +1,3 @@
+# Ignore artifacts:
+build
+coverage
diff --git a/llama_stack/ui/.prettierrc b/llama_stack/ui/.prettierrc
new file mode 100644
index 000000000..0967ef424
--- /dev/null
+++ b/llama_stack/ui/.prettierrc
@@ -0,0 +1 @@
+{}
diff --git a/llama_stack/ui/README.md b/llama_stack/ui/README.md
index 665619bf1..b6f803509 100644
--- a/llama_stack/ui/README.md
+++ b/llama_stack/ui/README.md
@@ -1,6 +1,5 @@
 ## This is WIP.
-
 We use shadcdn/ui [Shadcn UI](https://ui.shadcn.com/) for the UI components.
 
 ## Getting Started
@@ -23,4 +22,4 @@ pnpm dev
 bun dev
 ```
 
-Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
+Open [http://localhost:8322](http://localhost:8322) with your browser to see the result.
diff --git a/llama_stack/ui/app/layout.tsx b/llama_stack/ui/app/layout.tsx
index f029002dd..ed8a6cd5d 100644
--- a/llama_stack/ui/app/layout.tsx
+++ b/llama_stack/ui/app/layout.tsx
@@ -20,7 +20,7 @@ export const metadata: Metadata = {
 };
 
 import { SidebarProvider, SidebarTrigger } from "@/components/ui/sidebar";
-import { AppSidebar } from "@/components/app-sidebar";
+import { AppSidebar } from "@/components/layout/app-sidebar";
 
 export default function Layout({ children }: { children: React.ReactNode }) {
   return (
diff --git a/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx b/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx
new file mode 100644
index 000000000..f7c2580da
--- /dev/null
+++ b/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx
@@ -0,0 +1,62 @@
+"use client";
+
+import { useEffect, useState } from "react";
+import { useParams } from "next/navigation";
+import LlamaStackClient from "llama-stack-client";
+import { ChatCompletion } from "@/lib/types";
+import { ChatCompletionDetailView } from "@/components/chat-completions/chat-completion-detail";
+
+export default function ChatCompletionDetailPage() {
+  const params = useParams();
+  const id = params.id as string;
+
+  const [completionDetail, setCompletionDetail] =
+    useState<ChatCompletion | null>(null);
+  const [isLoading, setIsLoading] = useState(true);
+  const [error, setError] = useState<Error | null>(null);
+
+  useEffect(() => {
+    if (!id) {
+      setError(new Error("Completion ID is missing."));
+      setIsLoading(false);
+      return;
+    }
+
+    const client = new LlamaStackClient({
+      baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
+    });
+
+    const fetchCompletionDetail = async () => {
+      setIsLoading(true);
+      setError(null);
+      setCompletionDetail(null);
+      try {
+        const response = await client.chat.completions.retrieve(id);
+        setCompletionDetail(response as ChatCompletion);
+      } catch (err) {
+        console.error(
+          `Error fetching chat completion detail for ID ${id}:`,
+          err,
+        );
+        setError(
+          err instanceof Error
+            ? err
+            : new Error("Failed to fetch completion detail"),
+        );
+      } finally {
+        setIsLoading(false);
+      }
+    };
+
+    fetchCompletionDetail();
+  }, [id]);
+
+  return (
+    <ChatCompletionDetailView
+      completion={completionDetail}
+      isLoading={isLoading}
+      error={error}
+      id={id}
+    />
+  );
+}
diff --git a/llama_stack/ui/app/logs/chat-completions/layout.tsx b/llama_stack/ui/app/logs/chat-completions/layout.tsx
new file mode 100644
index 000000000..3dd8c1222
--- /dev/null
+++ b/llama_stack/ui/app/logs/chat-completions/layout.tsx
@@ -0,0 +1,45 @@
+"use client";
+
+import React from "react";
+import { usePathname, useParams } from "next/navigation";
+import {
+  PageBreadcrumb,
+  BreadcrumbSegment,
+} from "@/components/layout/page-breadcrumb";
+import { truncateText } from "@/lib/truncate-text";
+
+export default function ChatCompletionsLayout({
+  children,
+}: {
+  children: React.ReactNode;
+}) {
+  const pathname = usePathname();
+  const params = useParams();
+
+  let segments: BreadcrumbSegment[] = [];
+
+  // Default for /logs/chat-completions
+  if (pathname === "/logs/chat-completions") {
+    segments = [{ label: "Chat Completions" }];
+  }
+
+  // For /logs/chat-completions/[id]
+  const idParam = params?.id;
+  if (idParam && typeof idParam === "string") {
+    segments = [
+      { label: "Chat Completions", href: "/logs/chat-completions" },
+      { label: `Details (${truncateText(idParam, 20)})` },
+    ];
+  }
+
+  return (
+    <div>
+      <>
+        {segments.length > 0 && (
+          <PageBreadcrumb segments={segments} />
+        )}
+        {children}
+      </>
+    </div>
+  );
+}
diff --git a/llama_stack/ui/app/logs/chat-completions/page.tsx b/llama_stack/ui/app/logs/chat-completions/page.tsx
index 84cceb8b7..3de77a042 100644
--- a/llama_stack/ui/app/logs/chat-completions/page.tsx
+++ b/llama_stack/ui/app/logs/chat-completions/page.tsx
@@ -1,7 +1,54 @@
-export default function ChatCompletions() {
+"use client";
+
+import { useEffect, useState } from "react";
+import LlamaStackClient from "llama-stack-client";
+import { ChatCompletion } from "@/lib/types";
+import { ChatCompletionsTable } from "@/components/chat-completions/chat-completion-table";
+
+export default function ChatCompletionsPage() {
+  const [completions, setCompletions] = useState<ChatCompletion[]>([]);
+  const [isLoading, setIsLoading] = useState(true);
+  const [error, setError] = useState<Error | null>(null);
+
+  useEffect(() => {
+    const client = new LlamaStackClient({
+      baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
+    });
+    const fetchCompletions = async () => {
+      setIsLoading(true);
+      setError(null);
+      try {
+        const response = await client.chat.completions.list();
+        const data = Array.isArray(response)
+          ? response
+          : (response as any).data;
+
+        if (Array.isArray(data)) {
+          setCompletions(data);
+        } else {
+          console.error("Unexpected response structure:", response);
+          setError(new Error("Unexpected response structure"));
+          setCompletions([]);
+        }
+      } catch (err) {
+        console.error("Error fetching chat completions:", err);
+        setError(
+          err instanceof Error ? err : new Error("Failed to fetch completions"),
+        );
+        setCompletions([]);
+      } finally {
+        setIsLoading(false);
+      }
+    };
+
+    fetchCompletions();
+  }, []);
+
   return (
-    <div>
-      <div>Under Construction</div>
-    </div>
+    <ChatCompletionsTable
+      completions={completions}
+      isLoading={isLoading}
+      error={error}
+    />
   );
 }
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx b/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx
new file mode 100644
index 000000000..33247ed26
--- /dev/null
+++ b/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx
@@ -0,0 +1,193 @@
+import React from "react";
+import { render, screen } from "@testing-library/react";
+import "@testing-library/jest-dom";
+import { ChatCompletionDetailView } from "./chat-completion-detail";
+import { ChatCompletion } from "@/lib/types";
+
+// Initial test file setup for ChatCompletionDetailView
+
+describe("ChatCompletionDetailView", () => {
+  test("renders skeleton UI when isLoading is true", () => {
+    const { container } = render(
+      <ChatCompletionDetailView
+        completion={null}
+        isLoading={true}
+        error={null}
+        id="test-id"
+      />,
+    );
+    // Use the data-slot attribute for Skeletons
+    const skeletons = container.querySelectorAll('[data-slot="skeleton"]');
+    expect(skeletons.length).toBeGreaterThan(0);
+  });
+
+  test("renders error message when error prop is provided", () => {
+    render(
+      <ChatCompletionDetailView
+        completion={null}
+        isLoading={false}
+        error={new Error("Network Error")}
+        id="err-id"
+      />,
+    );
+    expect(
+      screen.getByText(/Error loading details for ID err-id: Network Error/),
+    ).toBeInTheDocument();
+  });
+
+  test("renders default error message when error.message is empty", () => {
+    render(
+      <ChatCompletionDetailView
+        completion={null}
+        isLoading={false}
+        error={new Error("")}
+        id="err-id"
+      />,
+    );
+    // Use regex to match the error message regardless of whitespace
+    expect(
+      screen.getByText(/Error loading details for ID\s*err-id\s*:/),
+    ).toBeInTheDocument();
+  });
+
+  test("renders error message when error prop is an object without message", () => {
+    render(
+      <ChatCompletionDetailView
+        completion={null}
+        isLoading={false}
+        error={{} as Error}
+        id="err-id"
+      />,
+    );
+    // Use regex to match the error message regardless of whitespace
+    expect(
+      screen.getByText(/Error loading details for ID\s*err-id\s*:/),
+    ).toBeInTheDocument();
+  });
+
+  test("renders not found message when completion is null and not loading/error", () => {
+    render(
+      <ChatCompletionDetailView
+        completion={null}
+        isLoading={false}
+        error={null}
+        id="notfound-id"
+      />,
+    );
+    expect(
+      screen.getByText("No details found for completion ID: notfound-id."),
+    ).toBeInTheDocument();
+  });
+
+  test("renders input, output, and properties for valid completion", () => {
+    const mockCompletion: ChatCompletion = {
+      id: "comp_123",
+      object: "chat.completion",
+      created: 1710000000,
+      model: "llama-test-model",
+      choices: [
+        {
+          index: 0,
+          message: { role: "assistant", content: "Test output" },
+          finish_reason: "stop",
+        },
+      ],
+      input_messages: [{ role: "user", content: "Test input" }],
+    };
+    render(
+      <ChatCompletionDetailView
+        completion={mockCompletion}
+        isLoading={false}
+        error={null}
+        id="comp_123"
+      />,
+    );
+    // Input
+    expect(screen.getByText("Input")).toBeInTheDocument();
+    expect(screen.getByText("Test input")).toBeInTheDocument();
+    // Output
+    expect(screen.getByText("Output")).toBeInTheDocument();
+    expect(screen.getByText("Test output")).toBeInTheDocument();
+    // Properties
+    expect(screen.getByText("Properties")).toBeInTheDocument();
+    expect(screen.getByText("Created:")).toBeInTheDocument();
+    expect(
+      screen.getByText(new Date(1710000000 * 1000).toLocaleString()),
+    ).toBeInTheDocument();
+    expect(screen.getByText("ID:")).toBeInTheDocument();
+    expect(screen.getByText("comp_123")).toBeInTheDocument();
+    expect(screen.getByText("Model:")).toBeInTheDocument();
+    expect(screen.getByText("llama-test-model")).toBeInTheDocument();
+    expect(screen.getByText("Finish Reason:")).toBeInTheDocument();
+    expect(screen.getByText("stop")).toBeInTheDocument();
+  });
+
+  test("renders tool call in output and properties when present", () => {
+    const toolCall = {
+      function: { name: "search", arguments: '{"query":"llama"}' },
+    };
+    const mockCompletion: ChatCompletion = {
+      id: "comp_tool",
+      object: "chat.completion",
+      created: 1710001000,
+      model: "llama-tool-model",
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: "assistant",
+            content: "Tool output",
+            tool_calls: [toolCall],
+          },
+          finish_reason: "stop",
+        },
+      ],
+      input_messages: [{ role: "user", content: "Tool input" }],
+    };
+    render(
+      <ChatCompletionDetailView
+        completion={mockCompletion}
+        isLoading={false}
+        error={null}
+        id="comp_tool"
+      />,
+    );
+    // Output should include the tool call block (should be present twice: input and output)
+    const toolCallLabels = screen.getAllByText("Tool Call");
+    expect(toolCallLabels.length).toBeGreaterThanOrEqual(1); // At least one, but could be two
+    // The tool call block should contain the formatted tool call string in both input and output
+    const toolCallBlocks = screen.getAllByText('search({"query":"llama"})');
+    expect(toolCallBlocks.length).toBe(2);
+    // Properties should include the tool call name
+    expect(screen.getByText("Functions/Tools Called:")).toBeInTheDocument();
+    expect(screen.getByText("search")).toBeInTheDocument();
+  });
+
+  test("handles missing/empty fields gracefully", () => {
+    const mockCompletion: ChatCompletion = {
+      id: "comp_edge",
+      object: "chat.completion",
+      created: 1710002000,
+      model: "llama-edge-model",
+      choices: [], // No choices
+      input_messages: [], // No input messages
+    };
+    render(
+      <ChatCompletionDetailView
+        completion={mockCompletion}
+        isLoading={false}
+        error={null}
+        id="comp_edge"
+      />,
+    );
+    // Input section should be present but empty
+    expect(screen.getByText("Input")).toBeInTheDocument();
+    // Output section should show fallback message
+    expect(
+      screen.getByText("No message found in assistant's choice."),
+    ).toBeInTheDocument();
+    // Properties should show N/A for finish reason
+    expect(screen.getByText("Finish Reason:")).toBeInTheDocument();
+    expect(screen.getByText("N/A")).toBeInTheDocument();
+  });
+});
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx b/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx
new file mode 100644
index 000000000..e76418d1a
--- /dev/null
+++ b/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx
@@ -0,0 +1,198 @@
+"use client";
+
+import { ChatMessage, ChatCompletion } from "@/lib/types";
+import { ChatMessageItem } from "@/components/chat-completions/chat-messasge-item";
+import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
+import { Skeleton } from "@/components/ui/skeleton";
+
+function ChatCompletionDetailLoadingView() {
+  return (
+    <>
+      {/* Title Skeleton */}
+      <Skeleton />
+      <div>
+        {[...Array(2)].map((_, i) => (
+          <Card key={i}>
+            <CardHeader>
+              <CardTitle>
+                <Skeleton />
+              </CardTitle>
+            </CardHeader>
+            <CardContent>
+              <Skeleton />
+              <Skeleton />
+              <Skeleton />
+            </CardContent>
+          </Card>
+        ))}
+      </div>
+      <div>
+        <Card>
+          <CardHeader>
+            <CardTitle>
+              <Skeleton />
+            </CardTitle>{" "}
+            {/* Properties Title Skeleton */}
+            {[...Array(5)].map((_, i) => (
+              <div key={i}>
+                <Skeleton />
+                <Skeleton />
+              </div>
+            ))}
+          </CardHeader>
+        </Card>
+      </div>
+    </>
+  );
+}
+
+interface ChatCompletionDetailViewProps {
+  completion: ChatCompletion | null;
+  isLoading: boolean;
+  error: Error | null;
+  id: string;
+}
+
+export function ChatCompletionDetailView({
+  completion,
+  isLoading,
+  error,
+  id,
+}: ChatCompletionDetailViewProps) {
+  if (error) {
+    return (
+      <>
+        {/* We still want a title for consistency on error pages */}
+        <h1>Chat Completion Details</h1>
+        <p>
+          Error loading details for ID {id}: {error.message}
+        </p>
+      </>
+    );
+  }
+
+  if (isLoading) {
+    return <ChatCompletionDetailLoadingView />;
+  }
+
+  if (!completion) {
+    // This state means: not loading, no error, but no completion data
+    return (
+      <>
+        {/* We still want a title for consistency on not-found pages */}
+        <h1>Chat Completion Details</h1>
+        <p>No details found for completion ID: {id}.</p>
+      </>
+    );
+  }
+
+  // If no error, not loading, and completion exists, render the details:
+  return (
+    <>
+      <h1>Chat Completion Details</h1>
+      <div>
+        <div>
+          <Card>
+            <CardHeader>
+              <CardTitle>Input</CardTitle>
+            </CardHeader>
+            <CardContent>
+              {completion.input_messages?.map((msg, index) => (
+                <ChatMessageItem key={index} message={msg} />
+              ))}
+              {completion.choices?.[0]?.message?.tool_calls &&
+                !completion.input_messages?.some(
+                  (im) =>
+                    im.role === "assistant" &&
+                    im.tool_calls &&
+                    im.tool_calls.length > 0,
+                ) &&
+                completion.choices[0].message.tool_calls.map(
+                  (toolCall: any, index: number) => {
+                    const assistantToolCallMessage: ChatMessage = {
+                      role: "assistant",
+                      tool_calls: [toolCall],
+                      content: "", // Ensure content is defined, even if empty
+                    };
+                    return (
+                      <ChatMessageItem
+                        key={index}
+                        message={assistantToolCallMessage}
+                      />
+                    );
+                  },
+                )}
+            </CardContent>
+          </Card>
+
+          <Card>
+            <CardHeader>
+              <CardTitle>Output</CardTitle>
+            </CardHeader>
+            <CardContent>
+              {completion.choices?.[0]?.message ? (
+                <ChatMessageItem
+                  message={completion.choices[0].message as ChatMessage}
+                />
+              ) : (
+                <p>No message found in assistant's choice.</p>
+              )}
+            </CardContent>
+          </Card>
+        </div>
+
+        <div>
+          <Card>
+            <CardHeader>
+              <CardTitle>Properties</CardTitle>
+            </CardHeader>
+            <CardContent>
+              <ul>
+                <li>
+                  <strong>Created:</strong>{" "}
+                  <span>
+                    {new Date(completion.created * 1000).toLocaleString()}
+                  </span>
+                </li>
+                <li>
+                  <strong>ID:</strong> <span>{completion.id}</span>
+                </li>
+                <li>
+                  <strong>Model:</strong> <span>{completion.model}</span>
+                </li>
+                <li>
+                  <strong>Finish Reason:</strong>{" "}
+                  <span>{completion.choices?.[0]?.finish_reason || "N/A"}</span>
+                </li>
+                {completion.choices?.[0]?.message?.tool_calls &&
+                  completion.choices[0].message.tool_calls.length > 0 && (
+                    <li>
+                      <strong>Functions/Tools Called:</strong>
+                      <ul>
+                        {completion.choices[0].message.tool_calls.map(
+                          (toolCall: any, index: number) => (
+                            <li key={index}>
+                              <span>{toolCall.function?.name || "N/A"}</span>
+                            </li>
+                          ),
+                        )}
+                      </ul>
+                    </li>
+                  )}
+              </ul>
+            </CardContent>
+          </Card>
+        </div>
+      </div>
+    </>
+  );
+}
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx b/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx
new file mode 100644
index 000000000..e71ef3d43
--- /dev/null
+++ b/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx
@@ -0,0 +1,340 @@
+import React from "react";
+import { render, screen, fireEvent } from "@testing-library/react";
+import "@testing-library/jest-dom";
+import { ChatCompletionsTable } from "./chat-completion-table";
+import { ChatCompletion } from "@/lib/types"; // Assuming this path is correct
+
+// Mock next/navigation
+const mockPush = jest.fn();
+jest.mock("next/navigation", () => ({
+  useRouter: () => ({
+    push: mockPush,
+  }),
+}));
+
+// Mock helper functions
+// These are hoisted, so their mocks are available throughout the file
+jest.mock("@/lib/truncate-text");
+jest.mock("@/lib/format-tool-call");
+
+// Import the mocked functions to set up default or specific implementations
+import { truncateText as originalTruncateText } from "@/lib/truncate-text";
+import { formatToolCallToString as originalFormatToolCallToString } from "@/lib/format-tool-call";
+
+// Cast to jest.Mock for typings
+const truncateText = originalTruncateText as jest.Mock;
+const formatToolCallToString = originalFormatToolCallToString as jest.Mock;
+
+describe("ChatCompletionsTable", () => {
+  const defaultProps = {
+    completions: [] as ChatCompletion[],
+    isLoading: false,
+    error: null,
+  };
+
+  beforeEach(() => {
+    // Reset all mocks before each test
+    mockPush.mockClear();
+    truncateText.mockClear();
+    formatToolCallToString.mockClear();
+
+    // Default pass-through implementation for tests not focusing on truncation/formatting
+    truncateText.mockImplementation((text: string | undefined) => text);
+    formatToolCallToString.mockImplementation((toolCall: any) =>
+      toolCall && typeof toolCall === "object" && toolCall.name
+        ? `[DefaultToolCall:${toolCall.name}]`
+        : "[InvalidToolCall]",
+    );
+  });
+
+  test("renders without crashing with default props", () => {
+    render(<ChatCompletionsTable {...defaultProps} />);
+    // Check for a unique element that should be present in the non-empty, non-loading, non-error state
+    // For now, as per Task 1, we will test the empty state message
+    expect(screen.getByText("No chat completions found.")).toBeInTheDocument();
+  });
+
+  test("click on a row navigates to the correct URL", () => {
+    const { rerender } = render(<ChatCompletionsTable {...defaultProps} />);
+
+    // Simulate a scenario where a completion exists and is clicked
+    const mockCompletion: ChatCompletion = {
+      id: "comp_123",
+      object: "chat.completion",
+      created: Math.floor(Date.now() / 1000),
+      model: "llama-test-model",
+      choices: [
+        {
+          index: 0,
+          message: { role: "assistant", content: "Test output" },
+          finish_reason: "stop",
+        },
+      ],
+      input_messages: [{ role: "user", content: "Test input" }],
+    };
+
+    rerender(
+      <ChatCompletionsTable {...defaultProps} completions={[mockCompletion]} />,
+    );
+    const row = screen.getByText("Test input").closest("tr");
+    if (row) {
+      fireEvent.click(row);
+      expect(mockPush).toHaveBeenCalledWith("/logs/chat-completions/comp_123");
+    } else {
+      throw new Error('Row with "Test input" not found for router mock test.');
+    }
+  });
+
+  describe("Loading State", () => {
+    test("renders skeleton UI when isLoading is true", () => {
+      const { container } = render(
+        <ChatCompletionsTable {...defaultProps} isLoading={true} />,
+      );
+
+      // The Skeleton component uses data-slot="skeleton"
+      const skeletonSelector = '[data-slot="skeleton"]';
+
+      // Check for skeleton in the table caption
+      const tableCaption = container.querySelector("caption");
+      expect(tableCaption).toBeInTheDocument();
+      if (tableCaption) {
+        const captionSkeleton = tableCaption.querySelector(skeletonSelector);
+        expect(captionSkeleton).toBeInTheDocument();
+      }
+
+      // Check for skeletons in the table body cells
+      const tableBody = container.querySelector("tbody");
+      expect(tableBody).toBeInTheDocument();
+      if (tableBody) {
+        const bodySkeletons = tableBody.querySelectorAll(
+          `td ${skeletonSelector}`,
+        );
+        expect(bodySkeletons.length).toBeGreaterThan(0); // Ensure at least one skeleton cell exists
+      }
+
+      // General check: ensure multiple skeleton elements are present in the table overall
+      const allSkeletonsInTable = container.querySelectorAll(
+        `table ${skeletonSelector}`,
+      );
+      expect(allSkeletonsInTable.length).toBeGreaterThan(3); // e.g., caption + at least one row of 3 cells, or just a few
+    });
+  });
+
+  describe("Error State", () => {
+    test("renders error message when error prop is provided", () => {
+      const errorMessage = "Network Error";
+      render(
+        <ChatCompletionsTable
+          {...defaultProps}
+          error={new Error(errorMessage)}
+        />,
+      );
+      expect(
+        screen.getByText(`Error fetching data: ${errorMessage}`),
+      ).toBeInTheDocument();
+    });
+
+    test("renders default error message when error.message is not available", () => {
+      render(
+        <ChatCompletionsTable {...defaultProps} error={new Error("")} />,
+      ); // Error with empty message
+      expect(
+        screen.getByText("Error fetching data: An unknown error occurred"),
+      ).toBeInTheDocument();
+    });
+
+    test("renders default error message when error prop is an object without message", () => {
+      render(<ChatCompletionsTable {...defaultProps} error={{} as Error} />); // Empty error object
+      expect(
+        screen.getByText("Error fetching data: An unknown error occurred"),
+      ).toBeInTheDocument();
+    });
+  });
+
+  describe("Empty State", () => {
+    test('renders "No chat completions found." and no table when completions array is empty', () => {
+      render(
+        <ChatCompletionsTable {...defaultProps} completions={[]} />,
+      );
+      expect(
+        screen.getByText("No chat completions found."),
+      ).toBeInTheDocument();
+
+      // Ensure that the table structure is NOT rendered in the empty state
+      const table = screen.queryByRole("table");
+      expect(table).not.toBeInTheDocument();
+    });
+  });
+
+  describe("Data Rendering", () => {
+    test("renders table caption, headers, and completion data correctly", () => {
+      const mockCompletions = [
+        {
+          id: "comp_1",
+          object: "chat.completion",
+          created: 1710000000, // Fixed timestamp for test
+          model: "llama-test-model",
+          choices: [
+            {
+              index: 0,
+              message: { role: "assistant", content: "Test output" },
+              finish_reason: "stop",
+            },
+          ],
+          input_messages: [{ role: "user", content: "Test input" }],
+        },
+        {
+          id: "comp_2",
+          object: "chat.completion",
+          created: 1710001000,
+          model: "llama-another-model",
+          choices: [
+            {
+              index: 0,
+              message: { role: "assistant", content: "Another output" },
+              finish_reason: "stop",
+            },
+          ],
+          input_messages: [{ role: "user", content: "Another input" }],
+        },
+      ];
+
+      render(
+        <ChatCompletionsTable
+          {...defaultProps}
+          completions={mockCompletions}
+        />,
+      );
+
+      // Table caption
+      expect(
+        screen.getByText("A list of your recent chat completions."),
+      ).toBeInTheDocument();
+
+      // Table headers
+      expect(screen.getByText("Input")).toBeInTheDocument();
+      expect(screen.getByText("Output")).toBeInTheDocument();
+      expect(screen.getByText("Model")).toBeInTheDocument();
+      expect(screen.getByText("Created")).toBeInTheDocument();
+
+      // Data rows
+      expect(screen.getByText("Test input")).toBeInTheDocument();
+      expect(screen.getByText("Test output")).toBeInTheDocument();
+      expect(screen.getByText("llama-test-model")).toBeInTheDocument();
+      expect(
+        screen.getByText(new Date(1710000000 * 1000).toLocaleString()),
+      ).toBeInTheDocument();
+
+      expect(screen.getByText("Another input")).toBeInTheDocument();
+      expect(screen.getByText("Another output")).toBeInTheDocument();
+      expect(screen.getByText("llama-another-model")).toBeInTheDocument();
+      expect(
+        screen.getByText(new Date(1710001000 * 1000).toLocaleString()),
+      ).toBeInTheDocument();
+    });
+  });
+
+  describe("Text Truncation and Tool Call Formatting", () => {
+    test("truncates long input and output text", () => {
+      // Specific mock implementation for this test
+      truncateText.mockImplementation(
+        (text: string | undefined, maxLength?: number) => {
+          const defaultTestMaxLength = 10;
+          const effectiveMaxLength = maxLength ?? defaultTestMaxLength;
+          return typeof text === "string" && text.length > effectiveMaxLength
+            ? text.slice(0, effectiveMaxLength) + "..."
+            : text;
+        },
+      );
+
+      const longInput =
+        "This is a very long input message that should be truncated.";
+      const longOutput =
+        "This is a very long output message that should also be truncated.";
+      const mockCompletions = [
+        {
+          id: "comp_trunc",
+          object: "chat.completion",
+          created: 1710002000,
+          model: "llama-trunc-model",
+          choices: [
+            {
+              index: 0,
+              message: { role: "assistant", content: longOutput },
+              finish_reason: "stop",
+            },
+          ],
+          input_messages: [{ role: "user", content: longInput }],
+        },
+      ];
+
+      render(
+        <ChatCompletionsTable
+          {...defaultProps}
+          completions={mockCompletions}
+        />,
+      );
+
+      // The truncated text should be present for both input and output
+      const truncatedTexts = screen.getAllByText(
+        longInput.slice(0, 10) + "...",
+      );
+      expect(truncatedTexts.length).toBe(2); // one for input, one for output
+      // Optionally, verify each one is in the document if getAllByText doesn't throw on not found
+      truncatedTexts.forEach((textElement) =>
+        expect(textElement).toBeInTheDocument(),
+      );
+    });
+
+    test("formats tool call output using formatToolCallToString", () => {
+      // Specific mock implementation for this test
+      formatToolCallToString.mockImplementation(
+        (toolCall: any) => `[TOOL:${toolCall.name}]`,
+      );
+      // Ensure no truncation interferes for this specific test for clarity of tool call format
+      truncateText.mockImplementation((text: string | undefined) => text);
+
+      const toolCall = { name: "search", args: { query: "llama" } };
+      const mockCompletions = [
+        {
+          id: "comp_tool",
+          object: "chat.completion",
+          created: 1710003000,
+          model: "llama-tool-model",
+          choices: [
+            {
+              index: 0,
+              message: {
+                role: "assistant",
+                content: "Tool output", // Content that will be prepended
+                tool_calls: [toolCall],
+              },
+              finish_reason: "stop",
+            },
+          ],
+          input_messages: [{ role: "user", content: "Tool input" }],
+        },
+      ];
+
+      render(
+        <ChatCompletionsTable
+          {...defaultProps}
+          completions={mockCompletions}
+        />,
+      );
+
+      // The component concatenates message.content and the formatted tool call
+      expect(screen.getByText("Tool output [TOOL:search]")).toBeInTheDocument();
+    });
+  });
+});
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-table.tsx b/llama_stack/ui/components/chat-completions/chat-completion-table.tsx
new file mode 100644
index 000000000..e11acf376
--- /dev/null
+++ b/llama_stack/ui/components/chat-completions/chat-completion-table.tsx
@@ -0,0 +1,120 @@
+"use client";
+
+import { useRouter } from "next/navigation";
+import { ChatCompletion } from "@/lib/types";
+import { truncateText } from "@/lib/truncate-text";
+import {
+  extractTextFromContentPart,
+  extractDisplayableText,
+} from "@/lib/format-message-content";
+import {
+  Table,
+  TableBody,
+  TableCaption,
+  TableCell,
+  TableHead,
+  TableHeader,
+  TableRow,
+} from "@/components/ui/table";
+import { Skeleton } from "@/components/ui/skeleton";
+
+interface ChatCompletionsTableProps {
+  completions: ChatCompletion[];
+  isLoading: boolean;
+  error: Error | null;
+}
+
+export function ChatCompletionsTable({
+  completions,
+  isLoading,
+  error,
+}: ChatCompletionsTableProps) {
+  const router = useRouter();
+
+  const tableHeader = (
+    <TableHeader>
+      <TableRow>
+        <TableHead>Input</TableHead>
+        <TableHead>Output</TableHead>
+        <TableHead>Model</TableHead>
+        <TableHead>Created</TableHead>
+      </TableRow>
+    </TableHeader>
+  );
+
+  if (isLoading) {
+    return (
+      <Table>
+        <TableCaption>
+          <Skeleton />
+        </TableCaption>
+        {tableHeader}
+        <TableBody>
+          {[...Array(3)].map((_, i) => (
+            <TableRow key={i}>
+              <TableCell>
+                <Skeleton />
+              </TableCell>
+              <TableCell>
+                <Skeleton />
+              </TableCell>
+              <TableCell>
+                <Skeleton />
+              </TableCell>
+              <TableCell>
+                <Skeleton />
+              </TableCell>
+            </TableRow>
+          ))}
+        </TableBody>
+      </Table>
+    );
+  }
+
+  if (error) {
+    return (
+      <p>
+        Error fetching data: {error.message || "An unknown error occurred"}
+      </p>
+    );
+  }
+
+  if (completions.length === 0) {
+    return <p>No chat completions found.</p>;
+  }
+
+  return (
+    <Table>
+      <TableCaption>A list of your recent chat completions.</TableCaption>
+      {tableHeader}
+      <TableBody>
+        {completions.map((completion) => (
+          <TableRow
+            key={completion.id}
+            onClick={() =>
+              router.push(`/logs/chat-completions/${completion.id}`)
+            }
+            className="cursor-pointer hover:bg-muted/50"
+          >
+            <TableCell>
+              {truncateText(
+                extractTextFromContentPart(
+                  completion.input_messages?.[0]?.content,
+                ),
+              )}
+            </TableCell>
+            <TableCell>
+              {(() => {
+                const message = completion.choices?.[0]?.message;
+                const outputText = extractDisplayableText(message);
+                return truncateText(outputText);
+              })()}
+            </TableCell>
+            <TableCell>{completion.model}</TableCell>
+            <TableCell>
+              {new Date(completion.created * 1000).toLocaleString()}
+            </TableCell>
+          </TableRow>
+        ))}
+      </TableBody>
+    </Table>
+  );
+}
diff --git a/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx b/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx
new file mode 100644
index 000000000..58a009aed
--- /dev/null
+++ b/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx
@@ -0,0 +1,107 @@
+"use client";
+
+import { ChatMessage } from "@/lib/types";
+import React from "react";
+import { formatToolCallToString } from "@/lib/format-tool-call";
+import { extractTextFromContentPart } from "@/lib/format-message-content";
+
+// Sub-component or helper for the common label + content structure
+const MessageBlock: React.FC<{
+  label: string;
+  labelDetail?: string;
+  content: React.ReactNode;
+}> = ({ label, labelDetail, content }) => {
+  return (
+    <div>
+      <p>
+        {label}
+        {labelDetail && (
+          <span>
+            {labelDetail}
+          </span>
+        )}
+      </p>
+      <div>
+        {content}
+      </div>
+    </div>
+  );
+};
+
+interface ToolCallBlockProps {
+  children: React.ReactNode;
+  className?: string;
+}
+
+const ToolCallBlock = ({ children, className }: ToolCallBlockProps) => {
+  // Common styling for both function call arguments and tool output blocks
+  // Let's use slate-50 background as it's good for code-like content.
+  const baseClassName =
+    "p-3 bg-slate-50 border border-slate-200 rounded-md text-sm";
+
+  return (
+    <div className={`${baseClassName} ${className || ""}`}>
+      {children}
+    </div>
+  );
+};
+
+interface ChatMessageItemProps {
+  message: ChatMessage;
+}
+export function ChatMessageItem({ message }: ChatMessageItemProps) {
+  switch (message.role) {
+    case "system":
+      return (
+        <MessageBlock
+          label="System"
+          content={extractTextFromContentPart(message.content)}
+        />
+      );
+    case "user":
+      return (
+        <MessageBlock
+          label="User"
+          content={extractTextFromContentPart(message.content)}
+        />
+      );
+
+    case "assistant":
+      if (message.tool_calls && message.tool_calls.length > 0) {
+        return (
+          <>
+            {message.tool_calls.map((toolCall: any, index: number) => {
+              const formattedToolCall = formatToolCallToString(toolCall);
+              const toolCallContent = (
+                <ToolCallBlock>
+                  {formattedToolCall || "Error: Could not display tool call"}
+                </ToolCallBlock>
+              );
+              return (
+                <MessageBlock
+                  key={index}
+                  label="Tool Call"
+                  content={toolCallContent}
+                />
+              );
+            })}
+          </>
+        );
+      } else {
+        return (
+          <MessageBlock
+            label="Assistant"
+            content={extractTextFromContentPart(message.content)}
+          />
+        );
+      }
+    case "tool":
+      const toolOutputContent = (
+        <ToolCallBlock>
+          {extractTextFromContentPart(message.content)}
+        </ToolCallBlock>
+      );
+      return (
+        <MessageBlock
+          label="Tool Call Output"
+          content={toolOutputContent}
+        />
+      );
+  }
+  return null;
+}
diff --git a/llama_stack/ui/components/app-sidebar.tsx b/llama_stack/ui/components/layout/app-sidebar.tsx
similarity index 50%
rename from llama_stack/ui/components/app-sidebar.tsx
rename to llama_stack/ui/components/layout/app-sidebar.tsx
index 3d541856f..1c53d6cc5 100644
--- a/llama_stack/ui/components/app-sidebar.tsx
+++ b/llama_stack/ui/components/layout/app-sidebar.tsx
@@ -1,5 +1,9 @@
+"use client";
+
 import { MessageSquareText, MessagesSquare, MoveUpRight } from "lucide-react";
 import Link from "next/link";
+import { usePathname } from "next/navigation";
+import { cn } from "@/lib/utils";
 
 import {
   Sidebar,
@@ -32,6 +36,8 @@ const logItems = [
 ];
 
 export function AppSidebar() {
+  const pathname = usePathname();
+
   return (
     <Sidebar>
       <SidebarHeader>
@@ -42,16 +48,31 @@ export function AppSidebar() {
           <SidebarGroupLabel>Logs</SidebarGroupLabel>
           <SidebarGroupContent>
             <SidebarMenu>
-              {logItems.map((item) => (
-                <SidebarMenuItem key={item.title}>
-                  <SidebarMenuButton asChild>
-                    <Link href={item.url}>
-                      <item.icon />
-                      <span>{item.title}</span>
-                    </Link>
-                  </SidebarMenuButton>
-                </SidebarMenuItem>
-              ))}
+              {logItems.map((item) => {
+                const isActive = pathname.startsWith(item.url);
+                return (
+                  <SidebarMenuItem key={item.title}>
+                    <SidebarMenuButton
+                      asChild
+                      className={cn(isActive && "bg-muted")}
+                    >
+                      <Link href={item.url}>
+                        <item.icon />
+                        <span>{item.title}</span>
+                      </Link>
+                    </SidebarMenuButton>
+                  </SidebarMenuItem>
+                );
+              })}
             </SidebarMenu>
           </SidebarGroupContent>
         </SidebarGroup>
diff --git a/llama_stack/ui/components/layout/page-breadcrumb.tsx b/llama_stack/ui/components/layout/page-breadcrumb.tsx
new file mode 100644
index 000000000..fdb561d68
--- /dev/null
+++ b/llama_stack/ui/components/layout/page-breadcrumb.tsx
@@ -0,0 +1,49 @@
+"use client";
+
+import Link from "next/link";
+import React from "react";
+import {
+  Breadcrumb,
+  BreadcrumbItem,
+  BreadcrumbLink,
+  BreadcrumbList,
+  BreadcrumbPage,
+  BreadcrumbSeparator,
+} from "@/components/ui/breadcrumb";
+
+export interface BreadcrumbSegment {
+  label: string;
+  href?: string;
+}
+
+interface PageBreadcrumbProps {
+  segments: BreadcrumbSegment[];
+  className?: string;
+}
+
+export function PageBreadcrumb({ segments, className }: PageBreadcrumbProps) {
+  if (!segments || segments.length === 0) {
+    return null;
+  }
+
+  return (
+    <Breadcrumb className={className}>
+      <BreadcrumbList>
+        {segments.map((segment, index) => (
+          <React.Fragment key={index}>
+            <BreadcrumbItem>
+              {segment.href ? (
+                <BreadcrumbLink asChild>
+                  <Link href={segment.href}>{segment.label}</Link>
+                </BreadcrumbLink>
+              ) : (
+                <BreadcrumbPage>{segment.label}</BreadcrumbPage>
+              )}
+            </BreadcrumbItem>
+            {index < segments.length - 1 && <BreadcrumbSeparator />}
+          </React.Fragment>
+        ))}
+      </BreadcrumbList>
+    </Breadcrumb>
+  );
+}
diff --git a/llama_stack/ui/components/ui/breadcrumb.tsx b/llama_stack/ui/components/ui/breadcrumb.tsx
new file mode 100644
index 000000000..f63ae19af
--- /dev/null
+++ b/llama_stack/ui/components/ui/breadcrumb.tsx
@@ -0,0 +1,109 @@
+import * as React from "react";
+import { Slot } from "@radix-ui/react-slot";
+import { ChevronRight, MoreHorizontal } from "lucide-react";
+
+import { cn } from "@/lib/utils";
+
+function Breadcrumb({ ...props }: React.ComponentProps<"nav">) {
+  return <nav