diff --git a/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx b/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx
index f7c2580da..e6feef363 100644
--- a/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx
+++ b/llama_stack/ui/app/logs/chat-completions/[id]/page.tsx
@@ -2,9 +2,9 @@
import { useEffect, useState } from "react";
import { useParams } from "next/navigation";
-import LlamaStackClient from "llama-stack-client";
import { ChatCompletion } from "@/lib/types";
import { ChatCompletionDetailView } from "@/components/chat-completions/chat-completion-detail";
+import { client } from "@/lib/client";
export default function ChatCompletionDetailPage() {
const params = useParams();
@@ -22,10 +22,6 @@ export default function ChatCompletionDetailPage() {
return;
}
- const client = new LlamaStackClient({
- baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
- });
-
const fetchCompletionDetail = async () => {
setIsLoading(true);
setError(null);
diff --git a/llama_stack/ui/app/logs/chat-completions/layout.tsx b/llama_stack/ui/app/logs/chat-completions/layout.tsx
index 3dd8c1222..f4dbfc782 100644
--- a/llama_stack/ui/app/logs/chat-completions/layout.tsx
+++ b/llama_stack/ui/app/logs/chat-completions/layout.tsx
@@ -1,45 +1,19 @@
"use client";
import React from "react";
-import { usePathname, useParams } from "next/navigation";
-import {
- PageBreadcrumb,
- BreadcrumbSegment,
-} from "@/components/layout/page-breadcrumb";
-import { truncateText } from "@/lib/truncate-text";
+import LogsLayout from "@/components/layout/logs-layout";
export default function ChatCompletionsLayout({
children,
}: {
children: React.ReactNode;
}) {
- const pathname = usePathname();
- const params = useParams();
-
- let segments: BreadcrumbSegment[] = [];
-
- // Default for /logs/chat-completions
- if (pathname === "/logs/chat-completions") {
- segments = [{ label: "Chat Completions" }];
- }
-
- // For /logs/chat-completions/[id]
- const idParam = params?.id;
- if (idParam && typeof idParam === "string") {
- segments = [
- { label: "Chat Completions", href: "/logs/chat-completions" },
- { label: `Details (${truncateText(idParam, 20)})` },
- ];
- }
-
return (
-
- <>
- {segments.length > 0 && (
-
- )}
- {children}
- >
-
+    <LogsLayout
+      sectionLabel="Chat Completions"
+      basePath="/logs/chat-completions"
+    >
+      {children}
+    </LogsLayout>
);
}
diff --git a/llama_stack/ui/app/logs/chat-completions/page.tsx b/llama_stack/ui/app/logs/chat-completions/page.tsx
index 3de77a042..5bbfcce94 100644
--- a/llama_stack/ui/app/logs/chat-completions/page.tsx
+++ b/llama_stack/ui/app/logs/chat-completions/page.tsx
@@ -1,9 +1,9 @@
"use client";
import { useEffect, useState } from "react";
-import LlamaStackClient from "llama-stack-client";
import { ChatCompletion } from "@/lib/types";
-import { ChatCompletionsTable } from "@/components/chat-completions/chat-completion-table";
+import { ChatCompletionsTable } from "@/components/chat-completions/chat-completions-table";
+import { client } from "@/lib/client";
export default function ChatCompletionsPage() {
const [completions, setCompletions] = useState<ChatCompletion[]>([]);
@@ -11,9 +11,6 @@ export default function ChatCompletionsPage() {
const [error, setError] = useState<Error | null>(null);
useEffect(() => {
- const client = new LlamaStackClient({
- baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
- });
const fetchCompletions = async () => {
setIsLoading(true);
setError(null);
@@ -21,7 +18,7 @@ export default function ChatCompletionsPage() {
const response = await client.chat.completions.list();
const data = Array.isArray(response)
? response
- : (response as any).data;
+ : (response as { data: ChatCompletion[] }).data;
if (Array.isArray(data)) {
setCompletions(data);
@@ -46,7 +43,7 @@ export default function ChatCompletionsPage() {
return (
diff --git a/llama_stack/ui/app/logs/responses/[id]/page.tsx b/llama_stack/ui/app/logs/responses/[id]/page.tsx
new file mode 100644
index 000000000..efe6f0ff3
--- /dev/null
+++ b/llama_stack/ui/app/logs/responses/[id]/page.tsx
@@ -0,0 +1,125 @@
+"use client";
+
+import { useEffect, useState } from "react";
+import { useParams } from "next/navigation";
+import type { ResponseObject } from "llama-stack-client/resources/responses/responses";
+import { OpenAIResponse, InputItemListResponse } from "@/lib/types";
+import { ResponseDetailView } from "@/components/responses/responses-detail";
+import { client } from "@/lib/client";
+
+export default function ResponseDetailPage() {
+ const params = useParams();
+ const id = params.id as string;
+
+  const [responseDetail, setResponseDetail] = useState<OpenAIResponse | null>(
+    null,
+  );
+  const [inputItems, setInputItems] = useState<InputItemListResponse | null>(
+    null,
+  );
+ const [isLoading, setIsLoading] = useState(true);
+ const [isLoadingInputItems, setIsLoadingInputItems] = useState(true);
+  const [error, setError] = useState<Error | null>(null);
+  const [inputItemsError, setInputItemsError] = useState<Error | null>(null);
+
+ // Helper function to convert ResponseObject to OpenAIResponse
+ const convertResponseObject = (
+ responseData: ResponseObject,
+ ): OpenAIResponse => {
+ return {
+ id: responseData.id,
+ created_at: responseData.created_at,
+ model: responseData.model,
+ object: responseData.object,
+ status: responseData.status,
+ output: responseData.output as OpenAIResponse["output"],
+ input: [], // ResponseObject doesn't include input; component uses inputItems prop instead
+ error: responseData.error,
+ parallel_tool_calls: responseData.parallel_tool_calls,
+ previous_response_id: responseData.previous_response_id,
+ temperature: responseData.temperature,
+ top_p: responseData.top_p,
+ truncation: responseData.truncation,
+ user: responseData.user,
+ };
+ };
+
+ useEffect(() => {
+ if (!id) {
+ setError(new Error("Response ID is missing."));
+ setIsLoading(false);
+ return;
+ }
+
+ const fetchResponseDetail = async () => {
+ setIsLoading(true);
+ setIsLoadingInputItems(true);
+ setError(null);
+ setInputItemsError(null);
+ setResponseDetail(null);
+ setInputItems(null);
+
+ try {
+ const [responseResult, inputItemsResult] = await Promise.allSettled([
+ client.responses.retrieve(id),
+ client.responses.inputItems.list(id, { order: "asc" }),
+ ]);
+
+ // Handle response detail result
+ if (responseResult.status === "fulfilled") {
+ const convertedResponse = convertResponseObject(responseResult.value);
+ setResponseDetail(convertedResponse);
+ } else {
+ console.error(
+ `Error fetching response detail for ID ${id}:`,
+ responseResult.reason,
+ );
+ setError(
+ responseResult.reason instanceof Error
+ ? responseResult.reason
+ : new Error("Failed to fetch response detail"),
+ );
+ }
+
+ // Handle input items result
+ if (inputItemsResult.status === "fulfilled") {
+ const inputItemsData =
+ inputItemsResult.value as unknown as InputItemListResponse;
+ setInputItems(inputItemsData);
+ } else {
+ console.error(
+ `Error fetching input items for response ID ${id}:`,
+ inputItemsResult.reason,
+ );
+ setInputItemsError(
+ inputItemsResult.reason instanceof Error
+ ? inputItemsResult.reason
+ : new Error("Failed to fetch input items"),
+ );
+ }
+ } catch (err) {
+ console.error(`Unexpected error fetching data for ID ${id}:`, err);
+ setError(
+ err instanceof Error ? err : new Error("Unexpected error occurred"),
+ );
+ } finally {
+ setIsLoading(false);
+ setIsLoadingInputItems(false);
+ }
+ };
+
+ fetchResponseDetail();
+ }, [id]);
+
+ return (
+    <ResponseDetailView
+      response={responseDetail}
+      inputItems={inputItems}
+      isLoading={isLoading}
+      isLoadingInputItems={isLoadingInputItems}
+      error={error}
+      inputItemsError={inputItemsError}
+      id={id}
+    />
+ );
+}
diff --git a/llama_stack/ui/app/logs/responses/layout.tsx b/llama_stack/ui/app/logs/responses/layout.tsx
new file mode 100644
index 000000000..1fe116e5e
--- /dev/null
+++ b/llama_stack/ui/app/logs/responses/layout.tsx
@@ -0,0 +1,16 @@
+"use client";
+
+import React from "react";
+import LogsLayout from "@/components/layout/logs-layout";
+
+export default function ResponsesLayout({
+ children,
+}: {
+ children: React.ReactNode;
+}) {
+ return (
+    <LogsLayout sectionLabel="Responses" basePath="/logs/responses">
+      {children}
+    </LogsLayout>
+ );
+}
diff --git a/llama_stack/ui/app/logs/responses/page.tsx b/llama_stack/ui/app/logs/responses/page.tsx
index cdc165d08..dab0c735f 100644
--- a/llama_stack/ui/app/logs/responses/page.tsx
+++ b/llama_stack/ui/app/logs/responses/page.tsx
@@ -1,7 +1,66 @@
-export default function Responses() {
+"use client";
+
+import { useEffect, useState } from "react";
+import type { ResponseListResponse } from "llama-stack-client/resources/responses/responses";
+import { OpenAIResponse } from "@/lib/types";
+import { ResponsesTable } from "@/components/responses/responses-table";
+import { client } from "@/lib/client";
+
+export default function ResponsesPage() {
+  const [responses, setResponses] = useState<OpenAIResponse[]>([]);
+ const [isLoading, setIsLoading] = useState(true);
+  const [error, setError] = useState<Error | null>(null);
+
+ // Helper function to convert ResponseListResponse.Data to OpenAIResponse
+ const convertResponseListData = (
+ responseData: ResponseListResponse.Data,
+ ): OpenAIResponse => {
+ return {
+ id: responseData.id,
+ created_at: responseData.created_at,
+ model: responseData.model,
+ object: responseData.object,
+ status: responseData.status,
+ output: responseData.output as OpenAIResponse["output"],
+ input: responseData.input as OpenAIResponse["input"],
+ error: responseData.error,
+ parallel_tool_calls: responseData.parallel_tool_calls,
+ previous_response_id: responseData.previous_response_id,
+ temperature: responseData.temperature,
+ top_p: responseData.top_p,
+ truncation: responseData.truncation,
+ user: responseData.user,
+ };
+ };
+
+ useEffect(() => {
+ const fetchResponses = async () => {
+ setIsLoading(true);
+ setError(null);
+ try {
+ const response = await client.responses.list();
+ const responseListData = response as ResponseListResponse;
+
+ const convertedResponses: OpenAIResponse[] = responseListData.data.map(
+ convertResponseListData,
+ );
+
+ setResponses(convertedResponses);
+ } catch (err) {
+ console.error("Error fetching responses:", err);
+ setError(
+ err instanceof Error ? err : new Error("Failed to fetch responses"),
+ );
+ setResponses([]);
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ fetchResponses();
+ }, []);
+
return (
-
-
Under Construction
-
+    <ResponsesTable data={responses} isLoading={isLoading} error={error} />
);
}
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx b/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx
index 33247ed26..5348dbc3a 100644
--- a/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx
+++ b/llama_stack/ui/components/chat-completions/chat-completion-detail.test.tsx
@@ -75,7 +75,7 @@ describe("ChatCompletionDetailView", () => {
/>,
);
expect(
- screen.getByText("No details found for completion ID: notfound-id."),
+ screen.getByText("No details found for ID: notfound-id."),
).toBeInTheDocument();
});
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx b/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx
index e76418d1a..200807864 100644
--- a/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx
+++ b/llama_stack/ui/components/chat-completions/chat-completion-detail.tsx
@@ -3,45 +3,14 @@
import { ChatMessage, ChatCompletion } from "@/lib/types";
import { ChatMessageItem } from "@/components/chat-completions/chat-messasge-item";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
-import { Skeleton } from "@/components/ui/skeleton";
-
-function ChatCompletionDetailLoadingView() {
- return (
- <>
- {/* Title Skeleton */}
-
-
- {[...Array(2)].map((_, i) => (
-
-
-
-
-
-
-
-
-
-
-
-
- ))}
-
-
-
-
{" "}
- {/* Properties Title Skeleton */}
- {[...Array(5)].map((_, i) => (
-
-
-
-
- ))}
-
-
-
- >
- );
-}
+import {
+ DetailLoadingView,
+ DetailErrorView,
+ DetailNotFoundView,
+ DetailLayout,
+ PropertiesCard,
+ PropertyItem,
+} from "@/components/layout/detail-layout";
interface ChatCompletionDetailViewProps {
completion: ChatCompletion | null;
@@ -56,143 +25,121 @@ export function ChatCompletionDetailView({
error,
id,
}: ChatCompletionDetailViewProps) {
+ const title = "Chat Completion Details";
+
if (error) {
- return (
- <>
- {/* We still want a title for consistency on error pages */}
- Chat Completion Details
-
- Error loading details for ID {id}: {error.message}
-
- >
- );
    return <DetailErrorView title={title} id={id} error={error} />;
}
if (isLoading) {
- return ;
    return <DetailLoadingView title={title} />;
}
if (!completion) {
- // This state means: not loading, no error, but no completion data
- return (
- <>
- {/* We still want a title for consistency on not-found pages */}
- Chat Completion Details
- No details found for completion ID: {id}.
- >
- );
    return <DetailNotFoundView title={title} id={id} />;
}
- // If no error, not loading, and completion exists, render the details:
- return (
+ // Main content cards
+ const mainContent = (
<>
- Chat Completion Details
-
-
-
-
- Input
-
-
- {completion.input_messages?.map((msg, index) => (
-
- ))}
- {completion.choices?.[0]?.message?.tool_calls &&
- !completion.input_messages?.some(
- (im) =>
- im.role === "assistant" &&
- im.tool_calls &&
- im.tool_calls.length > 0,
- ) &&
- completion.choices[0].message.tool_calls.map(
- (toolCall: any, index: number) => {
- const assistantToolCallMessage: ChatMessage = {
- role: "assistant",
- tool_calls: [toolCall],
- content: "", // Ensure content is defined, even if empty
- };
- return (
-
- );
- },
- )}
-
-
+
+
+ Input
+
+
+ {completion.input_messages?.map((msg, index) => (
+
+ ))}
+ {completion.choices?.[0]?.message?.tool_calls &&
+ Array.isArray(completion.choices[0].message.tool_calls) &&
+ !completion.input_messages?.some(
+ (im) =>
+ im.role === "assistant" &&
+ im.tool_calls &&
+ Array.isArray(im.tool_calls) &&
+ im.tool_calls.length > 0,
+ )
+ ? completion.choices[0].message.tool_calls.map(
+ (toolCall: any, index: number) => {
+ const assistantToolCallMessage: ChatMessage = {
+ role: "assistant",
+ tool_calls: [toolCall],
+ content: "", // Ensure content is defined, even if empty
+ };
+ return (
+
+ );
+ },
+ )
+ : null}
+
+
-
-
- Output
-
-
- {completion.choices?.[0]?.message ? (
-
- ) : (
-
- No message found in assistant's choice.
-
- )}
-
-
-
-
-
-
-
- Properties
-
-
-
- -
- Created:{" "}
-
- {new Date(completion.created * 1000).toLocaleString()}
-
-
- -
- ID:{" "}
-
- {completion.id}
-
-
- -
- Model:{" "}
-
- {completion.model}
-
-
- -
- Finish Reason:{" "}
-
- {completion.choices?.[0]?.finish_reason || "N/A"}
-
-
- {completion.choices?.[0]?.message?.tool_calls &&
- completion.choices[0].message.tool_calls.length > 0 && (
- -
- Functions/Tools Called:
-
- {completion.choices[0].message.tool_calls.map(
- (toolCall: any, index: number) => (
- -
-
- {toolCall.function?.name || "N/A"}
-
-
- ),
- )}
-
-
- )}
-
-
-
-
-
+
+
+ Output
+
+
+ {completion.choices?.[0]?.message ? (
+
+ ) : (
+
+ No message found in assistant's choice.
+
+ )}
+
+
>
);
+
+ // Properties sidebar
+ const sidebar = (
+
+
+
+
+
+ {(() => {
+ const toolCalls = completion.choices?.[0]?.message?.tool_calls;
+ if (toolCalls && Array.isArray(toolCalls) && toolCalls.length > 0) {
+ return (
+
+
+ {toolCalls.map((toolCall: any, index: number) => (
+ -
+
+ {toolCall.function?.name || "N/A"}
+
+
+ ))}
+
+
+ }
+ hasBorder
+ />
+ );
+ }
+ return null;
+ })()}
+
+ );
+
+ return (
+
+ );
}
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx b/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx
index e71ef3d43..c8a55b100 100644
--- a/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx
+++ b/llama_stack/ui/components/chat-completions/chat-completion-table.test.tsx
@@ -1,8 +1,8 @@
import React from "react";
import { render, screen, fireEvent } from "@testing-library/react";
import "@testing-library/jest-dom";
-import { ChatCompletionsTable } from "./chat-completion-table";
-import { ChatCompletion } from "@/lib/types"; // Assuming this path is correct
+import { ChatCompletionsTable } from "./chat-completions-table";
+import { ChatCompletion } from "@/lib/types";
// Mock next/navigation
const mockPush = jest.fn();
@@ -13,21 +13,25 @@ jest.mock("next/navigation", () => ({
}));
// Mock helper functions
-// These are hoisted, so their mocks are available throughout the file
jest.mock("@/lib/truncate-text");
-jest.mock("@/lib/format-tool-call");
+jest.mock("@/lib/format-message-content");
// Import the mocked functions to set up default or specific implementations
import { truncateText as originalTruncateText } from "@/lib/truncate-text";
-import { formatToolCallToString as originalFormatToolCallToString } from "@/lib/format-tool-call";
+import {
+ extractTextFromContentPart as originalExtractTextFromContentPart,
+ extractDisplayableText as originalExtractDisplayableText,
+} from "@/lib/format-message-content";
// Cast to jest.Mock for typings
const truncateText = originalTruncateText as jest.Mock;
-const formatToolCallToString = originalFormatToolCallToString as jest.Mock;
+const extractTextFromContentPart =
+ originalExtractTextFromContentPart as jest.Mock;
+const extractDisplayableText = originalExtractDisplayableText as jest.Mock;
describe("ChatCompletionsTable", () => {
const defaultProps = {
- completions: [] as ChatCompletion[],
+ data: [] as ChatCompletion[],
isLoading: false,
error: null,
};
@@ -36,28 +40,26 @@ describe("ChatCompletionsTable", () => {
// Reset all mocks before each test
mockPush.mockClear();
truncateText.mockClear();
- formatToolCallToString.mockClear();
+ extractTextFromContentPart.mockClear();
+ extractDisplayableText.mockClear();
- // Default pass-through implementation for tests not focusing on truncation/formatting
+ // Default pass-through implementations
truncateText.mockImplementation((text: string | undefined) => text);
- formatToolCallToString.mockImplementation((toolCall: any) =>
- toolCall && typeof toolCall === "object" && toolCall.name
- ? `[DefaultToolCall:${toolCall.name}]`
- : "[InvalidToolCall]",
+ extractTextFromContentPart.mockImplementation((content: unknown) =>
+ typeof content === "string" ? content : "extracted text",
+ );
+ extractDisplayableText.mockImplementation(
+ (message: unknown) =>
+ (message as { content?: string })?.content || "extracted output",
);
});
test("renders without crashing with default props", () => {
render();
- // Check for a unique element that should be present in the non-empty, non-loading, non-error state
- // For now, as per Task 1, we will test the empty state message
expect(screen.getByText("No chat completions found.")).toBeInTheDocument();
});
test("click on a row navigates to the correct URL", () => {
- const { rerender } = render();
-
- // Simulate a scenario where a completion exists and is clicked
const mockCompletion: ChatCompletion = {
id: "comp_123",
object: "chat.completion",
@@ -73,9 +75,12 @@ describe("ChatCompletionsTable", () => {
input_messages: [{ role: "user", content: "Test input" }],
};
- rerender(
- ,
- );
+ // Set up mocks to return expected values
+ extractTextFromContentPart.mockReturnValue("Test input");
+ extractDisplayableText.mockReturnValue("Test output");
+
+ render();
+
const row = screen.getByText("Test input").closest("tr");
if (row) {
fireEvent.click(row);
@@ -91,14 +96,13 @@ describe("ChatCompletionsTable", () => {
,
);
- // The Skeleton component uses data-slot="skeleton"
- const skeletonSelector = '[data-slot="skeleton"]';
-
// Check for skeleton in the table caption
const tableCaption = container.querySelector("caption");
expect(tableCaption).toBeInTheDocument();
if (tableCaption) {
- const captionSkeleton = tableCaption.querySelector(skeletonSelector);
+ const captionSkeleton = tableCaption.querySelector(
+ '[data-slot="skeleton"]',
+ );
expect(captionSkeleton).toBeInTheDocument();
}
@@ -107,16 +111,10 @@ describe("ChatCompletionsTable", () => {
expect(tableBody).toBeInTheDocument();
if (tableBody) {
const bodySkeletons = tableBody.querySelectorAll(
- `td ${skeletonSelector}`,
+ '[data-slot="skeleton"]',
);
- expect(bodySkeletons.length).toBeGreaterThan(0); // Ensure at least one skeleton cell exists
+ expect(bodySkeletons.length).toBeGreaterThan(0);
}
-
- // General check: ensure multiple skeleton elements are present in the table overall
- const allSkeletonsInTable = container.querySelectorAll(
- `table ${skeletonSelector}`,
- );
- expect(allSkeletonsInTable.length).toBeGreaterThan(3); // e.g., caption + at least one row of 3 cells, or just a few
});
});
@@ -140,14 +138,14 @@ describe("ChatCompletionsTable", () => {
{...defaultProps}
error={{ name: "Error", message: "" }}
/>,
- ); // Error with empty message
+ );
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
).toBeInTheDocument();
});
test("renders default error message when error prop is an object without message", () => {
- render(); // Empty error object
+ render();
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
).toBeInTheDocument();
@@ -155,14 +153,8 @@ describe("ChatCompletionsTable", () => {
});
describe("Empty State", () => {
- test('renders "No chat completions found." and no table when completions array is empty', () => {
- render(
- ,
- );
+ test('renders "No chat completions found." and no table when data array is empty', () => {
+ render();
expect(
screen.getByText("No chat completions found."),
).toBeInTheDocument();
@@ -179,7 +171,7 @@ describe("ChatCompletionsTable", () => {
{
id: "comp_1",
object: "chat.completion",
- created: 1710000000, // Fixed timestamp for test
+ created: 1710000000,
model: "llama-test-model",
choices: [
{
@@ -206,9 +198,22 @@ describe("ChatCompletionsTable", () => {
},
];
+ // Set up mocks to return expected values
+ extractTextFromContentPart.mockImplementation((content: unknown) => {
+ if (content === "Test input") return "Test input";
+ if (content === "Another input") return "Another input";
+ return "extracted text";
+ });
+ extractDisplayableText.mockImplementation((message: unknown) => {
+ const msg = message as { content?: string };
+ if (msg?.content === "Test output") return "Test output";
+ if (msg?.content === "Another output") return "Another output";
+ return "extracted output";
+ });
+
render(
,
@@ -242,7 +247,7 @@ describe("ChatCompletionsTable", () => {
});
});
- describe("Text Truncation and Tool Call Formatting", () => {
+ describe("Text Truncation and Content Extraction", () => {
test("truncates long input and output text", () => {
// Specific mock implementation for this test
truncateText.mockImplementation(
@@ -259,6 +264,10 @@ describe("ChatCompletionsTable", () => {
"This is a very long input message that should be truncated.";
const longOutput =
"This is a very long output message that should also be truncated.";
+
+ extractTextFromContentPart.mockReturnValue(longInput);
+ extractDisplayableText.mockReturnValue(longOutput);
+
const mockCompletions = [
{
id: "comp_trunc",
@@ -278,7 +287,7 @@ describe("ChatCompletionsTable", () => {
render(
,
@@ -289,52 +298,50 @@ describe("ChatCompletionsTable", () => {
longInput.slice(0, 10) + "...",
);
expect(truncatedTexts.length).toBe(2); // one for input, one for output
- // Optionally, verify each one is in the document if getAllByText doesn't throw on not found
truncatedTexts.forEach((textElement) =>
expect(textElement).toBeInTheDocument(),
);
});
- test("formats tool call output using formatToolCallToString", () => {
- // Specific mock implementation for this test
- formatToolCallToString.mockImplementation(
- (toolCall: any) => `[TOOL:${toolCall.name}]`,
- );
- // Ensure no truncation interferes for this specific test for clarity of tool call format
- truncateText.mockImplementation((text: string | undefined) => text);
+ test("uses content extraction functions correctly", () => {
+ const mockCompletion = {
+ id: "comp_extract",
+ object: "chat.completion",
+ created: 1710003000,
+ model: "llama-extract-model",
+ choices: [
+ {
+ index: 0,
+ message: { role: "assistant", content: "Extracted output" },
+ finish_reason: "stop",
+ },
+ ],
+ input_messages: [{ role: "user", content: "Extracted input" }],
+ };
- const toolCall = { name: "search", args: { query: "llama" } };
- const mockCompletions = [
- {
- id: "comp_tool",
- object: "chat.completion",
- created: 1710003000,
- model: "llama-tool-model",
- choices: [
- {
- index: 0,
- message: {
- role: "assistant",
- content: "Tool output", // Content that will be prepended
- tool_calls: [toolCall],
- },
- finish_reason: "stop",
- },
- ],
- input_messages: [{ role: "user", content: "Tool input" }],
- },
- ];
+ extractTextFromContentPart.mockReturnValue("Extracted input");
+ extractDisplayableText.mockReturnValue("Extracted output");
render(
,
);
- // The component concatenates message.content and the formatted tool call
- expect(screen.getByText("Tool output [TOOL:search]")).toBeInTheDocument();
+ // Verify the extraction functions were called
+ expect(extractTextFromContentPart).toHaveBeenCalledWith(
+ "Extracted input",
+ );
+ expect(extractDisplayableText).toHaveBeenCalledWith({
+ role: "assistant",
+ content: "Extracted output",
+ });
+
+ // Verify the extracted content is displayed
+ expect(screen.getByText("Extracted input")).toBeInTheDocument();
+ expect(screen.getByText("Extracted output")).toBeInTheDocument();
});
});
});
diff --git a/llama_stack/ui/components/chat-completions/chat-completions-table.tsx b/llama_stack/ui/components/chat-completions/chat-completions-table.tsx
new file mode 100644
index 000000000..5f1d2f03d
--- /dev/null
+++ b/llama_stack/ui/components/chat-completions/chat-completions-table.tsx
@@ -0,0 +1,43 @@
+"use client";
+
+import { ChatCompletion } from "@/lib/types";
+import { LogsTable, LogTableRow } from "@/components/logs/logs-table";
+import {
+ extractTextFromContentPart,
+ extractDisplayableText,
+} from "@/lib/format-message-content";
+
+interface ChatCompletionsTableProps {
+ data: ChatCompletion[];
+ isLoading: boolean;
+ error: Error | null;
+}
+
+function formatChatCompletionToRow(completion: ChatCompletion): LogTableRow {
+ return {
+ id: completion.id,
+ input: extractTextFromContentPart(completion.input_messages?.[0]?.content),
+ output: extractDisplayableText(completion.choices?.[0]?.message),
+ model: completion.model,
+ createdTime: new Date(completion.created * 1000).toLocaleString(),
+ detailPath: `/logs/chat-completions/${completion.id}`,
+ };
+}
+
+export function ChatCompletionsTable({
+ data,
+ isLoading,
+ error,
+}: ChatCompletionsTableProps) {
+ const formattedData = data.map(formatChatCompletionToRow);
+
+  return (
+    <LogsTable
+      data={formattedData}
+      isLoading={isLoading}
+      error={error}
+      caption="A list of your recent chat completions."
+      emptyMessage="No chat completions found."
+    />
+  );
+}
diff --git a/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx b/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx
index 58a009aed..2e8593bfb 100644
--- a/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx
+++ b/llama_stack/ui/components/chat-completions/chat-messasge-item.tsx
@@ -4,45 +4,10 @@ import { ChatMessage } from "@/lib/types";
import React from "react";
import { formatToolCallToString } from "@/lib/format-tool-call";
import { extractTextFromContentPart } from "@/lib/format-message-content";
-
-// Sub-component or helper for the common label + content structure
-const MessageBlock: React.FC<{
- label: string;
- labelDetail?: string;
- content: React.ReactNode;
-}> = ({ label, labelDetail, content }) => {
- return (
-
-
- {label}
- {labelDetail && (
-
- {labelDetail}
-
- )}
-
-
{content}
-
- );
-};
-
-interface ToolCallBlockProps {
- children: React.ReactNode;
- className?: string;
-}
-
-const ToolCallBlock = ({ children, className }: ToolCallBlockProps) => {
- // Common styling for both function call arguments and tool output blocks
- // Let's use slate-50 background as it's good for code-like content.
- const baseClassName =
- "p-3 bg-slate-50 border border-slate-200 rounded-md text-sm";
-
- return (
-
- );
-};
+import {
+ MessageBlock,
+ ToolCallBlock,
+} from "@/components/ui/message-components";
interface ChatMessageItemProps {
message: ChatMessage;
@@ -65,7 +30,11 @@ export function ChatMessageItem({ message }: ChatMessageItemProps) {
);
case "assistant":
- if (message.tool_calls && message.tool_calls.length > 0) {
+ if (
+ message.tool_calls &&
+ Array.isArray(message.tool_calls) &&
+ message.tool_calls.length > 0
+ ) {
return (
<>
{message.tool_calls.map((toolCall: any, index: number) => {
diff --git a/llama_stack/ui/components/layout/detail-layout.tsx b/llama_stack/ui/components/layout/detail-layout.tsx
new file mode 100644
index 000000000..58b912703
--- /dev/null
+++ b/llama_stack/ui/components/layout/detail-layout.tsx
@@ -0,0 +1,141 @@
+import React from "react";
+import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
+import { Skeleton } from "@/components/ui/skeleton";
+
+export function DetailLoadingView({ title }: { title: string }) {
+ return (
+ <>
+ {/* Title Skeleton */}
+
+
+ {[...Array(2)].map((_, i) => (
+
+
+
+
+
+
+
+
+
+
+
+
+ ))}
+
+
+
+
{" "}
+ {/* Properties Title Skeleton */}
+ {[...Array(5)].map((_, i) => (
+
+
+
+
+ ))}
+
+
+
+ >
+ );
+}
+
+export function DetailErrorView({
+ title,
+ id,
+ error,
+}: {
+ title: string;
+ id: string;
+ error: Error;
+}) {
+ return (
+ <>
+ {title}
+
+ Error loading details for ID {id}: {error.message}
+
+ >
+ );
+}
+
+export function DetailNotFoundView({
+ title,
+ id,
+}: {
+ title: string;
+ id: string;
+}) {
+ return (
+ <>
+ {title}
+ No details found for ID: {id}.
+ >
+ );
+}
+
+export interface PropertyItemProps {
+ label: string;
+ value: React.ReactNode;
+ className?: string;
+ hasBorder?: boolean;
+}
+
+export function PropertyItem({
+ label,
+ value,
+ className = "",
+ hasBorder = false,
+}: PropertyItemProps) {
+ return (
+
+ {label}:{" "}
+ {typeof value === "string" || typeof value === "number" ? (
+ {value}
+ ) : (
+ value
+ )}
+
+ );
+}
+
+export interface PropertiesCardProps {
+ children: React.ReactNode;
+}
+
+export function PropertiesCard({ children }: PropertiesCardProps) {
+ return (
+
+
+ Properties
+
+
+
+
+
+ );
+}
+
+export interface DetailLayoutProps {
+ title: string;
+ mainContent: React.ReactNode;
+ sidebar: React.ReactNode;
+}
+
+export function DetailLayout({
+ title,
+ mainContent,
+ sidebar,
+}: DetailLayoutProps) {
+ return (
+ <>
+ {title}
+
+
{mainContent}
+
{sidebar}
+
+ >
+ );
+}
diff --git a/llama_stack/ui/components/layout/logs-layout.tsx b/llama_stack/ui/components/layout/logs-layout.tsx
new file mode 100644
index 000000000..468ad6e9a
--- /dev/null
+++ b/llama_stack/ui/components/layout/logs-layout.tsx
@@ -0,0 +1,49 @@
+"use client";
+
+import React from "react";
+import { usePathname, useParams } from "next/navigation";
+import {
+ PageBreadcrumb,
+ BreadcrumbSegment,
+} from "@/components/layout/page-breadcrumb";
+import { truncateText } from "@/lib/truncate-text";
+
+interface LogsLayoutProps {
+ children: React.ReactNode;
+ sectionLabel: string;
+ basePath: string;
+}
+
+export default function LogsLayout({
+ children,
+ sectionLabel,
+ basePath,
+}: LogsLayoutProps) {
+ const pathname = usePathname();
+ const params = useParams();
+
+ let segments: BreadcrumbSegment[] = [];
+
+ if (pathname === basePath) {
+ segments = [{ label: sectionLabel }];
+ }
+
+ const idParam = params?.id;
+ if (idParam && typeof idParam === "string") {
+ segments = [
+ { label: sectionLabel, href: basePath },
+ { label: `Details (${truncateText(idParam, 20)})` },
+ ];
+ }
+
+ return (
+
+ <>
+ {segments.length > 0 && (
+        <PageBreadcrumb segments={segments} />
+ )}
+ {children}
+ >
+
+ );
+}
diff --git a/llama_stack/ui/components/logs/logs-table.test.tsx b/llama_stack/ui/components/logs/logs-table.test.tsx
new file mode 100644
index 000000000..88263b2fc
--- /dev/null
+++ b/llama_stack/ui/components/logs/logs-table.test.tsx
@@ -0,0 +1,350 @@
+import React from "react";
+import { render, screen, fireEvent } from "@testing-library/react";
+import "@testing-library/jest-dom";
+import { LogsTable, LogTableRow } from "./logs-table";
+
+// Mock next/navigation
+const mockPush = jest.fn();
+jest.mock("next/navigation", () => ({
+ useRouter: () => ({
+ push: mockPush,
+ }),
+}));
+
+// Mock helper functions
+jest.mock("@/lib/truncate-text");
+
+// Import the mocked functions
+import { truncateText as originalTruncateText } from "@/lib/truncate-text";
+
+// Cast to jest.Mock for typings
+const truncateText = originalTruncateText as jest.Mock;
+
+describe("LogsTable", () => {
+ const defaultProps = {
+ data: [] as LogTableRow[],
+ isLoading: false,
+ error: null,
+ caption: "Test table caption",
+ emptyMessage: "No data found",
+ };
+
+ beforeEach(() => {
+ // Reset all mocks before each test
+ mockPush.mockClear();
+ truncateText.mockClear();
+
+ // Default pass-through implementation
+ truncateText.mockImplementation((text: string | undefined) => text);
+ });
+
+ test("renders without crashing with default props", () => {
+ render();
+ expect(screen.getByText("No data found")).toBeInTheDocument();
+ });
+
+ test("click on a row navigates to the correct URL", () => {
+ const mockData: LogTableRow[] = [
+ {
+ id: "row_123",
+ input: "Test input",
+ output: "Test output",
+ model: "test-model",
+ createdTime: "2024-01-01 12:00:00",
+ detailPath: "/test/path/row_123",
+ },
+ ];
+
+ render();
+
+ const row = screen.getByText("Test input").closest("tr");
+ if (row) {
+ fireEvent.click(row);
+ expect(mockPush).toHaveBeenCalledWith("/test/path/row_123");
+ } else {
+ throw new Error('Row with "Test input" not found for router mock test.');
+ }
+ });
+
+ describe("Loading State", () => {
+ test("renders skeleton UI when isLoading is true", () => {
+ const { container } = render(
+ ,
+ );
+
+ // Check for skeleton in the table caption
+ const tableCaption = container.querySelector("caption");
+ expect(tableCaption).toBeInTheDocument();
+ if (tableCaption) {
+ const captionSkeleton = tableCaption.querySelector(
+ '[data-slot="skeleton"]',
+ );
+ expect(captionSkeleton).toBeInTheDocument();
+ }
+
+ // Check for skeletons in the table body cells
+ const tableBody = container.querySelector("tbody");
+ expect(tableBody).toBeInTheDocument();
+ if (tableBody) {
+ const bodySkeletons = tableBody.querySelectorAll(
+ '[data-slot="skeleton"]',
+ );
+ expect(bodySkeletons.length).toBeGreaterThan(0);
+ }
+
+ // Check that table headers are still rendered
+ expect(screen.getByText("Input")).toBeInTheDocument();
+ expect(screen.getByText("Output")).toBeInTheDocument();
+ expect(screen.getByText("Model")).toBeInTheDocument();
+ expect(screen.getByText("Created")).toBeInTheDocument();
+ });
+
+ test("renders correct number of skeleton rows", () => {
+ const { container } = render(
+ ,
+ );
+
+ const skeletonRows = container.querySelectorAll("tbody tr");
+ expect(skeletonRows.length).toBe(3); // Should render 3 skeleton rows
+ });
+ });
+
+ describe("Error State", () => {
+ test("renders error message when error prop is provided", () => {
+ const errorMessage = "Network Error";
+ render(
+ ,
+ );
+ expect(
+ screen.getByText(`Error fetching data: ${errorMessage}`),
+ ).toBeInTheDocument();
+ });
+
+ test("renders default error message when error.message is not available", () => {
+ render(
+ ,
+ );
+ expect(
+ screen.getByText("Error fetching data: An unknown error occurred"),
+ ).toBeInTheDocument();
+ });
+
+ test("renders default error message when error prop is an object without message", () => {
+ render();
+ expect(
+ screen.getByText("Error fetching data: An unknown error occurred"),
+ ).toBeInTheDocument();
+ });
+
+ test("does not render table when in error state", () => {
+ render(
+ ,
+ );
+ const table = screen.queryByRole("table");
+ expect(table).not.toBeInTheDocument();
+ });
+ });
+
+ describe("Empty State", () => {
+ test("renders custom empty message when data array is empty", () => {
+ render(
+ ,
+ );
+ expect(screen.getByText("Custom empty message")).toBeInTheDocument();
+
+ // Ensure that the table structure is NOT rendered in the empty state
+ const table = screen.queryByRole("table");
+ expect(table).not.toBeInTheDocument();
+ });
+ });
+
+ describe("Data Rendering", () => {
+ test("renders table caption, headers, and data correctly", () => {
+ const mockData: LogTableRow[] = [
+ {
+ id: "row_1",
+ input: "First input",
+ output: "First output",
+ model: "model-1",
+ createdTime: "2024-01-01 12:00:00",
+ detailPath: "/path/1",
+ },
+ {
+ id: "row_2",
+ input: "Second input",
+ output: "Second output",
+ model: "model-2",
+ createdTime: "2024-01-02 13:00:00",
+ detailPath: "/path/2",
+ },
+ ];
+
+ render(
+ ,
+ );
+
+ // Table caption
+ expect(screen.getByText("Custom table caption")).toBeInTheDocument();
+
+ // Table headers
+ expect(screen.getByText("Input")).toBeInTheDocument();
+ expect(screen.getByText("Output")).toBeInTheDocument();
+ expect(screen.getByText("Model")).toBeInTheDocument();
+ expect(screen.getByText("Created")).toBeInTheDocument();
+
+ // Data rows
+ expect(screen.getByText("First input")).toBeInTheDocument();
+ expect(screen.getByText("First output")).toBeInTheDocument();
+ expect(screen.getByText("model-1")).toBeInTheDocument();
+ expect(screen.getByText("2024-01-01 12:00:00")).toBeInTheDocument();
+
+ expect(screen.getByText("Second input")).toBeInTheDocument();
+ expect(screen.getByText("Second output")).toBeInTheDocument();
+ expect(screen.getByText("model-2")).toBeInTheDocument();
+ expect(screen.getByText("2024-01-02 13:00:00")).toBeInTheDocument();
+ });
+
+ test("applies correct CSS classes to table rows", () => {
+ const mockData: LogTableRow[] = [
+ {
+ id: "row_1",
+ input: "Test input",
+ output: "Test output",
+ model: "test-model",
+ createdTime: "2024-01-01 12:00:00",
+ detailPath: "/test/path",
+ },
+ ];
+
+ render();
+
+ const row = screen.getByText("Test input").closest("tr");
+ expect(row).toHaveClass("cursor-pointer");
+ expect(row).toHaveClass("hover:bg-muted/50");
+ });
+
+ test("applies correct alignment to Created column", () => {
+ const mockData: LogTableRow[] = [
+ {
+ id: "row_1",
+ input: "Test input",
+ output: "Test output",
+ model: "test-model",
+ createdTime: "2024-01-01 12:00:00",
+ detailPath: "/test/path",
+ },
+ ];
+
+ render();
+
+ const createdCell = screen.getByText("2024-01-01 12:00:00").closest("td");
+ expect(createdCell).toHaveClass("text-right");
+ });
+ });
+
+ describe("Text Truncation", () => {
+ test("truncates input and output text using truncateText function", () => {
+ // Mock truncateText to return truncated versions
+ truncateText.mockImplementation((text: string | undefined) => {
+ if (typeof text === "string" && text.length > 10) {
+ return text.slice(0, 10) + "...";
+ }
+ return text;
+ });
+
+ const longInput =
+ "This is a very long input text that should be truncated";
+ const longOutput =
+ "This is a very long output text that should be truncated";
+
+ const mockData: LogTableRow[] = [
+ {
+ id: "row_1",
+ input: longInput,
+ output: longOutput,
+ model: "test-model",
+ createdTime: "2024-01-01 12:00:00",
+ detailPath: "/test/path",
+ },
+ ];
+
+ render();
+
+ // Verify truncateText was called
+ expect(truncateText).toHaveBeenCalledWith(longInput);
+ expect(truncateText).toHaveBeenCalledWith(longOutput);
+
+ // Verify truncated text is displayed
+ const truncatedTexts = screen.getAllByText("This is a ...");
+ expect(truncatedTexts).toHaveLength(2); // one for input, one for output
+ truncatedTexts.forEach((textElement) =>
+ expect(textElement).toBeInTheDocument(),
+ );
+ });
+
+ test("does not truncate model names", () => {
+ const mockData: LogTableRow[] = [
+ {
+ id: "row_1",
+ input: "Test input",
+ output: "Test output",
+ model: "very-long-model-name-that-should-not-be-truncated",
+ createdTime: "2024-01-01 12:00:00",
+ detailPath: "/test/path",
+ },
+ ];
+
+ render();
+
+ // Model name should not be passed to truncateText
+ expect(truncateText).not.toHaveBeenCalledWith(
+ "very-long-model-name-that-should-not-be-truncated",
+ );
+
+ // Full model name should be displayed
+ expect(
+ screen.getByText("very-long-model-name-that-should-not-be-truncated"),
+ ).toBeInTheDocument();
+ });
+ });
+
+ describe("Accessibility", () => {
+ test("table has proper role and structure", () => {
+ const mockData: LogTableRow[] = [
+ {
+ id: "row_1",
+ input: "Test input",
+ output: "Test output",
+ model: "test-model",
+ createdTime: "2024-01-01 12:00:00",
+ detailPath: "/test/path",
+ },
+ ];
+
+ render();
+
+ const table = screen.getByRole("table");
+ expect(table).toBeInTheDocument();
+
+ const columnHeaders = screen.getAllByRole("columnheader");
+ expect(columnHeaders).toHaveLength(4);
+
+ const rows = screen.getAllByRole("row");
+ expect(rows).toHaveLength(2); // 1 header row + 1 data row
+ });
+ });
+});
diff --git a/llama_stack/ui/components/chat-completions/chat-completion-table.tsx b/llama_stack/ui/components/logs/logs-table.tsx
similarity index 57%
rename from llama_stack/ui/components/chat-completions/chat-completion-table.tsx
rename to llama_stack/ui/components/logs/logs-table.tsx
index e11acf376..33afea61b 100644
--- a/llama_stack/ui/components/chat-completions/chat-completion-table.tsx
+++ b/llama_stack/ui/components/logs/logs-table.tsx
@@ -1,12 +1,7 @@
"use client";
import { useRouter } from "next/navigation";
-import { ChatCompletion } from "@/lib/types";
import { truncateText } from "@/lib/truncate-text";
-import {
- extractTextFromContentPart,
- extractDisplayableText,
-} from "@/lib/format-message-content";
import {
Table,
TableBody,
@@ -18,17 +13,31 @@ import {
} from "@/components/ui/table";
import { Skeleton } from "@/components/ui/skeleton";
-interface ChatCompletionsTableProps {
- completions: ChatCompletion[];
- isLoading: boolean;
- error: Error | null;
+// Generic table row data interface
+export interface LogTableRow {
+ id: string;
+ input: string;
+ output: string;
+ model: string;
+ createdTime: string;
+ detailPath: string;
}
-export function ChatCompletionsTable({
- completions,
+interface LogsTableProps {
+ data: LogTableRow[];
+ isLoading: boolean;
+ error: Error | null;
+ caption: string;
+ emptyMessage: string;
+}
+
+export function LogsTable({
+ data,
isLoading,
error,
-}: ChatCompletionsTableProps) {
+ caption,
+ emptyMessage,
+}: LogsTableProps) {
const router = useRouter();
const tableHeader = (
@@ -77,41 +86,25 @@ export function ChatCompletionsTable({
);
}
- if (completions.length === 0) {
- return No chat completions found.
;
+ if (data.length === 0) {
+ return {emptyMessage}
;
}
return (
- A list of your recent chat completions.
+ {caption}
{tableHeader}
- {completions.map((completion) => (
+ {data.map((row) => (
- router.push(`/logs/chat-completions/${completion.id}`)
- }
+ key={row.id}
+ onClick={() => router.push(row.detailPath)}
className="cursor-pointer hover:bg-muted/50"
>
-
- {truncateText(
- extractTextFromContentPart(
- completion.input_messages?.[0]?.content,
- ),
- )}
-
-
- {(() => {
- const message = completion.choices?.[0]?.message;
- const outputText = extractDisplayableText(message);
- return truncateText(outputText);
- })()}
-
- {completion.model}
-
- {new Date(completion.created * 1000).toLocaleString()}
-
+ {truncateText(row.input)}
+ {truncateText(row.output)}
+ {row.model}
+ {row.createdTime}
))}
diff --git a/llama_stack/ui/components/responses/grouping/grouped-items-display.tsx b/llama_stack/ui/components/responses/grouping/grouped-items-display.tsx
new file mode 100644
index 000000000..6ddc0eacc
--- /dev/null
+++ b/llama_stack/ui/components/responses/grouping/grouped-items-display.tsx
@@ -0,0 +1,56 @@
+import { useFunctionCallGrouping } from "../hooks/function-call-grouping";
+import { ItemRenderer } from "../items/item-renderer";
+import { GroupedFunctionCallItemComponent } from "../items/grouped-function-call-item";
+import {
+ isFunctionCallItem,
+ isFunctionCallOutputItem,
+ AnyResponseItem,
+} from "../utils/item-types";
+
+interface GroupedItemsDisplayProps {
+ items: AnyResponseItem[];
+ keyPrefix: string;
+ defaultRole?: string;
+}
+
+export function GroupedItemsDisplay({
+ items,
+ keyPrefix,
+ defaultRole = "unknown",
+}: GroupedItemsDisplayProps) {
+ const groupedItems = useFunctionCallGrouping(items);
+
+ return (
+ <>
+ {groupedItems.map((groupedItem) => {
+ // If this is a function call with an output, render the grouped component
+ if (
+ groupedItem.outputItem &&
+ isFunctionCallItem(groupedItem.item) &&
+ isFunctionCallOutputItem(groupedItem.outputItem)
+ ) {
+ return (
+
+ );
+ }
+
+ // Otherwise, render the individual item
+ return (
+
+ );
+ })}
+ >
+ );
+}
diff --git a/llama_stack/ui/components/responses/hooks/function-call-grouping.ts b/llama_stack/ui/components/responses/hooks/function-call-grouping.ts
new file mode 100644
index 000000000..2994354d5
--- /dev/null
+++ b/llama_stack/ui/components/responses/hooks/function-call-grouping.ts
@@ -0,0 +1,92 @@
+import { useMemo } from "react";
+import {
+ isFunctionCallOutputItem,
+ AnyResponseItem,
+ FunctionCallOutputItem,
+} from "../utils/item-types";
+
+export interface GroupedItem {
+ item: AnyResponseItem;
+ index: number;
+ outputItem?: AnyResponseItem;
+ outputIndex?: number;
+}
+
+/**
+ * Hook to group function calls with their corresponding outputs
+ * @param items Array of items to group
+ * @returns Array of grouped items with their outputs
+ */
+export function useFunctionCallGrouping(
+ items: AnyResponseItem[],
+): GroupedItem[] {
+ return useMemo(() => {
+ const groupedItems: GroupedItem[] = [];
+ const processedIndices = new Set();
+
+ // Build a map of call_id to indices for function_call_output items
+ const callIdToIndices = new Map();
+
+ for (let i = 0; i < items.length; i++) {
+ const item = items[i];
+ if (isFunctionCallOutputItem(item)) {
+ if (!callIdToIndices.has(item.call_id)) {
+ callIdToIndices.set(item.call_id, []);
+ }
+ callIdToIndices.get(item.call_id)!.push(i);
+ }
+ }
+
+ // Process items and group function calls with their outputs
+ for (let i = 0; i < items.length; i++) {
+ if (processedIndices.has(i)) {
+ continue;
+ }
+
+ const currentItem = items[i];
+
+ if (
+ currentItem.type === "function_call" &&
+ "name" in currentItem &&
+ "call_id" in currentItem
+ ) {
+ const functionCallId = currentItem.call_id as string;
+ let outputIndex = -1;
+ let outputItem: FunctionCallOutputItem | null = null;
+
+ const relatedIndices = callIdToIndices.get(functionCallId) || [];
+ for (const idx of relatedIndices) {
+ const potentialOutput = items[idx];
+ outputIndex = idx;
+ outputItem = potentialOutput as FunctionCallOutputItem;
+ break;
+ }
+
+ if (outputItem && outputIndex !== -1) {
+ // Group function call with its function_call_output
+ groupedItems.push({
+ item: currentItem,
+ index: i,
+ outputItem,
+ outputIndex,
+ });
+
+ // Mark both items as processed
+ processedIndices.add(i);
+ processedIndices.add(outputIndex);
+
+ // Matching function call and output found, skip to next item
+ continue;
+ }
+ }
+ // render normally
+ groupedItems.push({
+ item: currentItem,
+ index: i,
+ });
+ processedIndices.add(i);
+ }
+
+ return groupedItems;
+ }, [items]);
+}
diff --git a/llama_stack/ui/components/responses/items/function-call-item.tsx b/llama_stack/ui/components/responses/items/function-call-item.tsx
new file mode 100644
index 000000000..beca935f0
--- /dev/null
+++ b/llama_stack/ui/components/responses/items/function-call-item.tsx
@@ -0,0 +1,29 @@
+import {
+ MessageBlock,
+ ToolCallBlock,
+} from "@/components/ui/message-components";
+import { FunctionCallItem } from "../utils/item-types";
+
+interface FunctionCallItemProps {
+ item: FunctionCallItem;
+ index: number;
+ keyPrefix: string;
+}
+
+export function FunctionCallItemComponent({
+ item,
+ index,
+ keyPrefix,
+}: FunctionCallItemProps) {
+ const name = item.name || "unknown";
+ const args = item.arguments || "{}";
+ const formattedFunctionCall = `${name}(${args})`;
+
+ return (
+ {formattedFunctionCall}}
+ />
+ );
+}
diff --git a/llama_stack/ui/components/responses/items/generic-item.tsx b/llama_stack/ui/components/responses/items/generic-item.tsx
new file mode 100644
index 000000000..6b6f56603
--- /dev/null
+++ b/llama_stack/ui/components/responses/items/generic-item.tsx
@@ -0,0 +1,37 @@
+import {
+ MessageBlock,
+ ToolCallBlock,
+} from "@/components/ui/message-components";
+import { BaseItem } from "../utils/item-types";
+
+interface GenericItemProps {
+ item: BaseItem;
+ index: number;
+ keyPrefix: string;
+}
+
+export function GenericItemComponent({
+ item,
+ index,
+ keyPrefix,
+}: GenericItemProps) {
+ // Handle other types like function calls, tool outputs, etc.
+ const itemData = item as Record;
+
+ const content = itemData.content
+ ? typeof itemData.content === "string"
+ ? itemData.content
+ : JSON.stringify(itemData.content, null, 2)
+ : JSON.stringify(itemData, null, 2);
+
+ const label = keyPrefix === "input" ? "Input" : "Output";
+
+ return (
+ {content}}
+ />
+ );
+}
diff --git a/llama_stack/ui/components/responses/items/grouped-function-call-item.tsx b/llama_stack/ui/components/responses/items/grouped-function-call-item.tsx
new file mode 100644
index 000000000..ded0ced71
--- /dev/null
+++ b/llama_stack/ui/components/responses/items/grouped-function-call-item.tsx
@@ -0,0 +1,54 @@
+import {
+ MessageBlock,
+ ToolCallBlock,
+} from "@/components/ui/message-components";
+import { FunctionCallItem, FunctionCallOutputItem } from "../utils/item-types";
+
+interface GroupedFunctionCallItemProps {
+ functionCall: FunctionCallItem;
+ output: FunctionCallOutputItem;
+ index: number;
+ keyPrefix: string;
+}
+
+export function GroupedFunctionCallItemComponent({
+ functionCall,
+ output,
+ index,
+ keyPrefix,
+}: GroupedFunctionCallItemProps) {
+ const name = functionCall.name || "unknown";
+ const args = functionCall.arguments || "{}";
+
+ // Extract the output content from function_call_output
+ let outputContent = "";
+ if (output.output) {
+ outputContent =
+ typeof output.output === "string"
+ ? output.output
+ : JSON.stringify(output.output);
+ } else {
+ outputContent = JSON.stringify(output, null, 2);
+ }
+
+ const functionCallContent = (
+
+
+ Arguments
+ {`${name}(${args})`}
+
+
+ Output
+ {outputContent}
+
+
+ );
+
+ return (
+
+ );
+}
diff --git a/llama_stack/ui/components/responses/items/index.ts b/llama_stack/ui/components/responses/items/index.ts
new file mode 100644
index 000000000..d7bcc2ea4
--- /dev/null
+++ b/llama_stack/ui/components/responses/items/index.ts
@@ -0,0 +1,6 @@
+export { MessageItemComponent } from "./message-item";
+export { FunctionCallItemComponent } from "./function-call-item";
+export { WebSearchItemComponent } from "./web-search-item";
+export { GenericItemComponent } from "./generic-item";
+export { GroupedFunctionCallItemComponent } from "./grouped-function-call-item";
+export { ItemRenderer } from "./item-renderer";
diff --git a/llama_stack/ui/components/responses/items/item-renderer.tsx b/llama_stack/ui/components/responses/items/item-renderer.tsx
new file mode 100644
index 000000000..8f65d50c4
--- /dev/null
+++ b/llama_stack/ui/components/responses/items/item-renderer.tsx
@@ -0,0 +1,60 @@
+import {
+ isMessageItem,
+ isFunctionCallItem,
+ isWebSearchCallItem,
+ AnyResponseItem,
+} from "../utils/item-types";
+import { MessageItemComponent } from "./message-item";
+import { FunctionCallItemComponent } from "./function-call-item";
+import { WebSearchItemComponent } from "./web-search-item";
+import { GenericItemComponent } from "./generic-item";
+
+interface ItemRendererProps {
+ item: AnyResponseItem;
+ index: number;
+ keyPrefix: string;
+ defaultRole?: string;
+}
+
+export function ItemRenderer({
+ item,
+ index,
+ keyPrefix,
+ defaultRole = "unknown",
+}: ItemRendererProps) {
+ if (isMessageItem(item)) {
+ return (
+
+ );
+ }
+
+ if (isFunctionCallItem(item)) {
+ return (
+
+ );
+ }
+
+ if (isWebSearchCallItem(item)) {
+ return (
+
+ );
+ }
+
+ // Fallback to generic item for unknown types
+ return (
+
+ );
+}
diff --git a/llama_stack/ui/components/responses/items/message-item.tsx b/llama_stack/ui/components/responses/items/message-item.tsx
new file mode 100644
index 000000000..532fddfaa
--- /dev/null
+++ b/llama_stack/ui/components/responses/items/message-item.tsx
@@ -0,0 +1,41 @@
+import { MessageBlock } from "@/components/ui/message-components";
+import { MessageItem } from "../utils/item-types";
+
+interface MessageItemProps {
+ item: MessageItem;
+ index: number;
+ keyPrefix: string;
+ defaultRole?: string;
+}
+
+export function MessageItemComponent({
+ item,
+ index,
+ keyPrefix,
+ defaultRole = "unknown",
+}: MessageItemProps) {
+ let content = "";
+
+ if (typeof item.content === "string") {
+ content = item.content;
+ } else if (Array.isArray(item.content)) {
+ content = item.content
+ .map((c) => {
+ return c.type === "input_text" || c.type === "output_text"
+ ? c.text
+ : JSON.stringify(c);
+ })
+ .join(" ");
+ }
+
+ const role = item.role || defaultRole;
+ const label = role.charAt(0).toUpperCase() + role.slice(1);
+
+ return (
+
+ );
+}
diff --git a/llama_stack/ui/components/responses/items/web-search-item.tsx b/llama_stack/ui/components/responses/items/web-search-item.tsx
new file mode 100644
index 000000000..aaa5741ce
--- /dev/null
+++ b/llama_stack/ui/components/responses/items/web-search-item.tsx
@@ -0,0 +1,28 @@
+import {
+ MessageBlock,
+ ToolCallBlock,
+} from "@/components/ui/message-components";
+import { WebSearchCallItem } from "../utils/item-types";
+
+interface WebSearchItemProps {
+ item: WebSearchCallItem;
+ index: number;
+ keyPrefix: string;
+}
+
+export function WebSearchItemComponent({
+ item,
+ index,
+ keyPrefix,
+}: WebSearchItemProps) {
+ const formattedWebSearch = `web_search_call(status: ${item.status})`;
+
+ return (
+ {formattedWebSearch}}
+ />
+ );
+}
diff --git a/llama_stack/ui/components/responses/responses-detail.test.tsx b/llama_stack/ui/components/responses/responses-detail.test.tsx
new file mode 100644
index 000000000..f426dc059
--- /dev/null
+++ b/llama_stack/ui/components/responses/responses-detail.test.tsx
@@ -0,0 +1,777 @@
+import React from "react";
+import { render, screen } from "@testing-library/react";
+import "@testing-library/jest-dom";
+import { ResponseDetailView } from "./responses-detail";
+import { OpenAIResponse, InputItemListResponse } from "@/lib/types";
+
+describe("ResponseDetailView", () => {
+ const defaultProps = {
+ response: null,
+ inputItems: null,
+ isLoading: false,
+ isLoadingInputItems: false,
+ error: null,
+ inputItemsError: null,
+ id: "test_id",
+ };
+
+ describe("Loading State", () => {
+ test("renders loading skeleton when isLoading is true", () => {
+ const { container } = render(
+ ,
+ );
+
+ // Check for skeleton elements
+ const skeletons = container.querySelectorAll('[data-slot="skeleton"]');
+ expect(skeletons.length).toBeGreaterThan(0);
+
+ // The title is replaced by a skeleton when loading, so we shouldn't expect the text
+ });
+ });
+
+ describe("Error State", () => {
+ test("renders error message when error prop is provided", () => {
+ const errorMessage = "Network Error";
+ render(
+ ,
+ );
+
+ expect(screen.getByText("Responses Details")).toBeInTheDocument();
+ // The error message is split across elements, so we check for parts
+ expect(
+ screen.getByText(/Error loading details for ID/),
+ ).toBeInTheDocument();
+ expect(screen.getByText(/test_id/)).toBeInTheDocument();
+ expect(screen.getByText(/Network Error/)).toBeInTheDocument();
+ });
+
+ test("renders default error message when error.message is not available", () => {
+ render(
+ ,
+ );
+
+ expect(
+ screen.getByText(/Error loading details for ID/),
+ ).toBeInTheDocument();
+ expect(screen.getByText(/test_id/)).toBeInTheDocument();
+ });
+ });
+
+ describe("Not Found State", () => {
+ test("renders not found message when response is null and not loading/error", () => {
+ render();
+
+ expect(screen.getByText("Responses Details")).toBeInTheDocument();
+ // The message is split across elements
+ expect(screen.getByText(/No details found for ID:/)).toBeInTheDocument();
+ expect(screen.getByText(/test_id/)).toBeInTheDocument();
+ });
+ });
+
+ describe("Response Data Rendering", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "llama-test-model",
+ status: "completed",
+ output: [
+ {
+ type: "message",
+ role: "assistant",
+ content: "Test response output",
+ },
+ ],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: "Test input message",
+ },
+ ],
+ temperature: 0.7,
+ top_p: 0.9,
+ parallel_tool_calls: true,
+ previous_response_id: "prev_resp_456",
+ };
+
+ test("renders response data with input and output sections", () => {
+ render();
+
+ // Check main sections
+ expect(screen.getByText("Responses Details")).toBeInTheDocument();
+ expect(screen.getByText("Input")).toBeInTheDocument();
+ expect(screen.getByText("Output")).toBeInTheDocument();
+
+ // Check input content
+ expect(screen.getByText("Test input message")).toBeInTheDocument();
+ expect(screen.getByText("User")).toBeInTheDocument();
+
+ // Check output content
+ expect(screen.getByText("Test response output")).toBeInTheDocument();
+ expect(screen.getByText("Assistant")).toBeInTheDocument();
+ });
+
+ test("renders properties sidebar with all response metadata", () => {
+ render();
+
+ // Check properties - use regex to handle text split across elements
+ expect(screen.getByText(/Created/)).toBeInTheDocument();
+ expect(
+ screen.getByText(new Date(1710000000 * 1000).toLocaleString()),
+ ).toBeInTheDocument();
+
+ // Check for the specific ID label (not Previous Response ID)
+ expect(
+ screen.getByText((content, element) => {
+ return element?.tagName === "STRONG" && content === "ID:";
+ }),
+ ).toBeInTheDocument();
+ expect(screen.getByText("resp_123")).toBeInTheDocument();
+
+ expect(screen.getByText(/Model/)).toBeInTheDocument();
+ expect(screen.getByText("llama-test-model")).toBeInTheDocument();
+
+ expect(screen.getByText(/Status/)).toBeInTheDocument();
+ expect(screen.getByText("completed")).toBeInTheDocument();
+
+ expect(screen.getByText(/Temperature/)).toBeInTheDocument();
+ expect(screen.getByText("0.7")).toBeInTheDocument();
+
+ expect(screen.getByText(/Top P/)).toBeInTheDocument();
+ expect(screen.getByText("0.9")).toBeInTheDocument();
+
+ expect(screen.getByText(/Parallel Tool Calls/)).toBeInTheDocument();
+ expect(screen.getByText("Yes")).toBeInTheDocument();
+
+ expect(screen.getByText(/Previous Response ID/)).toBeInTheDocument();
+ expect(screen.getByText("prev_resp_456")).toBeInTheDocument();
+ });
+
+ test("handles optional properties correctly", () => {
+ const minimalResponse: OpenAIResponse = {
+ id: "resp_minimal",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [],
+ };
+
+ render(
+ ,
+ );
+
+ // Should show required properties
+ expect(screen.getByText("resp_minimal")).toBeInTheDocument();
+ expect(screen.getByText("test-model")).toBeInTheDocument();
+ expect(screen.getByText("completed")).toBeInTheDocument();
+
+ // Should not show optional properties
+ expect(screen.queryByText("Temperature")).not.toBeInTheDocument();
+ expect(screen.queryByText("Top P")).not.toBeInTheDocument();
+ expect(screen.queryByText("Parallel Tool Calls")).not.toBeInTheDocument();
+ expect(
+ screen.queryByText("Previous Response ID"),
+ ).not.toBeInTheDocument();
+ });
+
+ test("renders error information when response has error", () => {
+ const errorResponse: OpenAIResponse = {
+ ...mockResponse,
+ error: {
+ code: "invalid_request",
+ message: "The request was invalid",
+ },
+ };
+
+ render();
+
+ // The error is shown in the properties sidebar, not as a separate "Error" label
+ expect(
+ screen.getByText("invalid_request: The request was invalid"),
+ ).toBeInTheDocument();
+ });
+ });
+
+ describe("Input Items Handling", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [{ type: "message", role: "assistant", content: "output" }],
+ input: [{ type: "message", role: "user", content: "fallback input" }],
+ };
+
+ test("shows loading state for input items", () => {
+ render(
+ ,
+ );
+
+ // Check for skeleton loading in input items section
+ const { container } = render(
+ ,
+ );
+
+ const skeletons = container.querySelectorAll('[data-slot="skeleton"]');
+ expect(skeletons.length).toBeGreaterThan(0);
+ });
+
+ test("shows error message for input items with fallback", () => {
+ render(
+ ,
+ );
+
+ expect(
+ screen.getByText(
+ "Error loading input items: Failed to load input items",
+ ),
+ ).toBeInTheDocument();
+ expect(
+ screen.getByText("Falling back to response input data."),
+ ).toBeInTheDocument();
+
+ // Should still show fallback input data
+ expect(screen.getByText("fallback input")).toBeInTheDocument();
+ });
+
+ test("uses input items data when available", () => {
+ const mockInputItems: InputItemListResponse = {
+ object: "list",
+ data: [
+ {
+ type: "message",
+ role: "user",
+ content: "input from items API",
+ },
+ ],
+ };
+
+ render(
+ ,
+ );
+
+ // Should show input items data, not response.input
+ expect(screen.getByText("input from items API")).toBeInTheDocument();
+ expect(screen.queryByText("fallback input")).not.toBeInTheDocument();
+ });
+
+ test("falls back to response.input when input items is empty", () => {
+ const emptyInputItems: InputItemListResponse = {
+ object: "list",
+ data: [],
+ };
+
+ render(
+ ,
+ );
+
+ // Should show fallback input data
+ expect(screen.getByText("fallback input")).toBeInTheDocument();
+ });
+
+ test("shows no input message when no data available", () => {
+ const responseWithoutInput: OpenAIResponse = {
+ ...mockResponse,
+ input: [],
+ };
+
+ render(
+ ,
+ );
+
+ expect(screen.getByText("No input data available.")).toBeInTheDocument();
+ });
+ });
+
+ describe("Input Display Components", () => {
+ test("renders string content input correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: "Simple string input",
+ },
+ ],
+ };
+
+ render();
+
+ expect(screen.getByText("Simple string input")).toBeInTheDocument();
+ expect(screen.getByText("User")).toBeInTheDocument();
+ });
+
+ test("renders array content input correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: [
+ { type: "input_text", text: "First part" },
+ { type: "output_text", text: "Second part" },
+ ],
+ },
+ ],
+ };
+
+ render();
+
+ expect(screen.getByText("First part Second part")).toBeInTheDocument();
+ expect(screen.getByText("User")).toBeInTheDocument();
+ });
+
+ test("renders non-message input types correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [
+ {
+ type: "function_call",
+ content: "function call content",
+ },
+ ],
+ };
+
+ render();
+
+ expect(screen.getByText("function call content")).toBeInTheDocument();
+ // Use getAllByText to find the specific "Input" with the type detail
+ const inputElements = screen.getAllByText("Input");
+ expect(inputElements.length).toBeGreaterThan(0);
+ expect(screen.getByText("(function_call)")).toBeInTheDocument();
+ });
+
+ test("handles input with object content", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [
+ {
+ type: "custom_type",
+ content: JSON.stringify({ key: "value", nested: { data: "test" } }),
+ },
+ ],
+ };
+
+ render();
+
+ // Should show JSON stringified content (without quotes around keys in the rendered output)
+ expect(screen.getByText(/key.*value/)).toBeInTheDocument();
+ // Use getAllByText to find the specific "Input" with the type detail
+ const inputElements = screen.getAllByText("Input");
+ expect(inputElements.length).toBeGreaterThan(0);
+ expect(screen.getByText("(custom_type)")).toBeInTheDocument();
+ });
+
+ test("renders function call input correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [
+ {
+ type: "function_call",
+ id: "call_456",
+ status: "completed",
+ name: "input_function",
+ arguments: '{"param": "value"}',
+ },
+ ],
+ };
+
+ render();
+
+ expect(
+ screen.getByText('input_function({"param": "value"})'),
+ ).toBeInTheDocument();
+ expect(screen.getByText("Function Call")).toBeInTheDocument();
+ });
+
+ test("renders web search call input correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [
+ {
+ type: "web_search_call",
+ id: "search_789",
+ status: "completed",
+ },
+ ],
+ };
+
+ render();
+
+ expect(
+ screen.getByText("web_search_call(status: completed)"),
+ ).toBeInTheDocument();
+ expect(screen.getByText("Function Call")).toBeInTheDocument();
+ expect(screen.getByText("(Web Search)")).toBeInTheDocument();
+ });
+ });
+
+ describe("Output Display Components", () => {
+ test("renders message output with string content", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "message",
+ role: "assistant",
+ content: "Simple string output",
+ },
+ ],
+ input: [],
+ };
+
+ render();
+
+ expect(screen.getByText("Simple string output")).toBeInTheDocument();
+ expect(screen.getByText("Assistant")).toBeInTheDocument();
+ });
+
+ test("renders message output with array content", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "message",
+ role: "assistant",
+ content: [
+ { type: "output_text", text: "First output" },
+ { type: "input_text", text: "Second output" },
+ ],
+ },
+ ],
+ input: [],
+ };
+
+ render();
+
+ expect(
+ screen.getByText("First output Second output"),
+ ).toBeInTheDocument();
+ expect(screen.getByText("Assistant")).toBeInTheDocument();
+ });
+
+ test("renders function call output correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "function_call",
+ id: "call_123",
+ status: "completed",
+ name: "search_function",
+ arguments: '{"query": "test"}',
+ },
+ ],
+ input: [],
+ };
+
+ render();
+
+ expect(
+ screen.getByText('search_function({"query": "test"})'),
+ ).toBeInTheDocument();
+ expect(screen.getByText("Function Call")).toBeInTheDocument();
+ });
+
+ test("renders function call output without arguments", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "function_call",
+ id: "call_123",
+ status: "completed",
+ name: "simple_function",
+ },
+ ],
+ input: [],
+ };
+
+ render();
+
+ expect(screen.getByText("simple_function({})")).toBeInTheDocument();
+ expect(screen.getByText(/Function Call/)).toBeInTheDocument();
+ });
+
+ test("renders web search call output correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "web_search_call",
+ id: "search_123",
+ status: "completed",
+ },
+ ],
+ input: [],
+ };
+
+ render();
+
+ expect(
+ screen.getByText("web_search_call(status: completed)"),
+ ).toBeInTheDocument();
+ expect(screen.getByText(/Function Call/)).toBeInTheDocument();
+ expect(screen.getByText("(Web Search)")).toBeInTheDocument();
+ });
+
+ test("renders unknown output types with JSON fallback", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "unknown_type",
+ custom_field: "custom_value",
+ data: { nested: "object" },
+ } as any,
+ ],
+ input: [],
+ };
+
+ render();
+
+ // Should show JSON stringified content
+ expect(
+ screen.getByText(/custom_field.*custom_value/),
+ ).toBeInTheDocument();
+ expect(screen.getByText("(unknown_type)")).toBeInTheDocument();
+ });
+
+ test("shows no output message when output array is empty", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [],
+ };
+
+ render();
+
+ expect(screen.getByText("No output data available.")).toBeInTheDocument();
+ });
+
+ test("groups function call with its output correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "function_call",
+ id: "call_123",
+ status: "completed",
+ name: "get_weather",
+ arguments: '{"city": "Tokyo"}',
+ },
+ {
+ type: "message",
+ role: "assistant",
+ call_id: "call_123",
+ content: "sunny and warm",
+ } as any, // Using any to bypass the type restriction for this test
+ ],
+ input: [],
+ };
+
+ render();
+
+ // Should show the function call and message as separate items (not grouped)
+ expect(screen.getByText("Function Call")).toBeInTheDocument();
+ expect(
+ screen.getByText('get_weather({"city": "Tokyo"})'),
+ ).toBeInTheDocument();
+ expect(screen.getByText("Assistant")).toBeInTheDocument();
+ expect(screen.getByText("sunny and warm")).toBeInTheDocument();
+
+ // Should NOT have the grouped "Arguments" and "Output" labels
+ expect(screen.queryByText("Arguments")).not.toBeInTheDocument();
+ });
+
+ test("groups function call with function_call_output correctly", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "function_call",
+ call_id: "call_123",
+ status: "completed",
+ name: "get_weather",
+ arguments: '{"city": "Tokyo"}',
+ },
+ {
+ type: "function_call_output",
+ id: "fc_68364957013081...",
+ status: "completed",
+ call_id: "call_123",
+ output: "sunny and warm",
+ } as any, // Using any to bypass the type restriction for this test
+ ],
+ input: [],
+ };
+
+ render();
+
+ // Should show the function call grouped with its clean output
+ expect(screen.getByText("Function Call")).toBeInTheDocument();
+ expect(screen.getByText("Arguments")).toBeInTheDocument();
+ expect(
+ screen.getByText('get_weather({"city": "Tokyo"})'),
+ ).toBeInTheDocument();
+ // Use getAllByText since there are multiple "Output" elements (card title and output label)
+ const outputElements = screen.getAllByText("Output");
+ expect(outputElements.length).toBeGreaterThan(0);
+ expect(screen.getByText("sunny and warm")).toBeInTheDocument();
+ });
+ });
+
+ describe("Edge Cases and Error Handling", () => {
+ test("handles missing role in message input", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [],
+ input: [
+ {
+ type: "message",
+ content: "Message without role",
+ },
+ ],
+ };
+
+ render();
+
+ expect(screen.getByText("Message without role")).toBeInTheDocument();
+ expect(screen.getByText("Unknown")).toBeInTheDocument(); // Default role
+ });
+
+ test("handles missing name in function call output", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "function_call",
+ id: "call_123",
+ status: "completed",
+ },
+ ],
+ input: [],
+ };
+
+ render();
+
+ // When name is missing, it falls back to JSON.stringify of the entire output
+ const functionCallElements = screen.getAllByText(/function_call/);
+ expect(functionCallElements.length).toBeGreaterThan(0);
+ expect(screen.getByText(/call_123/)).toBeInTheDocument();
+ });
+ });
+});
diff --git a/llama_stack/ui/components/responses/responses-detail.tsx b/llama_stack/ui/components/responses/responses-detail.tsx
new file mode 100644
index 000000000..c8c447ba4
--- /dev/null
+++ b/llama_stack/ui/components/responses/responses-detail.tsx
@@ -0,0 +1,171 @@
+"use client";
+
+import { OpenAIResponse, InputItemListResponse } from "@/lib/types";
+import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
+import { Skeleton } from "@/components/ui/skeleton";
+import {
+ DetailLoadingView,
+ DetailErrorView,
+ DetailNotFoundView,
+ DetailLayout,
+ PropertiesCard,
+ PropertyItem,
+} from "@/components/layout/detail-layout";
+import { GroupedItemsDisplay } from "./grouping/grouped-items-display";
+
+interface ResponseDetailViewProps {
+ response: OpenAIResponse | null;
+ inputItems: InputItemListResponse | null;
+ isLoading: boolean;
+ isLoadingInputItems: boolean;
+ error: Error | null;
+ inputItemsError: Error | null;
+ id: string;
+}
+
+export function ResponseDetailView({
+ response,
+ inputItems,
+ isLoading,
+ isLoadingInputItems,
+ error,
+ inputItemsError,
+ id,
+}: ResponseDetailViewProps) {
+ const title = "Responses Details";
+
+ if (error) {
+ return ;
+ }
+
+ if (isLoading) {
+ return ;
+ }
+
+ if (!response) {
+ return ;
+ }
+
+ // Main content cards
+ const mainContent = (
+ <>
+
+
+ Input
+
+
+ {/* Show loading state for input items */}
+ {isLoadingInputItems ? (
+
+
+
+
+
+ ) : inputItemsError ? (
+
+ Error loading input items: {inputItemsError.message}
+
+
+ Falling back to response input data.
+
+
+ ) : null}
+
+ {/* Display input items if available, otherwise fall back to response.input */}
+ {(() => {
+ const dataToDisplay =
+ inputItems?.data && inputItems.data.length > 0
+ ? inputItems.data
+ : response.input;
+
+ if (dataToDisplay && dataToDisplay.length > 0) {
+ return (
+
+ );
+ } else {
+ return (
+
+ No input data available.
+
+ );
+ }
+ })()}
+
+
+
+
+
+ Output
+
+
+ {response.output?.length > 0 ? (
+
+ ) : (
+
+ No output data available.
+
+ )}
+
+
+ >
+ );
+
+ // Properties sidebar
+ const sidebar = (
+
+
+
+
+
+ {response.temperature && (
+
+ )}
+ {response.top_p && }
+ {response.parallel_tool_calls && (
+
+ )}
+ {response.previous_response_id && (
+ {response.previous_response_id}
+ }
+ hasBorder
+ />
+ )}
+ {response.error && (
+
+ {response.error.code}: {response.error.message}
+
+ }
+ className="pt-1 mt-1 border-t border-red-200"
+ />
+ )}
+
+ );
+
+ return (
+
+ );
+}
diff --git a/llama_stack/ui/components/responses/responses-table.test.tsx b/llama_stack/ui/components/responses/responses-table.test.tsx
new file mode 100644
index 000000000..7c45c57d3
--- /dev/null
+++ b/llama_stack/ui/components/responses/responses-table.test.tsx
@@ -0,0 +1,537 @@
+import React from "react";
+import { render, screen, fireEvent } from "@testing-library/react";
+import "@testing-library/jest-dom";
+import { ResponsesTable } from "./responses-table";
+import { OpenAIResponse } from "@/lib/types";
+
+// Mock next/navigation
+const mockPush = jest.fn();
+jest.mock("next/navigation", () => ({
+ useRouter: () => ({
+ push: mockPush,
+ }),
+}));
+
+// Mock helper functions
+jest.mock("@/lib/truncate-text");
+
+// Import the mocked functions
+import { truncateText as originalTruncateText } from "@/lib/truncate-text";
+
+// Cast to jest.Mock for typings
+const truncateText = originalTruncateText as jest.Mock;
+
+describe("ResponsesTable", () => {
+ const defaultProps = {
+ data: [] as OpenAIResponse[],
+ isLoading: false,
+ error: null,
+ };
+
+ beforeEach(() => {
+ // Reset all mocks before each test
+ mockPush.mockClear();
+ truncateText.mockClear();
+
+ // Default pass-through implementation
+ truncateText.mockImplementation((text: string | undefined) => text);
+ });
+
+ test("renders without crashing with default props", () => {
+ render();
+ expect(screen.getByText("No responses found.")).toBeInTheDocument();
+ });
+
+ test("click on a row navigates to the correct URL", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_123",
+ object: "response",
+ created_at: Math.floor(Date.now() / 1000),
+ model: "llama-test-model",
+ status: "completed",
+ output: [
+ {
+ type: "message",
+ role: "assistant",
+ content: "Test output",
+ },
+ ],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: "Test input",
+ },
+ ],
+ };
+
+ render();
+
+ const row = screen.getByText("Test input").closest("tr");
+ if (row) {
+ fireEvent.click(row);
+ expect(mockPush).toHaveBeenCalledWith("/logs/responses/resp_123");
+ } else {
+ throw new Error('Row with "Test input" not found for router mock test.');
+ }
+ });
+
+ describe("Loading State", () => {
+ test("renders skeleton UI when isLoading is true", () => {
+ const { container } = render(
+ ,
+ );
+
+ // Check for skeleton in the table caption
+ const tableCaption = container.querySelector("caption");
+ expect(tableCaption).toBeInTheDocument();
+ if (tableCaption) {
+ const captionSkeleton = tableCaption.querySelector(
+ '[data-slot="skeleton"]',
+ );
+ expect(captionSkeleton).toBeInTheDocument();
+ }
+
+ // Check for skeletons in the table body cells
+ const tableBody = container.querySelector("tbody");
+ expect(tableBody).toBeInTheDocument();
+ if (tableBody) {
+ const bodySkeletons = tableBody.querySelectorAll(
+ '[data-slot="skeleton"]',
+ );
+ expect(bodySkeletons.length).toBeGreaterThan(0);
+ }
+ });
+ });
+
+ describe("Error State", () => {
+ test("renders error message when error prop is provided", () => {
+ const errorMessage = "Network Error";
+ render(
+ ,
+ );
+ expect(
+ screen.getByText(`Error fetching data: ${errorMessage}`),
+ ).toBeInTheDocument();
+ });
+
+ test("renders default error message when error.message is not available", () => {
+ render(
+ ,
+ );
+ expect(
+ screen.getByText("Error fetching data: An unknown error occurred"),
+ ).toBeInTheDocument();
+ });
+
+ test("renders default error message when error prop is an object without message", () => {
+ render();
+ expect(
+ screen.getByText("Error fetching data: An unknown error occurred"),
+ ).toBeInTheDocument();
+ });
+ });
+
+ describe("Empty State", () => {
+ test('renders "No responses found." and no table when data array is empty', () => {
+ render();
+ expect(screen.getByText("No responses found.")).toBeInTheDocument();
+
+ // Ensure that the table structure is NOT rendered in the empty state
+ const table = screen.queryByRole("table");
+ expect(table).not.toBeInTheDocument();
+ });
+ });
+
+ describe("Data Rendering", () => {
+ test("renders table caption, headers, and response data correctly", () => {
+ const mockResponses = [
+ {
+ id: "resp_1",
+ object: "response" as const,
+ created_at: 1710000000,
+ model: "llama-test-model",
+ status: "completed",
+ output: [
+ {
+ type: "message" as const,
+ role: "assistant" as const,
+ content: "Test output",
+ },
+ ],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: "Test input",
+ },
+ ],
+ },
+ {
+ id: "resp_2",
+ object: "response" as const,
+ created_at: 1710001000,
+ model: "llama-another-model",
+ status: "completed",
+ output: [
+ {
+ type: "message" as const,
+ role: "assistant" as const,
+ content: "Another output",
+ },
+ ],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: "Another input",
+ },
+ ],
+ },
+ ];
+
+ render(
+ ,
+ );
+
+ // Table caption
+ expect(
+ screen.getByText("A list of your recent responses."),
+ ).toBeInTheDocument();
+
+ // Table headers
+ expect(screen.getByText("Input")).toBeInTheDocument();
+ expect(screen.getByText("Output")).toBeInTheDocument();
+ expect(screen.getByText("Model")).toBeInTheDocument();
+ expect(screen.getByText("Created")).toBeInTheDocument();
+
+ // Data rows
+ expect(screen.getByText("Test input")).toBeInTheDocument();
+ expect(screen.getByText("Test output")).toBeInTheDocument();
+ expect(screen.getByText("llama-test-model")).toBeInTheDocument();
+ expect(
+ screen.getByText(new Date(1710000000 * 1000).toLocaleString()),
+ ).toBeInTheDocument();
+
+ expect(screen.getByText("Another input")).toBeInTheDocument();
+ expect(screen.getByText("Another output")).toBeInTheDocument();
+ expect(screen.getByText("llama-another-model")).toBeInTheDocument();
+ expect(
+ screen.getByText(new Date(1710001000 * 1000).toLocaleString()),
+ ).toBeInTheDocument();
+ });
+ });
+
+ describe("Input Text Extraction", () => {
+ test("extracts text from string content", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_string",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [{ type: "message", role: "assistant", content: "output" }],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: "Simple string input",
+ },
+ ],
+ };
+
+ render(
+ ,
+ );
+ expect(screen.getByText("Simple string input")).toBeInTheDocument();
+ });
+
+ test("extracts text from array content with input_text type", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_array",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [{ type: "message", role: "assistant", content: "output" }],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: [
+ { type: "input_text", text: "Array input text" },
+ { type: "input_text", text: "Should not be used" },
+ ],
+ },
+ ],
+ };
+
+ render(
+ ,
+ );
+ expect(screen.getByText("Array input text")).toBeInTheDocument();
+ });
+
+ test("returns empty string when no message input found", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_no_input",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [{ type: "message", role: "assistant", content: "output" }],
+ input: [
+ {
+ type: "other_type",
+ content: "Not a message",
+ },
+ ],
+ };
+
+ const { container } = render(
+ ,
+ );
+
+ // Find the input cell (first cell in the data row) and verify it's empty
+ const inputCell = container.querySelector("tbody tr td:first-child");
+ expect(inputCell).toBeInTheDocument();
+ expect(inputCell).toHaveTextContent("");
+ });
+ });
+
+ describe("Output Text Extraction", () => {
+ test("extracts text from string message content", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_string_output",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "message",
+ role: "assistant",
+ content: "Simple string output",
+ },
+ ],
+ input: [{ type: "message", content: "input" }],
+ };
+
+ render(
+ ,
+ );
+ expect(screen.getByText("Simple string output")).toBeInTheDocument();
+ });
+
+ test("extracts text from array message content with output_text type", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_array_output",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "message",
+ role: "assistant",
+ content: [
+ { type: "output_text", text: "Array output text" },
+ { type: "output_text", text: "Should not be used" },
+ ],
+ },
+ ],
+ input: [{ type: "message", content: "input" }],
+ };
+
+ render(
+ ,
+ );
+ expect(screen.getByText("Array output text")).toBeInTheDocument();
+ });
+
+ test("formats function call output", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_function_call",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "function_call",
+ id: "call_123",
+ status: "completed",
+ name: "search_function",
+ arguments: '{"query": "test"}',
+ },
+ ],
+ input: [{ type: "message", content: "input" }],
+ };
+
+ render(
+ ,
+ );
+ expect(
+ screen.getByText('search_function({"query": "test"})'),
+ ).toBeInTheDocument();
+ });
+
+ test("formats function call output without arguments", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_function_no_args",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "function_call",
+ id: "call_123",
+ status: "completed",
+ name: "simple_function",
+ },
+ ],
+ input: [{ type: "message", content: "input" }],
+ };
+
+ render(
+ ,
+ );
+ expect(screen.getByText("simple_function({})")).toBeInTheDocument();
+ });
+
+ test("formats web search call output", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_web_search",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "web_search_call",
+ id: "search_123",
+ status: "completed",
+ },
+ ],
+ input: [{ type: "message", content: "input" }],
+ };
+
+ render(
+ ,
+ );
+ expect(
+ screen.getByText("web_search_call(status: completed)"),
+ ).toBeInTheDocument();
+ });
+
+ test("falls back to JSON.stringify for unknown tool call types", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_unknown_tool",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "unknown_call",
+ id: "unknown_123",
+ status: "completed",
+ custom_field: "custom_value",
+ } as any,
+ ],
+ input: [{ type: "message", content: "input" }],
+ };
+
+ render(
+ ,
+ );
+ // Should contain the JSON stringified version
+ expect(screen.getByText(/unknown_call/)).toBeInTheDocument();
+ });
+
+ test("falls back to JSON.stringify for entire output when no message or tool call found", () => {
+ const mockResponse: OpenAIResponse = {
+ id: "resp_fallback",
+ object: "response",
+ created_at: 1710000000,
+ model: "test-model",
+ status: "completed",
+ output: [
+ {
+ type: "unknown_type",
+ data: "some data",
+ } as any,
+ ],
+ input: [{ type: "message", content: "input" }],
+ };
+
+ render(
+ ,
+ );
+ // Should contain the JSON stringified version of the output array
+ expect(screen.getByText(/unknown_type/)).toBeInTheDocument();
+ });
+ });
+
+ describe("Text Truncation", () => {
+ test("truncates long input and output text", () => {
+ // Specific mock implementation for this test
+ truncateText.mockImplementation(
+ (text: string | undefined, maxLength?: number) => {
+ const defaultTestMaxLength = 10;
+ const effectiveMaxLength = maxLength ?? defaultTestMaxLength;
+ return typeof text === "string" && text.length > effectiveMaxLength
+ ? text.slice(0, effectiveMaxLength) + "..."
+ : text;
+ },
+ );
+
+ const longInput =
+ "This is a very long input message that should be truncated.";
+ const longOutput =
+ "This is a very long output message that should also be truncated.";
+
+ const mockResponse: OpenAIResponse = {
+ id: "resp_trunc",
+ object: "response",
+ created_at: 1710002000,
+ model: "llama-trunc-model",
+ status: "completed",
+ output: [
+ {
+ type: "message",
+ role: "assistant",
+ content: longOutput,
+ },
+ ],
+ input: [
+ {
+ type: "message",
+ role: "user",
+ content: longInput,
+ },
+ ],
+ };
+
+ render(
+ ,
+ );
+
+ // The truncated text should be present for both input and output
+ const truncatedTexts = screen.getAllByText(
+ longInput.slice(0, 10) + "...",
+ );
+ expect(truncatedTexts.length).toBe(2); // one for input, one for output
+ truncatedTexts.forEach((textElement) =>
+ expect(textElement).toBeInTheDocument(),
+ );
+ });
+ });
+});
diff --git a/llama_stack/ui/components/responses/responses-table.tsx b/llama_stack/ui/components/responses/responses-table.tsx
new file mode 100644
index 000000000..352450d18
--- /dev/null
+++ b/llama_stack/ui/components/responses/responses-table.tsx
@@ -0,0 +1,117 @@
+"use client";
+
+import {
+ OpenAIResponse,
+ ResponseInput,
+ ResponseInputMessageContent,
+} from "@/lib/types";
+import { LogsTable, LogTableRow } from "@/components/logs/logs-table";
+import {
+ isMessageInput,
+ isMessageItem,
+ isFunctionCallItem,
+ isWebSearchCallItem,
+ MessageItem,
+ FunctionCallItem,
+ WebSearchCallItem,
+} from "./utils/item-types";
+
+interface ResponsesTableProps {
+ data: OpenAIResponse[];
+ isLoading: boolean;
+ error: Error | null;
+}
+
+// Returns the text shown in the table's "Input" column: the displayable
+// content of the first input item whose type is "message", or "" when the
+// response has no message-type input.
+function getInputText(response: OpenAIResponse): string {
+  const firstInput = response.input.find(isMessageInput);
+  if (firstInput) {
+    return extractContentFromItem(firstInput);
+  }
+  return "";
+}
+
+// Returns the text shown in the table's "Output" column. Preference order:
+// the first message item's content, then the first function call rendered as
+// `name(arguments)`, then the first web search call rendered with its status;
+// otherwise the raw JSON of the whole output array so the cell is never blank.
+//
+// Note: the type guards accept AnyResponseItem, and ResponseOutput is
+// assignable to it, so they can be passed to `.find` directly — predicate
+// narrowing then types the results without any `as` casts.
+function getOutputText(response: OpenAIResponse): string {
+  // 1) Prefer a plain message, if it has non-empty displayable content.
+  const firstMessage = response.output.find(isMessageItem);
+  if (firstMessage) {
+    const content = extractContentFromItem(firstMessage);
+    if (content) {
+      return content;
+    }
+  }
+
+  // 2) Fall back to the first function call.
+  const functionCall = response.output.find(isFunctionCallItem);
+  if (functionCall) {
+    return formatFunctionCall(functionCall);
+  }
+
+  // 3) Then a web search call.
+  const webSearchCall = response.output.find(isWebSearchCallItem);
+  if (webSearchCall) {
+    return formatWebSearchCall(webSearchCall);
+  }
+
+  // 4) Nothing recognizable: emit raw JSON as a last resort.
+  return JSON.stringify(response.output);
+}
+
+// Pulls displayable text out of an item's `content`, which may be a plain
+// string or an array of typed content parts. For arrays, returns the text of
+// the first input_text/output_text part (other part kinds, e.g. images, are
+// ignored). Returns "" when there is no content or no text part.
+function extractContentFromItem(item: {
+  content?: string | ResponseInputMessageContent[];
+}): string {
+  if (!item.content) {
+    return "";
+  }
+
+  if (typeof item.content === "string") {
+    return item.content;
+  } else if (Array.isArray(item.content)) {
+    const textContent = item.content.find(
+      (c: ResponseInputMessageContent) =>
+        c.type === "input_text" || c.type === "output_text",
+    );
+    return textContent?.text || "";
+  }
+  return "";
+}
+
+// Renders a function call as `name(arguments)`; falls back to "unknown" /
+// "{}" when the name or the JSON-encoded arguments are absent.
+function formatFunctionCall(functionCall: FunctionCallItem): string {
+  const args = functionCall.arguments || "{}";
+  const name = functionCall.name || "unknown";
+  return `${name}(${args})`;
+}
+
+// Renders a web search call as `web_search_call(status: <status>)`.
+function formatWebSearchCall(webSearchCall: WebSearchCallItem): string {
+  return `web_search_call(status: ${webSearchCall.status})`;
+}
+
+// Maps one OpenAIResponse onto the generic LogsTable row shape, including
+// the link to its detail page. `created_at` is epoch seconds, hence * 1000
+// before constructing the Date.
+function formatResponseToRow(response: OpenAIResponse): LogTableRow {
+  return {
+    id: response.id,
+    input: getInputText(response),
+    output: getOutputText(response),
+    model: response.model,
+    createdTime: new Date(response.created_at * 1000).toLocaleString(),
+    detailPath: `/logs/responses/${response.id}`,
+  };
+}
+
+export function ResponsesTable({
+ data,
+ isLoading,
+ error,
+}: ResponsesTableProps) {
+ const formattedData = data.map(formatResponseToRow);
+
+ return (
+
+ );
+}
diff --git a/llama_stack/ui/components/responses/utils/item-types.ts b/llama_stack/ui/components/responses/utils/item-types.ts
new file mode 100644
index 000000000..2bde49119
--- /dev/null
+++ b/llama_stack/ui/components/responses/utils/item-types.ts
@@ -0,0 +1,61 @@
+/**
+ * Type guards for different item types in responses
+ */
+
+import type {
+ ResponseInput,
+ ResponseOutput,
+ ResponseMessage,
+ ResponseToolCall,
+} from "@/lib/types";
+
+// Loosest item shape: anything carrying a `type` discriminator.
+export interface BaseItem {
+  type: string;
+  [key: string]: unknown;
+}
+
+// Refinements used by the type guards below.
+export type MessageItem = ResponseMessage;
+export type FunctionCallItem = ResponseToolCall & { type: "function_call" };
+export type WebSearchCallItem = ResponseToolCall & { type: "web_search_call" };
+// Result record for a function call, matched back to it via `call_id`.
+export type FunctionCallOutputItem = BaseItem & {
+  type: "function_call_output";
+  call_id: string;
+  output?: string | object;
+};
+
+// Union of every item kind that can appear in a response's input or output.
+export type AnyResponseItem =
+  | ResponseInput
+  | ResponseOutput
+  | FunctionCallOutputItem;
+
+// Narrows a raw input item to a message-type item.
+export function isMessageInput(
+  item: ResponseInput,
+): item is ResponseInput & { type: "message" } {
+  return item.type === "message";
+}
+
+// Narrows any item to a chat message; requires a `content` field so bare
+// `type: "message"` objects without content are excluded.
+export function isMessageItem(item: AnyResponseItem): item is MessageItem {
+  return item.type === "message" && "content" in item;
+}
+
+// Narrows to a function tool call; requires `name` to distinguish it from
+// a function_call_output record.
+export function isFunctionCallItem(
+  item: AnyResponseItem,
+): item is FunctionCallItem {
+  return item.type === "function_call" && "name" in item;
+}
+
+// Narrows to a web search tool call (identified by type alone).
+export function isWebSearchCallItem(
+  item: AnyResponseItem,
+): item is WebSearchCallItem {
+  return item.type === "web_search_call";
+}
+
+// Narrows to a tool-call result record; `call_id` must be a string so it
+// can be matched back to the originating function_call.
+export function isFunctionCallOutputItem(
+  item: AnyResponseItem,
+): item is FunctionCallOutputItem {
+  return (
+    item.type === "function_call_output" &&
+    "call_id" in item &&
+    typeof (item as any).call_id === "string"
+  );
+}
diff --git a/llama_stack/ui/components/ui/message-components.tsx b/llama_stack/ui/components/ui/message-components.tsx
new file mode 100644
index 000000000..50ccd623e
--- /dev/null
+++ b/llama_stack/ui/components/ui/message-components.tsx
@@ -0,0 +1,49 @@
+import React from "react";
+
+export interface MessageBlockProps {
+ label: string;
+ labelDetail?: string;
+ content: React.ReactNode;
+ className?: string;
+ contentClassName?: string;
+}
+
+export const MessageBlock: React.FC = ({
+ label,
+ labelDetail,
+ content,
+ className = "",
+ contentClassName = "",
+}) => {
+ return (
+
+
+ {label}
+ {labelDetail && (
+
+ {labelDetail}
+
+ )}
+
+
+ {content}
+
+
+ );
+};
+
+export interface ToolCallBlockProps {
+ children: React.ReactNode;
+ className?: string;
+}
+
+export const ToolCallBlock = ({ children, className }: ToolCallBlockProps) => {
+ const baseClassName =
+ "p-3 bg-slate-50 border border-slate-200 rounded-md text-sm";
+
+ return (
+
+ );
+};
diff --git a/llama_stack/ui/lib/client.ts b/llama_stack/ui/lib/client.ts
new file mode 100644
index 000000000..df2a8e2f2
--- /dev/null
+++ b/llama_stack/ui/lib/client.ts
@@ -0,0 +1,12 @@
+import LlamaStackClient from "llama-stack-client";
+import OpenAI from "openai";
+
+// Shared API client singleton used by the UI's data-fetching code.
+// When NEXT_PUBLIC_USE_OPENAI_CLIENT === "true" an OpenAI browser client is
+// created instead of the LlamaStackClient (useful for testing against OpenAI
+// directly). `dangerouslyAllowBrowser` is required because the API key is
+// exposed to the browser — NOTE(review): test-only; confirm this flag is
+// never enabled in a production build.
+export const client =
+  process.env.NEXT_PUBLIC_USE_OPENAI_CLIENT === "true" // useful for testing
+    ? new OpenAI({
+        apiKey: process.env.NEXT_PUBLIC_OPENAI_API_KEY,
+        dangerouslyAllowBrowser: true,
+      })
+    : new LlamaStackClient({
+        baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
+      });
diff --git a/llama_stack/ui/lib/format-message-content.ts b/llama_stack/ui/lib/format-message-content.ts
index abdfed7a1..3e7e03a12 100644
--- a/llama_stack/ui/lib/format-message-content.ts
+++ b/llama_stack/ui/lib/format-message-content.ts
@@ -43,10 +43,14 @@ export function extractDisplayableText(
return "";
}
- let textPart = extractTextFromContentPart(message.content);
+ const textPart = extractTextFromContentPart(message.content);
let toolCallPart = "";
- if (message.tool_calls && message.tool_calls.length > 0) {
+ if (
+ message.tool_calls &&
+ Array.isArray(message.tool_calls) &&
+ message.tool_calls.length > 0
+ ) {
// For summary, usually the first tool call is sufficient
toolCallPart = formatToolCallToString(message.tool_calls[0]);
}
diff --git a/llama_stack/ui/lib/types.ts b/llama_stack/ui/lib/types.ts
index 24f967bd9..e08fb8d82 100644
--- a/llama_stack/ui/lib/types.ts
+++ b/llama_stack/ui/lib/types.ts
@@ -18,20 +18,20 @@ export interface ImageUrlContentBlock {
export type ChatMessageContentPart =
| TextContentBlock
| ImageUrlContentBlock
- | { type: string; [key: string]: any }; // Fallback for other potential types
+ | { type: string; [key: string]: unknown }; // Fallback for other potential types
export interface ChatMessage {
role: string;
content: string | ChatMessageContentPart[]; // Updated content type
name?: string | null;
- tool_calls?: any | null; // This could also be refined to a more specific ToolCall[] type
+ tool_calls?: unknown | null; // This could also be refined to a more specific ToolCall[] type
}
export interface Choice {
message: ChatMessage;
finish_reason: string;
index: number;
- logprobs?: any | null;
+ logprobs?: unknown | null;
}
export interface ChatCompletion {
@@ -42,3 +42,62 @@ export interface ChatCompletion {
model: string;
input_messages: ChatMessage[];
}
+
+// Response types for OpenAI Responses API
+export interface ResponseInputMessageContent {
+ text?: string;
+ type: "input_text" | "input_image" | "output_text";
+ image_url?: string;
+ detail?: "low" | "high" | "auto";
+}
+
+export interface ResponseMessage {
+ content: string | ResponseInputMessageContent[];
+ role: "system" | "developer" | "user" | "assistant";
+ type: "message";
+ id?: string;
+ status?: string;
+}
+
+export interface ResponseToolCall {
+ id: string;
+ status: string;
+ type: "web_search_call" | "function_call";
+ arguments?: string;
+ call_id?: string;
+ name?: string;
+}
+
+export type ResponseOutput = ResponseMessage | ResponseToolCall;
+
+export interface ResponseInput {
+ type: string;
+ content?: string | ResponseInputMessageContent[];
+ role?: string;
+ [key: string]: unknown; // Flexible for various input types
+}
+
+export interface OpenAIResponse {
+ id: string;
+ created_at: number;
+ model: string;
+ object: "response";
+ status: string;
+ output: ResponseOutput[];
+ input: ResponseInput[];
+ error?: {
+ code: string;
+ message: string;
+ };
+ parallel_tool_calls?: boolean;
+ previous_response_id?: string;
+ temperature?: number;
+ top_p?: number;
+ truncation?: string;
+ user?: string;
+}
+
+export interface InputItemListResponse {
+ data: ResponseInput[];
+ object: "list";
+}
diff --git a/llama_stack/ui/package-lock.json b/llama_stack/ui/package-lock.json
index 199bd17a1..931faa60a 100644
--- a/llama_stack/ui/package-lock.json
+++ b/llama_stack/ui/package-lock.json
@@ -19,6 +19,7 @@
"lucide-react": "^0.510.0",
"next": "15.3.2",
"next-themes": "^0.4.6",
+ "openai": "^4.103.0",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"tailwind-merge": "^3.3.0"
@@ -9092,7 +9093,7 @@
},
"node_modules/llama-stack-client": {
"version": "0.0.1-alpha.0",
- "resolved": "git+ssh://git@github.com/stainless-sdks/llama-stack-node.git#efa814980d44b3b2c92944377a086915137b2134",
+ "resolved": "git+ssh://git@github.com/stainless-sdks/llama-stack-node.git#5d34d229fb53b6dad02da0f19f4b310b529c6b15",
"license": "Apache-2.0",
"dependencies": {
"@types/node": "^18.11.18",
@@ -9804,6 +9805,51 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
+ "node_modules/openai": {
+ "version": "4.103.0",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.103.0.tgz",
+ "integrity": "sha512-eWcz9kdurkGOFDtd5ySS5y251H2uBgq9+1a2lTBnjMMzlexJ40Am5t6Mu76SSE87VvitPa0dkIAp75F+dZVC0g==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7"
+ },
+ "bin": {
+ "openai": "bin/cli"
+ },
+ "peerDependencies": {
+ "ws": "^8.18.0",
+ "zod": "^3.23.8"
+ },
+ "peerDependenciesMeta": {
+ "ws": {
+ "optional": true
+ },
+ "zod": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/openai/node_modules/@types/node": {
+ "version": "18.19.103",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.103.tgz",
+ "integrity": "sha512-hHTHp+sEz6SxFsp+SA+Tqrua3AbmlAw+Y//aEwdHrdZkYVRWdvWD3y5uPZ0flYOkgskaFWqZ/YGFm3FaFQ0pRw==",
+ "license": "MIT",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
+ "node_modules/openai/node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+ "license": "MIT"
+ },
"node_modules/optionator": {
"version": "0.9.4",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
@@ -12223,7 +12269,7 @@
"version": "8.18.2",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz",
"integrity": "sha512-DMricUmwGZUVr++AEAe2uiVM7UoO9MAVZMDu05UQOaUII0lp+zOzLLU4Xqh/JvTqklB1T4uELaaPBKyjE1r4fQ==",
- "dev": true,
+ "devOptional": true,
"license": "MIT",
"engines": {
"node": ">=10.0.0"
@@ -12334,7 +12380,7 @@
"version": "3.24.4",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.24.4.tgz",
"integrity": "sha512-OdqJE9UDRPwWsrHjLN2F8bPxvwJBK22EHLWtanu0LSYr5YqzsaaW3RMgmjwr8Rypg5k+meEJdSPXJZXE/yqOMg==",
- "dev": true,
+ "devOptional": true,
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"