forked from phoenix-oss/llama-stack-mirror
feat(ui): add views for Responses (#2293)
# What does this PR do?
* Add responses list and detail views
* Refactored components to be shared as much as possible between chat completions and responses

## Test Plan
<img width="2014" alt="image" src="https://github.com/user-attachments/assets/6dee12ea-8876-4351-a6eb-2338058466ef" />
<img width="2021" alt="image" src="https://github.com/user-attachments/assets/6c7c71b8-25b7-4199-9c57-6960be5580c8" />
Added tests.
This commit is contained in:
parent 6352078e4b
commit 56e5ddb39f
34 changed files with 3282 additions and 380 deletions
@@ -2,9 +2,9 @@

 import { useEffect, useState } from "react";
 import { useParams } from "next/navigation";
-import LlamaStackClient from "llama-stack-client";
 import { ChatCompletion } from "@/lib/types";
 import { ChatCompletionDetailView } from "@/components/chat-completions/chat-completion-detail";
+import { client } from "@/lib/client";

 export default function ChatCompletionDetailPage() {
   const params = useParams();
@@ -22,10 +22,6 @@ export default function ChatCompletionDetailPage() {
       return;
     }

-    const client = new LlamaStackClient({
-      baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
-    });
-
     const fetchCompletionDetail = async () => {
       setIsLoading(true);
       setError(null);
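The pages in this commit stop constructing a `LlamaStackClient` per page and instead import a shared `client` from `@/lib/client`. That module is not part of the hunks captured here; a minimal sketch of what it presumably exports, based on the constructor call removed above, would be:

```typescript
// llama_stack/ui/lib/client.ts — sketch only; the actual module is not shown in this diff.
import LlamaStackClient from "llama-stack-client";

// Single shared client instance, configured from the same env var the
// per-page constructors used before this refactor.
export const client = new LlamaStackClient({
  baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
});
```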
@@ -1,45 +1,19 @@
 "use client";

 import React from "react";
-import { usePathname, useParams } from "next/navigation";
-import {
-  PageBreadcrumb,
-  BreadcrumbSegment,
-} from "@/components/layout/page-breadcrumb";
-import { truncateText } from "@/lib/truncate-text";
+import LogsLayout from "@/components/layout/logs-layout";

 export default function ChatCompletionsLayout({
   children,
 }: {
   children: React.ReactNode;
 }) {
-  const pathname = usePathname();
-  const params = useParams();
-
-  let segments: BreadcrumbSegment[] = [];
-
-  // Default for /logs/chat-completions
-  if (pathname === "/logs/chat-completions") {
-    segments = [{ label: "Chat Completions" }];
-  }
-
-  // For /logs/chat-completions/[id]
-  const idParam = params?.id;
-  if (idParam && typeof idParam === "string") {
-    segments = [
-      { label: "Chat Completions", href: "/logs/chat-completions" },
-      { label: `Details (${truncateText(idParam, 20)})` },
-    ];
-  }
-
   return (
-    <div className="container mx-auto p-4">
-      <>
-        {segments.length > 0 && (
-          <PageBreadcrumb segments={segments} className="mb-4" />
-        )}
-        {children}
-      </>
-    </div>
+    <LogsLayout
+      sectionLabel="Chat Completions"
+      basePath="/logs/chat-completions"
+    >
+      {children}
+    </LogsLayout>
   );
 }
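Both section layouts now delegate breadcrumb handling to a shared `LogsLayout` component. Its implementation lives elsewhere in this commit and is not captured above; judging from the props used at the call sites (`sectionLabel`, `basePath`) and the logic removed from this layout, it plausibly looks something like the sketch below. The prop names are taken from the diff; the body is an assumption reconstructed from the removed code.

```tsx
// Sketch of the shared layout's likely shape; not the verbatim file from this commit.
"use client";

import React from "react";
import { usePathname, useParams } from "next/navigation";
import {
  PageBreadcrumb,
  BreadcrumbSegment,
} from "@/components/layout/page-breadcrumb";
import { truncateText } from "@/lib/truncate-text";

export default function LogsLayout({
  children,
  sectionLabel,
  basePath,
}: {
  children: React.ReactNode;
  sectionLabel: string;
  basePath: string;
}) {
  const pathname = usePathname();
  const params = useParams();

  // Breadcrumbs for the section root, e.g. /logs/chat-completions
  let segments: BreadcrumbSegment[] = [];
  if (pathname === basePath) {
    segments = [{ label: sectionLabel }];
  }

  // Breadcrumbs for a detail page, e.g. /logs/chat-completions/[id]
  const idParam = params?.id;
  if (idParam && typeof idParam === "string") {
    segments = [
      { label: sectionLabel, href: basePath },
      { label: `Details (${truncateText(idParam, 20)})` },
    ];
  }

  return (
    <div className="container mx-auto p-4">
      {segments.length > 0 && (
        <PageBreadcrumb segments={segments} className="mb-4" />
      )}
      {children}
    </div>
  );
}
```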
@@ -1,9 +1,9 @@
 "use client";

 import { useEffect, useState } from "react";
-import LlamaStackClient from "llama-stack-client";
 import { ChatCompletion } from "@/lib/types";
-import { ChatCompletionsTable } from "@/components/chat-completions/chat-completion-table";
+import { ChatCompletionsTable } from "@/components/chat-completions/chat-completions-table";
+import { client } from "@/lib/client";

 export default function ChatCompletionsPage() {
   const [completions, setCompletions] = useState<ChatCompletion[]>([]);

@@ -11,9 +11,6 @@ export default function ChatCompletionsPage() {
   const [error, setError] = useState<Error | null>(null);

   useEffect(() => {
-    const client = new LlamaStackClient({
-      baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
-    });
     const fetchCompletions = async () => {
       setIsLoading(true);
       setError(null);

@@ -21,7 +18,7 @@ export default function ChatCompletionsPage() {
         const response = await client.chat.completions.list();
         const data = Array.isArray(response)
           ? response
-          : (response as any).data;
+          : (response as { data: ChatCompletion[] }).data;

         if (Array.isArray(data)) {
           setCompletions(data);

@@ -46,7 +43,7 @@ export default function ChatCompletionsPage() {

   return (
     <ChatCompletionsTable
-      completions={completions}
+      data={completions}
       isLoading={isLoading}
       error={error}
     />
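The list call's return shape is narrowed with an inline cast in the hunk above, trading the old `any` cast for a typed one. As an aside (not part of this PR), the same pattern can be factored into a small helper if more list pages need it; a sketch, assuming the API may return either a bare array or a `{ data: [...] }` envelope:

```typescript
// Illustrative helper, not present in this commit: normalizes a list response
// that may be either a plain array or an object wrapping the items in `data`.
function extractListData<T>(response: unknown): T[] {
  if (Array.isArray(response)) {
    return response as T[];
  }
  return (response as { data: T[] }).data;
}

// e.g. const completions = extractListData<ChatCompletion>(await client.chat.completions.list());
```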
llama_stack/ui/app/logs/responses/[id]/page.tsx (new file, 125 lines)
@@ -0,0 +1,125 @@
"use client";

import { useEffect, useState } from "react";
import { useParams } from "next/navigation";
import type { ResponseObject } from "llama-stack-client/resources/responses/responses";
import { OpenAIResponse, InputItemListResponse } from "@/lib/types";
import { ResponseDetailView } from "@/components/responses/responses-detail";
import { client } from "@/lib/client";

export default function ResponseDetailPage() {
  const params = useParams();
  const id = params.id as string;

  const [responseDetail, setResponseDetail] = useState<OpenAIResponse | null>(
    null,
  );
  const [inputItems, setInputItems] = useState<InputItemListResponse | null>(
    null,
  );
  const [isLoading, setIsLoading] = useState<boolean>(true);
  const [isLoadingInputItems, setIsLoadingInputItems] = useState<boolean>(true);
  const [error, setError] = useState<Error | null>(null);
  const [inputItemsError, setInputItemsError] = useState<Error | null>(null);

  // Helper function to convert ResponseObject to OpenAIResponse
  const convertResponseObject = (
    responseData: ResponseObject,
  ): OpenAIResponse => {
    return {
      id: responseData.id,
      created_at: responseData.created_at,
      model: responseData.model,
      object: responseData.object,
      status: responseData.status,
      output: responseData.output as OpenAIResponse["output"],
      input: [], // ResponseObject doesn't include input; component uses inputItems prop instead
      error: responseData.error,
      parallel_tool_calls: responseData.parallel_tool_calls,
      previous_response_id: responseData.previous_response_id,
      temperature: responseData.temperature,
      top_p: responseData.top_p,
      truncation: responseData.truncation,
      user: responseData.user,
    };
  };

  useEffect(() => {
    if (!id) {
      setError(new Error("Response ID is missing."));
      setIsLoading(false);
      return;
    }

    const fetchResponseDetail = async () => {
      setIsLoading(true);
      setIsLoadingInputItems(true);
      setError(null);
      setInputItemsError(null);
      setResponseDetail(null);
      setInputItems(null);

      try {
        const [responseResult, inputItemsResult] = await Promise.allSettled([
          client.responses.retrieve(id),
          client.responses.inputItems.list(id, { order: "asc" }),
        ]);

        // Handle response detail result
        if (responseResult.status === "fulfilled") {
          const convertedResponse = convertResponseObject(responseResult.value);
          setResponseDetail(convertedResponse);
        } else {
          console.error(
            `Error fetching response detail for ID ${id}:`,
            responseResult.reason,
          );
          setError(
            responseResult.reason instanceof Error
              ? responseResult.reason
              : new Error("Failed to fetch response detail"),
          );
        }

        // Handle input items result
        if (inputItemsResult.status === "fulfilled") {
          const inputItemsData =
            inputItemsResult.value as unknown as InputItemListResponse;
          setInputItems(inputItemsData);
        } else {
          console.error(
            `Error fetching input items for response ID ${id}:`,
            inputItemsResult.reason,
          );
          setInputItemsError(
            inputItemsResult.reason instanceof Error
              ? inputItemsResult.reason
              : new Error("Failed to fetch input items"),
          );
        }
      } catch (err) {
        console.error(`Unexpected error fetching data for ID ${id}:`, err);
        setError(
          err instanceof Error ? err : new Error("Unexpected error occurred"),
        );
      } finally {
        setIsLoading(false);
        setIsLoadingInputItems(false);
      }
    };

    fetchResponseDetail();
  }, [id]);

  return (
    <ResponseDetailView
      response={responseDetail}
      inputItems={inputItems}
      isLoading={isLoading}
      isLoadingInputItems={isLoadingInputItems}
      error={error}
      inputItemsError={inputItemsError}
      id={id}
    />
  );
}
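Both responses pages convert SDK objects into a local `OpenAIResponse` type from `@/lib/types`. That type is added elsewhere in this commit and is not captured in the hunks above; judging from the fields the converters populate, its shape is presumably something like the sketch below (the element types of `output` and `input` and the optionality markers are assumptions, not the real definitions).

```typescript
// Sketch of the assumed OpenAIResponse shape in llama_stack/ui/lib/types.ts,
// reconstructed from the fields set by convertResponseObject above.
export interface OpenAIResponse {
  id: string;
  created_at: number;
  model: string;
  object: string; // e.g. "response"
  status: string;
  output: unknown[]; // actual element type defined in lib/types.ts
  input: unknown[]; // actual element type defined in lib/types.ts
  error?: unknown; // exact error shape defined in lib/types.ts
  parallel_tool_calls?: boolean;
  previous_response_id?: string | null;
  temperature?: number | null;
  top_p?: number | null;
  truncation?: string | null;
  user?: string | null;
}
```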
llama_stack/ui/app/logs/responses/layout.tsx (new file, 16 lines)
@@ -0,0 +1,16 @@
"use client";

import React from "react";
import LogsLayout from "@/components/layout/logs-layout";

export default function ResponsesLayout({
  children,
}: {
  children: React.ReactNode;
}) {
  return (
    <LogsLayout sectionLabel="Responses" basePath="/logs/responses">
      {children}
    </LogsLayout>
  );
}
@@ -1,7 +1,66 @@
-export default function Responses() {
+"use client";
+
+import { useEffect, useState } from "react";
+import type { ResponseListResponse } from "llama-stack-client/resources/responses/responses";
+import { OpenAIResponse } from "@/lib/types";
+import { ResponsesTable } from "@/components/responses/responses-table";
+import { client } from "@/lib/client";
+
+export default function ResponsesPage() {
+  const [responses, setResponses] = useState<OpenAIResponse[]>([]);
+  const [isLoading, setIsLoading] = useState<boolean>(true);
+  const [error, setError] = useState<Error | null>(null);
+
+  // Helper function to convert ResponseListResponse.Data to OpenAIResponse
+  const convertResponseListData = (
+    responseData: ResponseListResponse.Data,
+  ): OpenAIResponse => {
+    return {
+      id: responseData.id,
+      created_at: responseData.created_at,
+      model: responseData.model,
+      object: responseData.object,
+      status: responseData.status,
+      output: responseData.output as OpenAIResponse["output"],
+      input: responseData.input as OpenAIResponse["input"],
+      error: responseData.error,
+      parallel_tool_calls: responseData.parallel_tool_calls,
+      previous_response_id: responseData.previous_response_id,
+      temperature: responseData.temperature,
+      top_p: responseData.top_p,
+      truncation: responseData.truncation,
+      user: responseData.user,
+    };
+  };
+
+  useEffect(() => {
+    const fetchResponses = async () => {
+      setIsLoading(true);
+      setError(null);
+      try {
+        const response = await client.responses.list();
+        const responseListData = response as ResponseListResponse;
+
+        const convertedResponses: OpenAIResponse[] = responseListData.data.map(
+          convertResponseListData,
+        );
+
+        setResponses(convertedResponses);
+      } catch (err) {
+        console.error("Error fetching responses:", err);
+        setError(
+          err instanceof Error ? err : new Error("Failed to fetch responses"),
+        );
+        setResponses([]);
+      } finally {
+        setIsLoading(false);
+      }
+    };
+
+    fetchResponses();
+  }, []);
+
   return (
-    <div>
-      <h1>Under Construction</h1>
-    </div>
+    <ResponsesTable data={responses} isLoading={isLoading} error={error} />
   );
 }
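The new `ResponsesTable` (and the renamed `ChatCompletionsTable`) are consumed through the same prop surface, which is what lets the two log views share table code. The components themselves are added elsewhere in this commit; from the call sites in this diff, their props presumably look roughly like:

```tsx
// Assumed prop shape for the shared table components, inferred from the
// data / isLoading / error call sites above; the real definitions live in the
// component files added elsewhere in this commit.
interface ResponsesTableProps {
  data: OpenAIResponse[];
  isLoading: boolean;
  error: Error | null;
}

// Usage as in the page above:
// <ResponsesTable data={responses} isLoading={isLoading} error={error} />
```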