feat(ui): add infinite scroll pagination to chat completions/responses logs table (#2466)

## Summary:

This commit adds infinite scroll pagination to the chat completions and responses log tables. The one-shot client-side fetch in each page is replaced by a cursor-based `usePagination` hook, and `LogsTable` now loads additional pages through an IntersectionObserver-driven `useInfiniteScroll` sentinel row.
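
A minimal sketch of the new wiring, using the hooks, components, and types added in this diff. The row mapping and date formatting below are illustrative only; the real component does this in `formatChatCompletionToRow`:

```tsx
"use client";

import { usePagination } from "@/hooks/usePagination";
import { LogsTable } from "@/components/logs/logs-table";
import { client } from "@/lib/client";
import {
  extractTextFromContentPart,
  extractDisplayableText,
} from "@/lib/format-message-content";
import { ChatCompletion, ListChatCompletionsResponse } from "@/lib/types";

export function ExampleCompletionsTable() {
  // Cursor-based fetch: `after` is the id of the last row already loaded.
  const fetchFunction = async (params: { after?: string; limit: number }) => {
    const response = await client.chat.completions.list({
      after: params.after,
      limit: params.limit,
    } as any);
    return response as ListChatCompletionsResponse;
  };

  // The hook tracks data/status/hasMore and exposes loadMore() for the table.
  const { data, status, hasMore, error, loadMore } = usePagination({
    limit: 20,
    fetchFunction,
    errorMessagePrefix: "chat completions",
  });

  // LogsTable renders a 1px sentinel row; useInfiniteScroll observes it and
  // calls onLoadMore once it comes within 100px of the scroll container.
  return (
    <LogsTable
      data={data.map((c: ChatCompletion) => ({
        id: c.id,
        input: extractTextFromContentPart(c.input_messages[0]?.content),
        output: extractDisplayableText(c.choices[0]?.message),
        model: c.model,
        createdTime: new Date(c.created * 1000).toLocaleString(), // illustrative
        detailPath: `/logs/chat-completions/${c.id}`,
      }))}
      status={status}
      hasMore={hasMore}
      error={error}
      onLoadMore={loadMore}
      caption="Recent chat completions"
      emptyMessage="No chat completions found."
    />
  );
}
```

With this in place, the page components shrink to `return <ChatCompletionsTable paginationOptions={{ limit: 20 }} />;` (and the equivalent for responses), as shown in the diff below.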


## Test Plan:
  1. Run unit tests: npm run test
  2. Manual testing: navigate to the chat completions/responses pages
  3. Verify infinite scroll triggers when approaching the bottom (see the sketch after this list)
  4. Added Playwright tests: npm run test:e2e
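
Step 3 is also exercised by the Playwright spec added in this commit; condensed, the scroll check looks like this (route and selectors as in the new `e2e` test below):

```ts
import { test, expect } from "@playwright/test";

test("loads more rows when scrolled near the bottom", async ({ page }) => {
  await page.goto("/logs/chat-completions");
  await page.waitForSelector("table tbody tr");

  const initialRowCount = await page.locator("table tbody tr").count();

  // Scroll the table container close to its bottom so the sentinel row
  // enters the IntersectionObserver's 100px root margin.
  await page
    .locator("div.overflow-auto")
    .first()
    .evaluate((el) => {
      el.scrollTop = el.scrollHeight - el.clientHeight - 100;
    });

  await page.waitForTimeout(2000);
  expect(await page.locator("table tbody tr").count()).toBeGreaterThan(
    initialRowCount,
  );
});
```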
ehhuang 2025-06-18 15:28:39 -07:00 committed by GitHub
parent 90d03552d4
commit e6bfc717cb
20 changed files with 1145 additions and 388 deletions

.gitignore vendored
View file

@ -26,3 +26,5 @@ venv/
pytest-report.xml
.coverage
.python-version
CLAUDE.md
.claude/

View file

@ -39,3 +39,6 @@ yarn-error.log*
# typescript
*.tsbuildinfo
next-env.d.ts
# playwright
.last-run.json

View file

@ -1,51 +1,7 @@
"use client";
import { useEffect, useState } from "react";
import { ChatCompletion } from "@/lib/types";
import { ChatCompletionsTable } from "@/components/chat-completions/chat-completions-table";
import { client } from "@/lib/client";
export default function ChatCompletionsPage() {
const [completions, setCompletions] = useState<ChatCompletion[]>([]);
const [isLoading, setIsLoading] = useState<boolean>(true);
const [error, setError] = useState<Error | null>(null);
useEffect(() => {
const fetchCompletions = async () => {
setIsLoading(true);
setError(null);
try {
const response = await client.chat.completions.list();
const data = Array.isArray(response)
? response
: (response as { data: ChatCompletion[] }).data;
if (Array.isArray(data)) {
setCompletions(data);
} else {
console.error("Unexpected response structure:", response);
setError(new Error("Unexpected response structure"));
setCompletions([]);
}
} catch (err) {
console.error("Error fetching chat completions:", err);
setError(
err instanceof Error ? err : new Error("Failed to fetch completions"),
);
setCompletions([]);
} finally {
setIsLoading(false);
}
};
fetchCompletions();
}, []);
return (
<ChatCompletionsTable
data={completions}
isLoading={isLoading}
error={error}
/>
);
return <ChatCompletionsTable paginationOptions={{ limit: 20 }} />;
}

View file

@ -1,66 +1,7 @@
"use client";
import { useEffect, useState } from "react";
import type { ResponseListResponse } from "llama-stack-client/resources/responses/responses";
import { OpenAIResponse } from "@/lib/types";
import { ResponsesTable } from "@/components/responses/responses-table";
import { client } from "@/lib/client";
export default function ResponsesPage() {
const [responses, setResponses] = useState<OpenAIResponse[]>([]);
const [isLoading, setIsLoading] = useState<boolean>(true);
const [error, setError] = useState<Error | null>(null);
// Helper function to convert ResponseListResponse.Data to OpenAIResponse
const convertResponseListData = (
responseData: ResponseListResponse.Data,
): OpenAIResponse => {
return {
id: responseData.id,
created_at: responseData.created_at,
model: responseData.model,
object: responseData.object,
status: responseData.status,
output: responseData.output as OpenAIResponse["output"],
input: responseData.input as OpenAIResponse["input"],
error: responseData.error,
parallel_tool_calls: responseData.parallel_tool_calls,
previous_response_id: responseData.previous_response_id,
temperature: responseData.temperature,
top_p: responseData.top_p,
truncation: responseData.truncation,
user: responseData.user,
};
};
useEffect(() => {
const fetchResponses = async () => {
setIsLoading(true);
setError(null);
try {
const response = await client.responses.list();
const responseListData = response as ResponseListResponse;
const convertedResponses: OpenAIResponse[] = responseListData.data.map(
convertResponseListData,
);
setResponses(convertedResponses);
} catch (err) {
console.error("Error fetching responses:", err);
setError(
err instanceof Error ? err : new Error("Failed to fetch responses"),
);
setResponses([]);
} finally {
setIsLoading(false);
}
};
fetchResponses();
}, []);
return (
<ResponsesTable data={responses} isLoading={isLoading} error={error} />
);
return <ResponsesTable paginationOptions={{ limit: 20 }} />;
}

View file

@ -16,6 +16,29 @@ jest.mock("next/navigation", () => ({
jest.mock("@/lib/truncate-text");
jest.mock("@/lib/format-message-content");
// Mock the client
jest.mock("@/lib/client", () => ({
client: {
chat: {
completions: {
list: jest.fn(),
},
},
},
}));
// Mock the usePagination hook
const mockLoadMore = jest.fn();
jest.mock("@/hooks/usePagination", () => ({
usePagination: jest.fn(() => ({
data: [],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
})),
}));
// Import the mocked functions to set up default or specific implementations
import { truncateText as originalTruncateText } from "@/lib/truncate-text";
import {
@ -23,6 +46,12 @@ import {
extractDisplayableText as originalExtractDisplayableText,
} from "@/lib/format-message-content";
// Import the mocked hook
import { usePagination } from "@/hooks/usePagination";
const mockedUsePagination = usePagination as jest.MockedFunction<
typeof usePagination
>;
// Cast to jest.Mock for typings
const truncateText = originalTruncateText as jest.Mock;
const extractTextFromContentPart =
@ -30,11 +59,7 @@ const extractTextFromContentPart =
const extractDisplayableText = originalExtractDisplayableText as jest.Mock;
describe("ChatCompletionsTable", () => {
const defaultProps = {
data: [] as ChatCompletion[],
isLoading: false,
error: null,
};
const defaultProps = {};
beforeEach(() => {
// Reset all mocks before each test
@ -42,16 +67,27 @@ describe("ChatCompletionsTable", () => {
truncateText.mockClear();
extractTextFromContentPart.mockClear();
extractDisplayableText.mockClear();
mockLoadMore.mockClear();
jest.clearAllMocks();
// Default pass-through implementations
truncateText.mockImplementation((text: string | undefined) => text);
extractTextFromContentPart.mockImplementation((content: unknown) =>
typeof content === "string" ? content : "extracted text",
);
extractDisplayableText.mockImplementation(
(message: unknown) =>
(message as { content?: string })?.content || "extracted output",
);
extractDisplayableText.mockImplementation((message: unknown) => {
const msg = message as { content?: string };
return msg?.content || "extracted output";
});
// Default hook return value
mockedUsePagination.mockReturnValue({
data: [],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
});
test("renders without crashing with default props", () => {
@ -60,41 +96,56 @@ describe("ChatCompletionsTable", () => {
});
test("click on a row navigates to the correct URL", () => {
const mockCompletion: ChatCompletion = {
id: "comp_123",
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: "llama-test-model",
choices: [
{
index: 0,
message: { role: "assistant", content: "Test output" },
finish_reason: "stop",
},
],
input_messages: [{ role: "user", content: "Test input" }],
};
const mockData: ChatCompletion[] = [
{
id: "completion_123",
choices: [
{
message: { role: "assistant", content: "Test response" },
finish_reason: "stop",
index: 0,
},
],
object: "chat.completion",
created: 1234567890,
model: "test-model",
input_messages: [{ role: "user", content: "Test prompt" }],
},
];
// Set up mocks to return expected values
extractTextFromContentPart.mockReturnValue("Test input");
extractDisplayableText.mockReturnValue("Test output");
// Configure the mock to return our test data
mockedUsePagination.mockReturnValue({
data: mockData,
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ChatCompletionsTable {...defaultProps} data={[mockCompletion]} />);
render(<ChatCompletionsTable {...defaultProps} />);
const row = screen.getByText("Test input").closest("tr");
const row = screen.getByText("Test prompt").closest("tr");
if (row) {
fireEvent.click(row);
expect(mockPush).toHaveBeenCalledWith("/logs/chat-completions/comp_123");
expect(mockPush).toHaveBeenCalledWith(
"/logs/chat-completions/completion_123",
);
} else {
throw new Error('Row with "Test input" not found for router mock test.');
throw new Error('Row with "Test prompt" not found for router mock test.');
}
});
describe("Loading State", () => {
test("renders skeleton UI when isLoading is true", () => {
const { container } = render(
<ChatCompletionsTable {...defaultProps} isLoading={true} />,
);
mockedUsePagination.mockReturnValue({
data: [],
status: "loading",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
const { container } = render(<ChatCompletionsTable {...defaultProps} />);
// Check for skeleton in the table caption
const tableCaption = container.querySelector("caption");
@ -121,40 +172,48 @@ describe("ChatCompletionsTable", () => {
describe("Error State", () => {
test("renders error message when error prop is provided", () => {
const errorMessage = "Network Error";
render(
<ChatCompletionsTable
{...defaultProps}
error={{ name: "Error", message: errorMessage }}
/>,
);
mockedUsePagination.mockReturnValue({
data: [],
status: "error",
hasMore: false,
error: { name: "Error", message: errorMessage } as Error,
loadMore: mockLoadMore,
});
render(<ChatCompletionsTable {...defaultProps} />);
expect(
screen.getByText(`Error fetching data: ${errorMessage}`),
screen.getByText("Unable to load chat completions"),
).toBeInTheDocument();
expect(screen.getByText(errorMessage)).toBeInTheDocument();
});
test("renders default error message when error.message is not available", () => {
render(
<ChatCompletionsTable
{...defaultProps}
error={{ name: "Error", message: "" }}
/>,
);
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
).toBeInTheDocument();
});
test.each([{ name: "Error", message: "" }, {}])(
"renders default error message when error has no message",
(errorObject) => {
mockedUsePagination.mockReturnValue({
data: [],
status: "error",
hasMore: false,
error: errorObject as Error,
loadMore: mockLoadMore,
});
test("renders default error message when error prop is an object without message", () => {
render(<ChatCompletionsTable {...defaultProps} error={{} as Error} />);
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
).toBeInTheDocument();
});
render(<ChatCompletionsTable {...defaultProps} />);
expect(
screen.getByText("Unable to load chat completions"),
).toBeInTheDocument();
expect(
screen.getByText(
"An unexpected error occurred while loading the data.",
),
).toBeInTheDocument();
},
);
});
describe("Empty State", () => {
test('renders "No chat completions found." and no table when data array is empty', () => {
render(<ChatCompletionsTable data={[]} isLoading={false} error={null} />);
render(<ChatCompletionsTable {...defaultProps} />);
expect(
screen.getByText("No chat completions found."),
).toBeInTheDocument();
@ -167,7 +226,7 @@ describe("ChatCompletionsTable", () => {
describe("Data Rendering", () => {
test("renders table caption, headers, and completion data correctly", () => {
const mockCompletions = [
const mockCompletions: ChatCompletion[] = [
{
id: "comp_1",
object: "chat.completion",
@ -211,13 +270,15 @@ describe("ChatCompletionsTable", () => {
return "extracted output";
});
render(
<ChatCompletionsTable
data={mockCompletions}
isLoading={false}
error={null}
/>,
);
mockedUsePagination.mockReturnValue({
data: mockCompletions,
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ChatCompletionsTable {...defaultProps} />);
// Table caption
expect(
@ -268,7 +329,7 @@ describe("ChatCompletionsTable", () => {
extractTextFromContentPart.mockReturnValue(longInput);
extractDisplayableText.mockReturnValue(longOutput);
const mockCompletions = [
const mockCompletions: ChatCompletion[] = [
{
id: "comp_trunc",
object: "chat.completion",
@ -285,63 +346,72 @@ describe("ChatCompletionsTable", () => {
},
];
render(
<ChatCompletionsTable
data={mockCompletions}
isLoading={false}
error={null}
/>,
);
mockedUsePagination.mockReturnValue({
data: mockCompletions,
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ChatCompletionsTable {...defaultProps} />);
// The truncated text should be present for both input and output
const truncatedTexts = screen.getAllByText(
longInput.slice(0, 10) + "...",
);
expect(truncatedTexts.length).toBe(2); // one for input, one for output
truncatedTexts.forEach((textElement) =>
expect(textElement).toBeInTheDocument(),
);
});
test("uses content extraction functions correctly", () => {
const mockCompletion = {
id: "comp_extract",
object: "chat.completion",
created: 1710003000,
model: "llama-extract-model",
choices: [
{
index: 0,
message: { role: "assistant", content: "Extracted output" },
finish_reason: "stop",
},
],
input_messages: [{ role: "user", content: "Extracted input" }],
const complexMessage = [
{ type: "text", text: "Extracted input" },
{ type: "image", url: "http://example.com/image.png" },
];
const assistantMessage = {
role: "assistant",
content: "Extracted output from assistant",
};
const mockCompletions: ChatCompletion[] = [
{
id: "comp_extract",
object: "chat.completion",
created: 1710003000,
model: "llama-extract-model",
choices: [
{
index: 0,
message: assistantMessage,
finish_reason: "stop",
},
],
input_messages: [{ role: "user", content: complexMessage }],
},
];
extractTextFromContentPart.mockReturnValue("Extracted input");
extractDisplayableText.mockReturnValue("Extracted output");
extractDisplayableText.mockReturnValue("Extracted output from assistant");
render(
<ChatCompletionsTable
data={[mockCompletion]}
isLoading={false}
error={null}
/>,
);
// Verify the extraction functions were called
expect(extractTextFromContentPart).toHaveBeenCalledWith(
"Extracted input",
);
expect(extractDisplayableText).toHaveBeenCalledWith({
role: "assistant",
content: "Extracted output",
mockedUsePagination.mockReturnValue({
data: mockCompletions,
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
// Verify the extracted content is displayed
render(<ChatCompletionsTable {...defaultProps} />);
// Verify the extraction functions were called
expect(extractTextFromContentPart).toHaveBeenCalledWith(complexMessage);
expect(extractDisplayableText).toHaveBeenCalledWith(assistantMessage);
// Verify the extracted text appears in the table
expect(screen.getByText("Extracted input")).toBeInTheDocument();
expect(screen.getByText("Extracted output")).toBeInTheDocument();
expect(
screen.getByText("Extracted output from assistant"),
).toBeInTheDocument();
});
});
});

View file

@ -1,16 +1,21 @@
"use client";
import { ChatCompletion } from "@/lib/types";
import {
ChatCompletion,
UsePaginationOptions,
ListChatCompletionsResponse,
} from "@/lib/types";
import { LogsTable, LogTableRow } from "@/components/logs/logs-table";
import {
extractTextFromContentPart,
extractDisplayableText,
} from "@/lib/format-message-content";
import { usePagination } from "@/hooks/usePagination";
import { client } from "@/lib/client";
interface ChatCompletionsTableProps {
data: ChatCompletion[];
isLoading: boolean;
error: Error | null;
/** Optional pagination configuration */
paginationOptions?: UsePaginationOptions;
}
function formatChatCompletionToRow(completion: ChatCompletion): LogTableRow {
@ -25,17 +30,39 @@ function formatChatCompletionToRow(completion: ChatCompletion): LogTableRow {
}
export function ChatCompletionsTable({
data,
isLoading,
error,
paginationOptions,
}: ChatCompletionsTableProps) {
const fetchFunction = async (params: {
after?: string;
limit: number;
model?: string;
order?: string;
}) => {
const response = await client.chat.completions.list({
after: params.after,
limit: params.limit,
...(params.model && { model: params.model }),
...(params.order && { order: params.order }),
} as any);
return response as ListChatCompletionsResponse;
};
const { data, status, hasMore, error, loadMore } = usePagination({
...paginationOptions,
fetchFunction,
errorMessagePrefix: "chat completions",
});
const formattedData = data.map(formatChatCompletionToRow);
return (
<LogsTable
data={formattedData}
isLoading={isLoading}
status={status}
hasMore={hasMore}
error={error}
onLoadMore={loadMore}
caption="A list of your recent chat completions."
emptyMessage="No chat completions found."
/>

View file

@ -37,13 +37,11 @@ export default function LogsLayout({
}
return (
<div className="container mx-auto p-4">
<>
{segments.length > 0 && (
<PageBreadcrumb segments={segments} className="mb-4" />
)}
{children}
</>
<div className="container mx-auto p-4 h-[calc(100vh-64px)] flex flex-col">
{segments.length > 0 && (
<PageBreadcrumb segments={segments} className="mb-4" />
)}
<div className="flex-1 min-h-0 flex flex-col">{children}</div>
</div>
);
}

View file

@ -0,0 +1,142 @@
import React from "react";
import { render, waitFor } from "@testing-library/react";
import "@testing-library/jest-dom";
import { LogsTable, LogTableRow } from "./logs-table";
import { PaginationStatus } from "@/lib/types";
// Mock next/navigation
jest.mock("next/navigation", () => ({
useRouter: () => ({
push: jest.fn(),
}),
}));
// Mock the useInfiniteScroll hook
jest.mock("@/hooks/useInfiniteScroll", () => ({
useInfiniteScroll: jest.fn((onLoadMore, options) => {
const ref = React.useRef(null);
React.useEffect(() => {
// Simulate the observer behavior
if (options?.enabled && onLoadMore) {
// Trigger load after a delay to simulate intersection
const timeout = setTimeout(() => {
onLoadMore();
}, 100);
return () => clearTimeout(timeout);
}
}, [options?.enabled, onLoadMore]);
return ref;
}),
}));
// IntersectionObserver mock is already in jest.setup.ts
describe("LogsTable Viewport Loading", () => {
const mockData: LogTableRow[] = Array.from({ length: 10 }, (_, i) => ({
id: `row_${i}`,
input: `Input ${i}`,
output: `Output ${i}`,
model: "test-model",
createdTime: new Date().toISOString(),
detailPath: `/logs/test/${i}`,
}));
const defaultProps = {
data: mockData,
status: "idle" as PaginationStatus,
hasMore: true,
error: null,
caption: "Test table",
emptyMessage: "No data",
};
beforeEach(() => {
jest.clearAllMocks();
});
test("should trigger loadMore when sentinel is visible", async () => {
const mockLoadMore = jest.fn();
render(<LogsTable {...defaultProps} onLoadMore={mockLoadMore} />);
// Wait for the intersection observer to trigger
await waitFor(
() => {
expect(mockLoadMore).toHaveBeenCalled();
},
{ timeout: 300 },
);
expect(mockLoadMore).toHaveBeenCalledTimes(1);
});
test("should not trigger loadMore when already loading", async () => {
const mockLoadMore = jest.fn();
render(
<LogsTable
{...defaultProps}
status="loading-more"
onLoadMore={mockLoadMore}
/>,
);
// Wait for possible triggers
await new Promise((resolve) => setTimeout(resolve, 300));
expect(mockLoadMore).not.toHaveBeenCalled();
});
test("should not trigger loadMore when status is loading", async () => {
const mockLoadMore = jest.fn();
render(
<LogsTable
{...defaultProps}
status="loading"
onLoadMore={mockLoadMore}
/>,
);
// Wait for possible triggers
await new Promise((resolve) => setTimeout(resolve, 300));
expect(mockLoadMore).not.toHaveBeenCalled();
});
test("should not trigger loadMore when hasMore is false", async () => {
const mockLoadMore = jest.fn();
render(
<LogsTable {...defaultProps} hasMore={false} onLoadMore={mockLoadMore} />,
);
// Wait for possible triggers
await new Promise((resolve) => setTimeout(resolve, 300));
expect(mockLoadMore).not.toHaveBeenCalled();
});
test("sentinel element should not be rendered when loading", () => {
const { container } = render(
<LogsTable {...defaultProps} status="loading-more" />,
);
// Check that no sentinel row with height: 1 exists
const sentinelRow = container.querySelector('tr[style*="height: 1"]');
expect(sentinelRow).not.toBeInTheDocument();
});
test("sentinel element should be rendered when not loading and hasMore", () => {
const { container } = render(
<LogsTable {...defaultProps} hasMore={true} status="idle" />,
);
// Check that sentinel row exists
const sentinelRow = container.querySelector('tr[style*="height: 1"]');
expect(sentinelRow).toBeInTheDocument();
});
});

View file

@ -2,6 +2,7 @@ import React from "react";
import { render, screen, fireEvent } from "@testing-library/react";
import "@testing-library/jest-dom";
import { LogsTable, LogTableRow } from "./logs-table";
import { PaginationStatus } from "@/lib/types";
// Mock next/navigation
const mockPush = jest.fn();
@ -23,7 +24,7 @@ const truncateText = originalTruncateText as jest.Mock;
describe("LogsTable", () => {
const defaultProps = {
data: [] as LogTableRow[],
isLoading: false,
status: "idle" as PaginationStatus,
error: null,
caption: "Test table caption",
emptyMessage: "No data found",
@ -69,7 +70,7 @@ describe("LogsTable", () => {
describe("Loading State", () => {
test("renders skeleton UI when isLoading is true", () => {
const { container } = render(
<LogsTable {...defaultProps} isLoading={true} />,
<LogsTable {...defaultProps} status="loading" />,
);
// Check for skeleton in the table caption
@ -101,7 +102,7 @@ describe("LogsTable", () => {
test("renders correct number of skeleton rows", () => {
const { container } = render(
<LogsTable {...defaultProps} isLoading={true} />,
<LogsTable {...defaultProps} status="loading" />,
);
const skeletonRows = container.querySelectorAll("tbody tr");
@ -115,27 +116,45 @@ describe("LogsTable", () => {
render(
<LogsTable
{...defaultProps}
error={{ name: "Error", message: errorMessage }}
status="error"
error={{ name: "Error", message: errorMessage } as Error}
/>,
);
expect(
screen.getByText(`Error fetching data: ${errorMessage}`),
screen.getByText("Unable to load chat completions"),
).toBeInTheDocument();
expect(screen.getByText(errorMessage)).toBeInTheDocument();
});
test("renders default error message when error.message is not available", () => {
render(
<LogsTable {...defaultProps} error={{ name: "Error", message: "" }} />,
<LogsTable
{...defaultProps}
status="error"
error={{ name: "Error", message: "" } as Error}
/>,
);
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
screen.getByText("Unable to load chat completions"),
).toBeInTheDocument();
expect(
screen.getByText(
"An unexpected error occurred while loading the data.",
),
).toBeInTheDocument();
});
test("renders default error message when error prop is an object without message", () => {
render(<LogsTable {...defaultProps} error={{} as Error} />);
render(
<LogsTable {...defaultProps} status="error" error={{} as Error} />,
);
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
screen.getByText("Unable to load chat completions"),
).toBeInTheDocument();
expect(
screen.getByText(
"An unexpected error occurred while loading the data.",
),
).toBeInTheDocument();
});
@ -143,7 +162,8 @@ describe("LogsTable", () => {
render(
<LogsTable
{...defaultProps}
error={{ name: "Error", message: "Test error" }}
status="error"
error={{ name: "Error", message: "Test error" } as Error}
/>,
);
const table = screen.queryByRole("table");
@ -337,14 +357,19 @@ describe("LogsTable", () => {
render(<LogsTable {...defaultProps} data={mockData} />);
const table = screen.getByRole("table");
expect(table).toBeInTheDocument();
const tables = screen.getAllByRole("table");
expect(tables).toHaveLength(2); // Fixed header table + body table
const columnHeaders = screen.getAllByRole("columnheader");
expect(columnHeaders).toHaveLength(4);
const rows = screen.getAllByRole("row");
expect(rows).toHaveLength(2); // 1 header row + 1 data row
expect(rows).toHaveLength(3); // 1 header row + 1 data row + 1 "no more items" row
expect(screen.getByText("Input")).toBeInTheDocument();
expect(screen.getByText("Output")).toBeInTheDocument();
expect(screen.getByText("Model")).toBeInTheDocument();
expect(screen.getByText("Created")).toBeInTheDocument();
});
});
});

View file

@ -1,7 +1,10 @@
"use client";
import { useRouter } from "next/navigation";
import { useRef } from "react";
import { truncateText } from "@/lib/truncate-text";
import { PaginationStatus } from "@/lib/types";
import { useInfiniteScroll } from "@/hooks/useInfiniteScroll";
import {
Table,
TableBody,
@ -24,65 +27,107 @@ export interface LogTableRow {
}
interface LogsTableProps {
/** Array of log table row data to display */
data: LogTableRow[];
isLoading: boolean;
/** Current loading/error status */
status: PaginationStatus;
/** Whether more data is available to load */
hasMore?: boolean;
/** Error state, null if no error */
error: Error | null;
/** Table caption for accessibility */
caption: string;
/** Message to show when no data is available */
emptyMessage: string;
/** Callback function to load more data */
onLoadMore?: () => void;
}
export function LogsTable({
data,
isLoading,
status,
hasMore = false,
error,
caption,
emptyMessage,
onLoadMore,
}: LogsTableProps) {
const router = useRouter();
const tableContainerRef = useRef<HTMLDivElement>(null);
const tableHeader = (
<TableHeader>
<TableRow>
<TableHead>Input</TableHead>
<TableHead>Output</TableHead>
<TableHead>Model</TableHead>
<TableHead className="text-right">Created</TableHead>
</TableRow>
</TableHeader>
// Use Intersection Observer for infinite scroll
const sentinelRef = useInfiniteScroll(onLoadMore, {
enabled: hasMore && status === "idle",
rootMargin: "100px",
threshold: 0.1,
});
// Fixed header component
const FixedHeader = () => (
<div className="bg-background border-b border-border">
<Table>
<TableHeader>
<TableRow>
<TableHead className="w-1/4">Input</TableHead>
<TableHead className="w-1/4">Output</TableHead>
<TableHead className="w-1/4">Model</TableHead>
<TableHead className="w-1/4 text-right">Created</TableHead>
</TableRow>
</TableHeader>
</Table>
</div>
);
if (isLoading) {
if (status === "loading") {
return (
<Table>
<TableCaption>
<Skeleton className="h-4 w-[250px] mx-auto" />
</TableCaption>
{tableHeader}
<TableBody>
{[...Array(3)].map((_, i) => (
<TableRow key={`skeleton-${i}`}>
<TableCell>
<Skeleton className="h-4 w-full" />
</TableCell>
<TableCell>
<Skeleton className="h-4 w-full" />
</TableCell>
<TableCell>
<Skeleton className="h-4 w-3/4" />
</TableCell>
<TableCell className="text-right">
<Skeleton className="h-4 w-1/2 ml-auto" />
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
<div className="h-full flex flex-col">
<FixedHeader />
<div ref={tableContainerRef} className="overflow-auto flex-1 min-h-0">
<Table>
<TableCaption>
<Skeleton className="h-4 w-[250px] mx-auto" />
</TableCaption>
<TableBody>
{[...Array(3)].map((_, i) => (
<TableRow key={`skeleton-${i}`}>
<TableCell className="w-1/4">
<Skeleton className="h-4 w-full" />
</TableCell>
<TableCell className="w-1/4">
<Skeleton className="h-4 w-full" />
</TableCell>
<TableCell className="w-1/4">
<Skeleton className="h-4 w-3/4" />
</TableCell>
<TableCell className="w-1/4 text-right">
<Skeleton className="h-4 w-1/2 ml-auto" />
</TableCell>
</TableRow>
))}
</TableBody>
</Table>
</div>
</div>
);
}
if (error) {
if (status === "error") {
return (
<p>Error fetching data: {error.message || "An unknown error occurred"}</p>
<div className="flex flex-col items-center justify-center p-8 space-y-4">
<div className="text-destructive font-medium">
Unable to load chat completions
</div>
<div className="text-sm text-muted-foreground text-center max-w-md">
{error?.message ||
"An unexpected error occurred while loading the data."}
</div>
<button
onClick={() => window.location.reload()}
className="px-4 py-2 bg-primary text-primary-foreground rounded-md hover:bg-primary/90 transition-colors"
>
Retry
</button>
</div>
);
}
@ -91,23 +136,60 @@ export function LogsTable({
}
return (
<Table>
<TableCaption>{caption}</TableCaption>
{tableHeader}
<TableBody>
{data.map((row) => (
<TableRow
key={row.id}
onClick={() => router.push(row.detailPath)}
className="cursor-pointer hover:bg-muted/50"
>
<TableCell>{truncateText(row.input)}</TableCell>
<TableCell>{truncateText(row.output)}</TableCell>
<TableCell>{row.model}</TableCell>
<TableCell className="text-right">{row.createdTime}</TableCell>
</TableRow>
))}
</TableBody>
</Table>
<div className="h-full flex flex-col">
<FixedHeader />
<div ref={tableContainerRef} className="overflow-auto flex-1 min-h-0">
<Table>
<TableCaption className="sr-only">{caption}</TableCaption>
<TableBody>
{data.map((row) => (
<TableRow
key={row.id}
onClick={() => router.push(row.detailPath)}
className="cursor-pointer hover:bg-muted/50"
>
<TableCell className="w-1/4">
{truncateText(row.input)}
</TableCell>
<TableCell className="w-1/4">
{truncateText(row.output)}
</TableCell>
<TableCell className="w-1/4">{row.model}</TableCell>
<TableCell className="w-1/4 text-right">
{row.createdTime}
</TableCell>
</TableRow>
))}
{/* Sentinel element for infinite scroll */}
{hasMore && status === "idle" && (
<TableRow ref={sentinelRef} style={{ height: 1 }}>
<TableCell colSpan={4} style={{ padding: 0, border: 0 }} />
</TableRow>
)}
{status === "loading-more" && (
<TableRow>
<TableCell colSpan={4} className="text-center py-4">
<div className="flex items-center justify-center space-x-2">
<Skeleton className="h-4 w-4 rounded-full" />
<span className="text-sm text-muted-foreground">
Loading more...
</span>
</div>
</TableCell>
</TableRow>
)}
{!hasMore && data.length > 0 && (
<TableRow>
<TableCell colSpan={4} className="text-center py-4">
<span className="text-sm text-muted-foreground">
No more items to load
</span>
</TableCell>
</TableRow>
)}
</TableBody>
</Table>
</div>
</div>
);
}

View file

@ -15,26 +15,60 @@ jest.mock("next/navigation", () => ({
// Mock helper functions
jest.mock("@/lib/truncate-text");
// Mock the client
jest.mock("@/lib/client", () => ({
client: {
responses: {
list: jest.fn(),
},
},
}));
// Mock the usePagination hook
const mockLoadMore = jest.fn();
jest.mock("@/hooks/usePagination", () => ({
usePagination: jest.fn(() => ({
data: [],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
})),
}));
// Import the mocked functions
import { truncateText as originalTruncateText } from "@/lib/truncate-text";
// Import the mocked hook
import { usePagination } from "@/hooks/usePagination";
const mockedUsePagination = usePagination as jest.MockedFunction<
typeof usePagination
>;
// Cast to jest.Mock for typings
const truncateText = originalTruncateText as jest.Mock;
describe("ResponsesTable", () => {
const defaultProps = {
data: [] as OpenAIResponse[],
isLoading: false,
error: null,
};
const defaultProps = {};
beforeEach(() => {
// Reset all mocks before each test
mockPush.mockClear();
truncateText.mockClear();
mockLoadMore.mockClear();
jest.clearAllMocks();
// Default pass-through implementation
truncateText.mockImplementation((text: string | undefined) => text);
// Default hook return value
mockedUsePagination.mockReturnValue({
data: [],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
});
test("renders without crashing with default props", () => {
@ -65,7 +99,16 @@ describe("ResponsesTable", () => {
],
};
render(<ResponsesTable {...defaultProps} data={[mockResponse]} />);
// Configure the mock to return our test data
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
const row = screen.getByText("Test input").closest("tr");
if (row) {
@ -77,10 +120,16 @@ describe("ResponsesTable", () => {
});
describe("Loading State", () => {
test("renders skeleton UI when isLoading is true", () => {
const { container } = render(
<ResponsesTable {...defaultProps} isLoading={true} />,
);
test("renders skeleton UI when status is loading", () => {
mockedUsePagination.mockReturnValue({
data: [],
status: "loading",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
const { container } = render(<ResponsesTable {...defaultProps} />);
// Check for skeleton in the table caption
const tableCaption = container.querySelector("caption");
@ -105,42 +154,50 @@ describe("ResponsesTable", () => {
});
describe("Error State", () => {
test("renders error message when error prop is provided", () => {
test("renders error message when error is provided", () => {
const errorMessage = "Network Error";
render(
<ResponsesTable
{...defaultProps}
error={{ name: "Error", message: errorMessage }}
/>,
);
mockedUsePagination.mockReturnValue({
data: [],
status: "error",
hasMore: false,
error: { name: "Error", message: errorMessage } as Error,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(
screen.getByText(`Error fetching data: ${errorMessage}`),
screen.getByText("Unable to load chat completions"),
).toBeInTheDocument();
expect(screen.getByText(errorMessage)).toBeInTheDocument();
});
test("renders default error message when error.message is not available", () => {
render(
<ResponsesTable
{...defaultProps}
error={{ name: "Error", message: "" }}
/>,
);
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
).toBeInTheDocument();
});
test.each([{ name: "Error", message: "" }, {}])(
"renders default error message when error has no message",
(errorObject) => {
mockedUsePagination.mockReturnValue({
data: [],
status: "error",
hasMore: false,
error: errorObject as Error,
loadMore: mockLoadMore,
});
test("renders default error message when error prop is an object without message", () => {
render(<ResponsesTable {...defaultProps} error={{} as Error} />);
expect(
screen.getByText("Error fetching data: An unknown error occurred"),
).toBeInTheDocument();
});
render(<ResponsesTable {...defaultProps} />);
expect(
screen.getByText("Unable to load chat completions"),
).toBeInTheDocument();
expect(
screen.getByText(
"An unexpected error occurred while loading the data.",
),
).toBeInTheDocument();
},
);
});
describe("Empty State", () => {
test('renders "No responses found." and no table when data array is empty', () => {
render(<ResponsesTable data={[]} isLoading={false} error={null} />);
render(<ResponsesTable {...defaultProps} />);
expect(screen.getByText("No responses found.")).toBeInTheDocument();
// Ensure that the table structure is NOT rendered in the empty state
@ -151,7 +208,7 @@ describe("ResponsesTable", () => {
describe("Data Rendering", () => {
test("renders table caption, headers, and response data correctly", () => {
const mockResponses = [
const mockResponses: OpenAIResponse[] = [
{
id: "resp_1",
object: "response" as const,
@ -196,9 +253,15 @@ describe("ResponsesTable", () => {
},
];
render(
<ResponsesTable data={mockResponses} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: mockResponses,
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
// Table caption
expect(
@ -246,9 +309,15 @@ describe("ResponsesTable", () => {
],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(screen.getByText("Simple string input")).toBeInTheDocument();
});
@ -272,9 +341,15 @@ describe("ResponsesTable", () => {
],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(screen.getByText("Array input text")).toBeInTheDocument();
});
@ -294,9 +369,15 @@ describe("ResponsesTable", () => {
],
};
const { container } = render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
const { container } = render(<ResponsesTable {...defaultProps} />);
// Find the input cell (first cell in the data row) and verify it's empty
const inputCell = container.querySelector("tbody tr td:first-child");
@ -323,9 +404,15 @@ describe("ResponsesTable", () => {
input: [{ type: "message", content: "input" }],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(screen.getByText("Simple string output")).toBeInTheDocument();
});
@ -349,9 +436,15 @@ describe("ResponsesTable", () => {
input: [{ type: "message", content: "input" }],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(screen.getByText("Array output text")).toBeInTheDocument();
});
@ -374,9 +467,15 @@ describe("ResponsesTable", () => {
input: [{ type: "message", content: "input" }],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(
screen.getByText('search_function({"query": "test"})'),
).toBeInTheDocument();
@ -400,9 +499,15 @@ describe("ResponsesTable", () => {
input: [{ type: "message", content: "input" }],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(screen.getByText("simple_function({})")).toBeInTheDocument();
});
@ -423,9 +528,15 @@ describe("ResponsesTable", () => {
input: [{ type: "message", content: "input" }],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
expect(
screen.getByText("web_search_call(status: completed)"),
).toBeInTheDocument();
@ -449,9 +560,15 @@ describe("ResponsesTable", () => {
input: [{ type: "message", content: "input" }],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
// Should contain the JSON stringified version
expect(screen.getByText(/unknown_call/)).toBeInTheDocument();
});
@ -472,9 +589,15 @@ describe("ResponsesTable", () => {
input: [{ type: "message", content: "input" }],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
// Should contain the JSON stringified version of the output array
expect(screen.getByText(/unknown_type/)).toBeInTheDocument();
});
@ -520,18 +643,21 @@ describe("ResponsesTable", () => {
],
};
render(
<ResponsesTable data={[mockResponse]} isLoading={false} error={null} />,
);
mockedUsePagination.mockReturnValue({
data: [mockResponse],
status: "idle",
hasMore: false,
error: null,
loadMore: mockLoadMore,
});
render(<ResponsesTable {...defaultProps} />);
// The truncated text should be present for both input and output
const truncatedTexts = screen.getAllByText(
longInput.slice(0, 10) + "...",
);
expect(truncatedTexts.length).toBe(2); // one for input, one for output
truncatedTexts.forEach((textElement) =>
expect(textElement).toBeInTheDocument(),
);
});
});
});

View file

@ -2,10 +2,13 @@
import {
OpenAIResponse,
ResponseInput,
ResponseInputMessageContent,
UsePaginationOptions,
} from "@/lib/types";
import { LogsTable, LogTableRow } from "@/components/logs/logs-table";
import { usePagination } from "@/hooks/usePagination";
import { client } from "@/lib/client";
import type { ResponseListResponse } from "llama-stack-client/resources/responses/responses";
import {
isMessageInput,
isMessageItem,
@ -17,11 +20,34 @@ import {
} from "./utils/item-types";
interface ResponsesTableProps {
data: OpenAIResponse[];
isLoading: boolean;
error: Error | null;
/** Optional pagination configuration */
paginationOptions?: UsePaginationOptions;
}
/**
* Helper function to convert ResponseListResponse.Data to OpenAIResponse
*/
const convertResponseListData = (
responseData: ResponseListResponse.Data,
): OpenAIResponse => {
return {
id: responseData.id,
created_at: responseData.created_at,
model: responseData.model,
object: responseData.object,
status: responseData.status,
output: responseData.output as OpenAIResponse["output"],
input: responseData.input as OpenAIResponse["input"],
error: responseData.error,
parallel_tool_calls: responseData.parallel_tool_calls,
previous_response_id: responseData.previous_response_id,
temperature: responseData.temperature,
top_p: responseData.top_p,
truncation: responseData.truncation,
user: responseData.user,
};
};
function getInputText(response: OpenAIResponse): string {
const firstInput = response.input.find(isMessageInput);
if (firstInput) {
@ -98,18 +124,43 @@ function formatResponseToRow(response: OpenAIResponse): LogTableRow {
};
}
export function ResponsesTable({
data,
isLoading,
error,
}: ResponsesTableProps) {
export function ResponsesTable({ paginationOptions }: ResponsesTableProps) {
const fetchFunction = async (params: {
after?: string;
limit: number;
model?: string;
order?: string;
}) => {
const response = await client.responses.list({
after: params.after,
limit: params.limit,
...(params.model && { model: params.model }),
...(params.order && { order: params.order }),
} as any);
const listResponse = response as ResponseListResponse;
return {
...listResponse,
data: listResponse.data.map(convertResponseListData),
};
};
const { data, status, hasMore, error, loadMore } = usePagination({
...paginationOptions,
fetchFunction,
errorMessagePrefix: "responses",
});
const formattedData = data.map(formatResponseToRow);
return (
<LogsTable
data={formattedData}
isLoading={isLoading}
status={status}
hasMore={hasMore}
error={error}
onLoadMore={loadMore}
caption="A list of your recent responses."
emptyMessage="No responses found."
/>

View file

@ -0,0 +1,61 @@
import { test, expect } from "@playwright/test";
test.describe("LogsTable Scroll and Progressive Loading", () => {
test.beforeEach(async ({ page }) => {
// Navigate to the chat completions page
await page.goto("/logs/chat-completions");
// Wait for initial data to load
await page.waitForSelector("table tbody tr", { timeout: 10000 });
});
test("should progressively load more data to fill tall viewports", async ({
page,
}) => {
// Set a tall viewport (1400px height)
await page.setViewportSize({ width: 1200, height: 1400 });
// Wait for the table to be visible
await page.waitForSelector("table");
// Wait a bit for progressive loading to potentially trigger
await page.waitForTimeout(3000);
// Count the number of rows loaded
const rowCount = await page.locator("table tbody tr").count();
// With a 1400px viewport, we should have more than the default 20 rows
// Assuming each row is ~50px, we should fit at least 25-30 rows
expect(rowCount).toBeGreaterThan(20);
});
test("should trigger infinite scroll when scrolling near bottom", async ({
page,
}) => {
// Set a medium viewport
await page.setViewportSize({ width: 1200, height: 800 });
// Wait for initial load
await page.waitForSelector("table tbody tr");
// Get initial row count
const initialRowCount = await page.locator("table tbody tr").count();
// Find the scrollable container
const scrollContainer = page.locator("div.overflow-auto").first();
// Scroll to near the bottom
await scrollContainer.evaluate((element) => {
element.scrollTop = element.scrollHeight - element.clientHeight - 100;
});
// Wait for loading indicator or new data
await page.waitForTimeout(2000);
// Check if more rows were loaded
const newRowCount = await page.locator("table tbody tr").count();
// We should have more rows after scrolling
expect(newRowCount).toBeGreaterThan(initialRowCount);
});
});

View file

@ -0,0 +1,55 @@
"use client";
import { useRef, useEffect } from "react";
interface UseInfiniteScrollOptions {
/** Whether the feature is enabled (e.g., hasMore data) */
enabled?: boolean;
/** Threshold for intersection (0-1, how much of sentinel must be visible) */
threshold?: number;
/** Margin around root to trigger earlier (e.g., "100px" to load 100px before visible) */
rootMargin?: string;
}
/**
* Custom hook for infinite scroll using Intersection Observer
*
* @param onLoadMore - Callback to load more data
* @param options - Configuration options
* @returns ref to attach to sentinel element
*/
export function useInfiniteScroll(
onLoadMore: (() => void) | undefined,
options: UseInfiniteScrollOptions = {},
) {
const { enabled = true, threshold = 0.1, rootMargin = "100px" } = options;
const sentinelRef = useRef<HTMLTableRowElement>(null);
useEffect(() => {
if (!onLoadMore || !enabled) return;
const observer = new IntersectionObserver(
(entries) => {
const [entry] = entries;
if (entry.isIntersecting) {
onLoadMore();
}
},
{
threshold,
rootMargin,
},
);
const sentinel = sentinelRef.current;
if (sentinel) {
observer.observe(sentinel);
}
return () => {
observer.disconnect();
};
}, [onLoadMore, enabled, threshold, rootMargin]);
return sentinelRef;
}

View file

@ -0,0 +1,132 @@
"use client";
import { useState, useCallback, useEffect, useRef } from "react";
import { PaginationStatus, UsePaginationOptions } from "@/lib/types";
interface PaginationState<T> {
data: T[];
status: PaginationStatus;
hasMore: boolean;
error: Error | null;
lastId: string | null;
}
interface PaginationResponse<T> {
data: T[];
has_more: boolean;
last_id: string;
first_id: string;
object: "list";
}
export interface PaginationReturn<T> {
data: T[];
status: PaginationStatus;
hasMore: boolean;
error: Error | null;
loadMore: () => void;
}
interface UsePaginationParams<T> extends UsePaginationOptions {
fetchFunction: (params: {
after?: string;
limit: number;
model?: string;
order?: string;
}) => Promise<PaginationResponse<T>>;
errorMessagePrefix: string;
}
export function usePagination<T>({
limit = 20,
model,
order = "desc",
fetchFunction,
errorMessagePrefix,
}: UsePaginationParams<T>): PaginationReturn<T> {
const [state, setState] = useState<PaginationState<T>>({
data: [],
status: "loading",
hasMore: true,
error: null,
lastId: null,
});
// Use refs to avoid stale closures
const stateRef = useRef(state);
stateRef.current = state;
/**
* Fetches data from the API with cursor-based pagination
*/
const fetchData = useCallback(
async (after?: string, targetRows?: number) => {
const isInitialLoad = !after;
const fetchLimit = targetRows || limit;
try {
setState((prev) => ({
...prev,
status: isInitialLoad ? "loading" : "loading-more",
error: null,
}));
const response = await fetchFunction({
after: after || undefined,
limit: fetchLimit,
...(model && { model }),
...(order && { order }),
});
setState((prev) => ({
...prev,
data: isInitialLoad
? response.data
: [...prev.data, ...response.data],
hasMore: response.has_more,
lastId: response.last_id || null,
status: "idle",
}));
} catch (err) {
const errorMessage = isInitialLoad
? `Failed to load ${errorMessagePrefix}. Please try refreshing the page.`
: `Failed to load more ${errorMessagePrefix}. Please try again.`;
const error =
err instanceof Error
? new Error(`${errorMessage} ${err.message}`)
: new Error(errorMessage);
setState((prev) => ({
...prev,
error,
status: "error",
}));
}
},
[limit, model, order, fetchFunction, errorMessagePrefix],
);
/**
* Loads more data for infinite scroll
*/
const loadMore = useCallback(() => {
const currentState = stateRef.current;
if (currentState.hasMore && currentState.status === "idle") {
fetchData(currentState.lastId || undefined);
}
}, [fetchData]);
// Auto-load initial data on mount
useEffect(() => {
fetchData();
}, []);
return {
data: state.data,
status: state.status,
hasMore: state.hasMore,
error: state.error,
loadMore,
};
}

View file

@ -100,6 +100,7 @@ const config: Config = {
// However, for mocks, sometimes explicit mapping is needed.
"^@/lib/(.*)$": "<rootDir>/lib/$1",
"^@/components/(.*)$": "<rootDir>/components/$1",
"^@/hooks/(.*)$": "<rootDir>/hooks/$1",
// Add other aliases here if needed
},
@ -148,7 +149,7 @@ const config: Config = {
// setupFiles: [],
// A list of paths to modules that run some code to configure or set up the testing framework before each test
// setupFilesAfterEnv: [],
setupFilesAfterEnv: ["<rootDir>/jest.setup.ts"],
// The number of seconds after which a test is considered as slow and reported as such in the results.
// slowTestThreshold: 5,
@ -172,9 +173,7 @@ const config: Config = {
// ],
// An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
// testPathIgnorePatterns: [
// "/node_modules/"
// ],
testPathIgnorePatterns: ["/e2e/"],
// The regexp pattern or array of patterns that Jest uses to detect test files
// testRegex: [],

View file

@ -0,0 +1,23 @@
// Import llama-stack-client shims for Node environment
import "llama-stack-client/shims/node";
// Add any other global test setup here
import "@testing-library/jest-dom";
// Mock ResizeObserver globally
global.ResizeObserver = class ResizeObserver {
observe() {}
unobserve() {}
disconnect() {}
};
// Mock IntersectionObserver globally
global.IntersectionObserver = class IntersectionObserver {
constructor(callback: IntersectionObserverCallback) {}
observe() {}
unobserve() {}
disconnect() {}
takeRecords() {
return [];
}
} as any;

View file

@ -43,6 +43,33 @@ export interface ChatCompletion {
input_messages: ChatMessage[];
}
export interface ListChatCompletionsResponse {
data: ChatCompletion[];
has_more: boolean;
first_id: string;
last_id: string;
object: "list";
}
export type PaginationStatus = "idle" | "loading" | "loading-more" | "error";
export interface PaginationState {
data: ChatCompletion[];
status: PaginationStatus;
hasMore: boolean;
error: Error | null;
lastId: string | null;
}
export interface UsePaginationOptions {
/** Number of items to load per page (default: 20) */
limit?: number;
/** Filter by specific model */
model?: string;
/** Sort order for results (default: "desc") */
order?: "asc" | "desc";
}
// Response types for OpenAI Responses API
export interface ResponseInputMessageContent {
text?: string;

View file

@ -9,7 +9,8 @@
"lint": "next lint",
"format": "prettier --write \"./**/*.{ts,tsx}\"",
"format:check": "prettier --check \"./**/*.{ts,tsx}\"",
"test": "jest"
"test": "jest",
"test:e2e": "playwright test"
},
"dependencies": {
"@radix-ui/react-dialog": "^1.1.13",

View file

@ -0,0 +1,36 @@
import { defineConfig, devices } from "@playwright/test";
/**
* See https://playwright.dev/docs/test-configuration.
*/
export default defineConfig({
testDir: "./e2e",
/* Run tests in files in parallel */
fullyParallel: true,
/* Reporter to use. See https://playwright.dev/docs/test-reporters */
reporter: "line",
/* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
use: {
/* Base URL to use in actions like `await page.goto('/')`. */
baseURL: "http://localhost:8322",
/* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
trace: "on-first-retry",
},
/* Configure projects for major browsers */
projects: [
{
name: "chromium",
use: { ...devices["Desktop Chrome"] },
},
],
/* Run your local dev server before starting the tests */
webServer: {
command: "npm run dev",
url: "http://localhost:8322",
reuseExistingServer: !process.env.CI,
timeout: 120 * 1000,
},
});