# What does this PR do?

Implements table and detail views for chat completions.

<img width="1548" alt="image" src="https://github.com/user-attachments/assets/01061b7f-0d47-4b3b-b5ac-2df8f9035ef6" />
<img width="1549" alt="image" src="https://github.com/user-attachments/assets/738d8612-8258-4c2c-858b-bee39030649f" />

## Test Plan

`npm run test`
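For context on the test plan, here is a minimal sketch of the kind of check `npm run test` could run against the page shown below. It assumes Jest with React Testing Library; the test file location, the `./page` import path, and the mocked response shape are assumptions, not part of this PR.

```tsx
// page.test.tsx — hypothetical sketch, not part of this PR.
// Assumes Jest + React Testing Library; the mock shape mirrors the calls the
// page makes (new LlamaStackClient(...) and client.chat.completions.list()).
import { render, waitFor } from "@testing-library/react";
import LlamaStackClient from "llama-stack-client";
import ChatCompletionsPage from "./page"; // import path is an assumption

// Replace the real client with a mock so no Llama Stack server is needed.
jest.mock("llama-stack-client", () => ({
  __esModule: true,
  default: jest.fn().mockImplementation(() => ({
    chat: {
      completions: {
        list: jest.fn().mockResolvedValue({ data: [] }),
      },
    },
  })),
}));

test("constructs a client and fetches completions on mount", async () => {
  render(<ChatCompletionsPage />);
  await waitFor(() => {
    // The mocked constructor should be invoked once by the page's useEffect.
    expect(LlamaStackClient).toHaveBeenCalledTimes(1);
  });
});
```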
"use client";
|
|
|
|
import { useEffect, useState } from "react";
|
|
import LlamaStackClient from "llama-stack-client";
|
|
import { ChatCompletion } from "@/lib/types";
|
|
import { ChatCompletionsTable } from "@/components/chat-completions/chat-completion-table";
|
|
|
|
export default function ChatCompletionsPage() {
|
|
const [completions, setCompletions] = useState<ChatCompletion[]>([]);
|
|
const [isLoading, setIsLoading] = useState<boolean>(true);
|
|
const [error, setError] = useState<Error | null>(null);
|
|
|
|
useEffect(() => {
|
|
const client = new LlamaStackClient({
|
|
baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
|
|
});
|
|
const fetchCompletions = async () => {
|
|
setIsLoading(true);
|
|
setError(null);
|
|
try {
|
|
const response = await client.chat.completions.list();
|
|
const data = Array.isArray(response)
|
|
? response
|
|
: (response as any).data;
|
|
|
|
if (Array.isArray(data)) {
|
|
setCompletions(data);
|
|
} else {
|
|
console.error("Unexpected response structure:", response);
|
|
setError(new Error("Unexpected response structure"));
|
|
setCompletions([]);
|
|
}
|
|
} catch (err) {
|
|
console.error("Error fetching chat completions:", err);
|
|
setError(
|
|
err instanceof Error ? err : new Error("Failed to fetch completions"),
|
|
);
|
|
setCompletions([]);
|
|
} finally {
|
|
setIsLoading(false);
|
|
}
|
|
};
|
|
|
|
fetchCompletions();
|
|
}, []);
|
|
|
|
return (
|
|
<ChatCompletionsTable
|
|
completions={completions}
|
|
isLoading={isLoading}
|
|
error={error}
|
|
/>
|
|
);
|
|
}
|
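The page passes `completions`, `isLoading`, and `error` straight through to `ChatCompletionsTable`, which lives in `@/components/chat-completions/chat-completion-table` and is not shown here. A props contract consistent with that usage might look like the following sketch; the interface name and comments are assumptions, and the real definition may differ.

```typescript
// Hypothetical props contract implied by the usage above; the real
// definition in chat-completion-table may differ.
import { ChatCompletion } from "@/lib/types";

export interface ChatCompletionsTableProps {
  completions: ChatCompletion[]; // rows to render, one per chat completion
  isLoading: boolean; // true while the initial fetch is in flight
  error: Error | null; // non-null when the fetch failed or returned an unexpected shape
}
```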