feat(ui): add views for Responses (#2293)

# What does this PR do?
* Add responses list and detail views
* Refactored components to be shared as much as possible between chat
completions and responses

## Test Plan
<img width="2014" alt="image"
src="https://github.com/user-attachments/assets/6dee12ea-8876-4351-a6eb-2338058466ef"
/>
<img width="2021" alt="image"
src="https://github.com/user-attachments/assets/6c7c71b8-25b7-4199-9c57-6960be5580c8"
/>

Added tests.
This commit is contained in:
ehhuang 2025-05-28 09:51:22 -07:00 committed by GitHub
parent 6352078e4b
commit 56e5ddb39f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
34 changed files with 3282 additions and 380 deletions

View file

@ -0,0 +1,12 @@
import LlamaStackClient from "llama-stack-client";
import OpenAI from "openai";

// When NEXT_PUBLIC_USE_OPENAI_CLIENT is "true", talk to OpenAI directly —
// useful for testing the UI against OpenAI's API instead of a Llama Stack server.
const useOpenAIClient = process.env.NEXT_PUBLIC_USE_OPENAI_CLIENT === "true";

// Shared API client for the app: either an OpenAI client or a LlamaStackClient
// pointed at the configured base URL.
export const client = useOpenAIClient
  ? new OpenAI({
      apiKey: process.env.NEXT_PUBLIC_OPENAI_API_KEY,
      // Required because this client is constructed in browser code.
      dangerouslyAllowBrowser: true,
    })
  : new LlamaStackClient({
      baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
    });

View file

@ -43,10 +43,14 @@ export function extractDisplayableText(
return "";
}
let textPart = extractTextFromContentPart(message.content);
const textPart = extractTextFromContentPart(message.content);
let toolCallPart = "";
if (message.tool_calls && message.tool_calls.length > 0) {
if (
message.tool_calls &&
Array.isArray(message.tool_calls) &&
message.tool_calls.length > 0
) {
// For summary, usually the first tool call is sufficient
toolCallPart = formatToolCallToString(message.tool_calls[0]);
}

View file

@ -18,20 +18,20 @@ export interface ImageUrlContentBlock {
export type ChatMessageContentPart =
| TextContentBlock
| ImageUrlContentBlock
| { type: string; [key: string]: any }; // Fallback for other potential types
| { type: string; [key: string]: unknown }; // Fallback for other potential types
/** A single chat message: who said it and what was said. */
export interface ChatMessage {
  role: string;
  content: string | ChatMessageContentPart[]; // Updated content type
  name?: string | null;
  tool_calls?: unknown | null; // This could also be refined to a more specific ToolCall[] type
}
/** One completion choice returned alongside its finish reason and index. */
export interface Choice {
  message: ChatMessage;
  finish_reason: string;
  index: number;
  logprobs?: unknown | null;
}
export interface ChatCompletion {
@ -42,3 +42,62 @@ export interface ChatCompletion {
model: string;
input_messages: ChatMessage[];
}
// Response types for OpenAI Responses API
/**
 * A single content part of a response message.
 * `text` accompanies the text-typed parts; `image_url` and `detail`
 * accompany "input_image" parts.
 */
export interface ResponseInputMessageContent {
  /** Discriminates which of the optional fields are populated. */
  type: "input_text" | "input_image" | "output_text";
  /** Text payload for "input_text" / "output_text" parts. */
  text?: string;
  /** Image location for "input_image" parts. */
  image_url?: string;
  /** Image fidelity hint for "input_image" parts. */
  detail?: "low" | "high" | "auto";
}
/**
 * A chat-style message appearing in a response's input or output.
 * Content is either a plain string or a list of structured parts.
 */
export interface ResponseMessage {
  type: "message";
  role: "system" | "developer" | "user" | "assistant";
  content: string | ResponseInputMessageContent[];
  id?: string;
  status?: string;
}
/**
 * A tool invocation entry in a response's output.
 * `arguments`, `call_id`, and `name` apply to "function_call" entries —
 * TODO confirm against the API.
 */
export interface ResponseToolCall {
  id: string;
  type: "web_search_call" | "function_call";
  status: string;
  arguments?: string;
  call_id?: string;
  name?: string;
}
/** An output entry of a response: either a message or a tool call. */
export type ResponseOutput = ResponseMessage | ResponseToolCall;
/**
 * A loosely-typed input entry of a response. The index signature keeps the
 * shape open to input variants not modeled explicitly here.
 */
export interface ResponseInput {
  type: string;
  role?: string;
  content?: string | ResponseInputMessageContent[];
  // Flexible for various input types
  [key: string]: unknown;
}
/** A response object in the shape of the OpenAI Responses API. */
export interface OpenAIResponse {
  id: string;
  object: "response";
  model: string;
  created_at: number;
  status: string;
  input: ResponseInput[];
  output: ResponseOutput[];
  /** Present when the response ended in an error. */
  error?: {
    code: string;
    message: string;
  };
  parallel_tool_calls?: boolean;
  previous_response_id?: string;
  temperature?: number;
  top_p?: number;
  truncation?: string;
  user?: string;
}
/** List envelope returned when fetching a response's input items. */
export interface InputItemListResponse {
  object: "list";
  data: ResponseInput[];
}