forked from phoenix-oss/llama-stack-mirror
# What does this PR do?

* Added responses list and detail views
* Refactored components to be shared as much as possible between chat completions and responses

## Test Plan

<img width="2014" alt="image" src="https://github.com/user-attachments/assets/6dee12ea-8876-4351-a6eb-2338058466ef" />
<img width="2021" alt="image" src="https://github.com/user-attachments/assets/6c7c71b8-25b7-4199-9c57-6960be5580c8" />

Added tests.
12 lines
405 B
TypeScript
12 lines
405 B
TypeScript
import LlamaStackClient from "llama-stack-client";
|
|
import OpenAI from "openai";
|
|
|
|
export const client =
|
|
process.env.NEXT_PUBLIC_USE_OPENAI_CLIENT === "true" // useful for testing
|
|
? new OpenAI({
|
|
apiKey: process.env.NEXT_PUBLIC_OPENAI_API_KEY,
|
|
dangerouslyAllowBrowser: true,
|
|
})
|
|
: new LlamaStackClient({
|
|
baseURL: process.env.NEXT_PUBLIC_LLAMA_STACK_BASE_URL,
|
|
});
|