forked from phoenix-oss/llama-stack-mirror
feat(ui): implement chat completion views (#2201)
# What does this PR do?

Implements table and detail views for chat completions.

<img width="1548" alt="image" src="https://github.com/user-attachments/assets/01061b7f-0d47-4b3b-b5ac-2df8f9035ef6" />
<img width="1549" alt="image" src="https://github.com/user-attachments/assets/738d8612-8258-4c2c-858b-bee39030649f" />

## Test Plan

npm run test
This commit is contained in:
parent d8c6ab9bfc
commit 2708312168
27 changed files with 6729 additions and 38 deletions
@@ -23,6 +23,7 @@ import yaml
 from fastapi import Body, FastAPI, HTTPException, Request
 from fastapi import Path as FastapiPath
 from fastapi.exceptions import RequestValidationError
+from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, StreamingResponse
 from openai import BadRequestError
 from pydantic import BaseModel, ValidationError
@@ -465,6 +466,17 @@ def main(args: argparse.Namespace | None = None):
             window_seconds=window_seconds,
         )
 
+    # --- CORS middleware for local development ---
+    # TODO: move to reverse proxy
+    ui_port = os.environ.get("LLAMA_STACK_UI_PORT", 8322)
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origins=[f"http://localhost:{ui_port}"],
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
     try:
         impls = asyncio.run(construct_stack(config))
     except InvalidProviderError as e:
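For readers skimming the diff: the effect of the new middleware is easiest to see on a bare app. The sketch below is not the server's actual wiring, just a minimal FastAPI app with the same CORS settings taken from the hunk above; the `/ping` route and everything outside the `add_middleware` call are illustrative assumptions.

```python
# Minimal sketch, not the real server setup: a bare FastAPI app configured with
# the same CORS settings as the diff above, so a UI served from
# http://localhost:8322 can call the API during local development.
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Same defaulting as the diff: the allowed origin is derived from LLAMA_STACK_UI_PORT.
ui_port = os.environ.get("LLAMA_STACK_UI_PORT", 8322)

app.add_middleware(
    CORSMiddleware,
    allow_origins=[f"http://localhost:{ui_port}"],  # only the local UI origin
    allow_credentials=True,                         # allow cookies / auth headers
    allow_methods=["*"],                            # any HTTP method in preflight responses
    allow_headers=["*"],                            # any request header
)


@app.get("/ping")  # hypothetical route, only here to give the browser something to call
def ping() -> dict[str, str]:
    # Responses to requests originating from http://localhost:8322 will carry
    # the Access-Control-Allow-Origin header added by CORSMiddleware.
    return {"status": "ok"}
```

Browsers will block cross-origin calls from any other origin, which is presumably why the `TODO` suggests moving this behind a reverse proxy rather than widening `allow_origins`.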