llama-stack/llama_stack/distribution/ui/page/distribution/eval_tasks.py

# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import streamlit as st

from llama_stack.distribution.ui.modules.api import LlamaStackApi


def benchmarks():
    # Benchmarks Section
    st.header("Benchmarks")

    # Map each registered benchmark's identifier to its full serialized details.
    benchmarks_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.benchmarks.list()}

    # Only render the inspector when at least one benchmark is registered.
    if len(benchmarks_info) > 0:
        selected_benchmark = st.selectbox("Select an eval task", list(benchmarks_info.keys()), key="benchmark_inspect")
        st.json(benchmarks_info[selected_benchmark], expanded=True)
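

# Usage sketch (an assumption, not part of the original file): this page
# function is meant to be invoked by the playground's Streamlit entrypoint.
# With Streamlit's multipage navigation API it could be registered like so:
#
#     import streamlit as st
#     from llama_stack.distribution.ui.page.distribution.eval_tasks import benchmarks
#
#     st.navigation([st.Page(benchmarks, title="Benchmarks")]).run()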