This commit is contained in:
Xi Yan 2025-02-12 20:31:42 -08:00
parent 9a8f4025c1
commit b20742fce7
3 changed files with 5 additions and 5 deletions

View file

@@ -9,8 +9,8 @@ from modules.api import llama_stack_api
def benchmarks(): def benchmarks():
# Eval Tasks Section # Benchmarks Section
st.header("Eval Tasks") st.header("Benchmarks")
benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()} benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()}

View file

@@ -21,7 +21,7 @@ def resources_page():
"Shields", "Shields",
"Scoring Functions", "Scoring Functions",
"Datasets", "Datasets",
"Eval Tasks", "Benchmarks",
] ]
icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"] icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"]
selected_resource = option_menu( selected_resource = option_menu(
@@ -35,7 +35,7 @@ def resources_page():
}, },
}, },
) )
if selected_resource == "Eval Tasks": if selected_resource == "Benchmarks":
benchmarks() benchmarks()
elif selected_resource == "Vector Databases": elif selected_resource == "Vector Databases":
vector_dbs() vector_dbs()

View file

@@ -14,7 +14,7 @@ from modules.api import llama_stack_api
def select_benchmark_1(): def select_benchmark_1():
# Select Eval Tasks # Select Benchmarks
st.subheader("1. Choose An Eval Task") st.subheader("1. Choose An Eval Task")
benchmarks = llama_stack_api.client.benchmarks.list() benchmarks = llama_stack_api.client.benchmarks.list()
benchmarks = {et.identifier: et for et in benchmarks} benchmarks = {et.identifier: et for et in benchmarks}