From b20742fce742cf19b8293e12bff541adbb03047d Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Wed, 12 Feb 2025 20:31:42 -0800
Subject: [PATCH] replace "Eval Tasks" with "Benchmarks" in the distribution UI

---
 llama_stack/distribution/ui/page/distribution/eval_tasks.py | 4 ++--
 llama_stack/distribution/ui/page/distribution/resources.py  | 4 ++--
 llama_stack/distribution/ui/page/evaluations/native_eval.py | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/llama_stack/distribution/ui/page/distribution/eval_tasks.py b/llama_stack/distribution/ui/page/distribution/eval_tasks.py
index b83023901..1428ae9ab 100644
--- a/llama_stack/distribution/ui/page/distribution/eval_tasks.py
+++ b/llama_stack/distribution/ui/page/distribution/eval_tasks.py
@@ -9,8 +9,8 @@ from modules.api import llama_stack_api
 
 
 def benchmarks():
-    # Eval Tasks Section
-    st.header("Eval Tasks")
+    # Benchmarks Section
+    st.header("Benchmarks")
 
     benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()}
 
diff --git a/llama_stack/distribution/ui/page/distribution/resources.py b/llama_stack/distribution/ui/page/distribution/resources.py
index a86fda856..c0d4aac68 100644
--- a/llama_stack/distribution/ui/page/distribution/resources.py
+++ b/llama_stack/distribution/ui/page/distribution/resources.py
@@ -21,7 +21,7 @@ def resources_page():
         "Shields",
         "Scoring Functions",
         "Datasets",
-        "Eval Tasks",
+        "Benchmarks",
     ]
     icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"]
     selected_resource = option_menu(
@@ -35,7 +35,7 @@
             },
         },
     )
-    if selected_resource == "Eval Tasks":
+    if selected_resource == "Benchmarks":
         benchmarks()
     elif selected_resource == "Vector Databases":
         vector_dbs()
diff --git a/llama_stack/distribution/ui/page/evaluations/native_eval.py b/llama_stack/distribution/ui/page/evaluations/native_eval.py
index e24da4eb3..39385dd14 100644
--- a/llama_stack/distribution/ui/page/evaluations/native_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/native_eval.py
@@ -14,7 +14,7 @@ from modules.api import llama_stack_api
 
 
 def select_benchmark_1():
-    # Select Eval Tasks
+    # Select Benchmarks
     st.subheader("1. Choose An Eval Task")
     benchmarks = llama_stack_api.client.benchmarks.list()
     benchmarks = {et.identifier: et for et in benchmarks}
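
---
Note: a minimal sketch of how eval_tasks.py reads after this patch. The
"Benchmarks" header rename, the benchmarks_info comprehension, and the
modules.api import come from the diff itself; the streamlit import is
implied by the st.* calls, and the selectbox/st.json rendering at the end
is a hypothetical addition, included only to show how benchmarks_info
would typically be consumed by the page.

    import streamlit as st

    from modules.api import llama_stack_api


    def benchmarks():
        # Benchmarks Section
        st.header("Benchmarks")

        # Map each registered benchmark's identifier to its serialized form
        # (this line is verbatim from the patch).
        benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()}

        # Assumed rendering: let the user pick a benchmark and inspect its
        # details as JSON.
        if len(benchmarks_info) > 0:
            selected = st.selectbox("Select a benchmark", list(benchmarks_info.keys()))
            st.json(benchmarks_info[selected])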