Mirror of https://github.com/meta-llama/llama-stack.git
# What does this PR do?

- Update `/eval-tasks` to `/benchmarks`
- ⚠️ Remove the differentiation between `app` vs. `benchmark` eval task configs. Now we only have `BenchmarkConfig`. The overloaded `benchmark` is confusing and does not add any value. Backward compatibility is kept, as the "type" field is not used anywhere.

## Test Plan

- This change is backward compatible
- Run the notebook tests with:

```
pytest -v -s --nbval-lax ./docs/getting_started.ipynb
pytest -v -s --nbval-lax ./docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
```

<img width="846" alt="image" src="https://github.com/user-attachments/assets/d2fc06a7-593a-444f-bc1f-10ab9b0c843d" />

---------

Signed-off-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
Signed-off-by: Ben Browning <bbrownin@redhat.com>
Signed-off-by: Sébastien Han <seb@redhat.com>
Signed-off-by: reidliu <reid201711@gmail.com>
Co-authored-by: Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
Co-authored-by: Ben Browning <ben324@gmail.com>
Co-authored-by: Sébastien Han <seb@redhat.com>
Co-authored-by: Reid <61492567+reidliu41@users.noreply.github.com>
Co-authored-by: reidliu <reid201711@gmail.com>
Co-authored-by: Yuan Tang <terrytangyuan@gmail.com>
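Concretely, the unification means one `BenchmarkConfig` shape drives every eval run, whichever way the benchmark was registered. Below is a minimal sketch from the Python client side, assuming the `llama-stack-client` API; the ids, the model name, and the exact method/field names (`benchmarks.register`, `eval.run_eval`, `eval_candidate`) are illustrative and should be verified against the installed client version:

```python
# Sketch only: method and field names are assumptions based on the
# post-rename /benchmarks API, not verified against a specific release.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

# Register a benchmark (formerly an "eval task") against a dataset.
client.benchmarks.register(
    benchmark_id="meta-reference::mmlu",  # hypothetical id
    dataset_id="mmlu",                    # hypothetical dataset
    scoring_functions=["basic::regex_parser_multiple_choice_answer"],
)

# Run it with the single, unified BenchmarkConfig -- no more app vs.
# benchmark config types.
job = client.eval.run_eval(
    benchmark_id="meta-reference::mmlu",
    benchmark_config={
        "eval_candidate": {
            "type": "model",
            "model": "meta-llama/Llama-3.1-8B-Instruct",
            "sampling_params": {"temperature": 0.0},
        },
    },
)
```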
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from page.distribution.benchmarks import benchmarks
from page.distribution.datasets import datasets
from page.distribution.models import models
from page.distribution.scoring_functions import scoring_functions
from page.distribution.shields import shields
from page.distribution.vector_dbs import vector_dbs
from streamlit_option_menu import option_menu


def resources_page():
    # Resource kinds surfaced in the UI; "Benchmarks" replaces the old
    # eval-tasks entry after the /eval-tasks -> /benchmarks rename.
    options = [
        "Models",
        "Vector Databases",
        "Shields",
        "Scoring Functions",
        "Datasets",
        "Benchmarks",
    ]
    icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"]
    # Horizontal nav bar; returns the label of the selected option.
    selected_resource = option_menu(
        None,
        options,
        icons=icons,
        orientation="horizontal",
        styles={
            "nav-link": {
                "font-size": "12px",
            },
        },
    )
    # Render the page for whichever resource was selected.
    if selected_resource == "Benchmarks":
        benchmarks()
    elif selected_resource == "Vector Databases":
        vector_dbs()
    elif selected_resource == "Datasets":
        datasets()
    elif selected_resource == "Models":
        models()
    elif selected_resource == "Scoring Functions":
        scoring_functions()
    elif selected_resource == "Shields":
        shields()


resources_page()
```
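One observation on the dispatch above: the `if`/`elif` chain repeats the `options` list entry by entry. A possible refactor (a sketch, not part of this PR) derives both the menu and the routing from a single mapping so the two cannot drift apart:

```python
# Alternative dispatch: one mapping drives both the menu options and
# the routing, using the same imports as the module above.
PAGES = {
    "Models": models,
    "Vector Databases": vector_dbs,
    "Shields": shields,
    "Scoring Functions": scoring_functions,
    "Datasets": datasets,
    "Benchmarks": benchmarks,
}


def resources_page():
    selected_resource = option_menu(
        None,
        list(PAGES.keys()),  # insertion order preserved in Python 3.7+
        icons=["magic", "memory", "shield", "file-bar-graph", "database", "list-task"],
        orientation="horizontal",
        styles={"nav-link": {"font-size": "12px"}},
    )
    PAGES[selected_resource]()
```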