# What does this PR do?

New Pages Added:
- (1) Inspect Distro
- (2) Evaluations:
  - (a) native evaluations (including generation)
  - (b) application evaluations (no generation, scoring only)
- (3) Playground:
  - (a) chat
  - (b) RAG

## Test Plan

```
streamlit run app.py
```

#### Playground
https://github.com/user-attachments/assets/6ca617e8-32ca-49b2-9774-185020ff5204

#### Inspect
https://github.com/user-attachments/assets/01d52b2d-92af-4e3a-b623-a9b8ba22ba99

#### Evaluations (Generation + Scoring)
https://github.com/user-attachments/assets/345845c7-2a2b-4095-960a-9ae40f6a93cf

#### Evaluations (Scoring)
https://github.com/user-attachments/assets/6cc1659f-eba4-49ca-a0a5-7c243557b4f5

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
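The Playground chat page is essentially a Streamlit chat loop. Below is a minimal sketch of what such a page could look like; `get_chat_response` is a hypothetical placeholder for the llama-stack inference call and is not the code shipped in this PR.

```python
import streamlit as st


def get_chat_response(prompt: str) -> str:
    # Hypothetical stub; the real page would call the llama-stack
    # inference API here and return the model's reply.
    return f"(placeholder response to: {prompt})"


def chat_page():
    st.title("Chat Playground")

    # Keep the conversation across Streamlit reruns.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Read a new user prompt and append the assistant's reply.
    if prompt := st.chat_input("Ask something"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        response = get_chat_response(prompt)
        st.session_state.messages.append({"role": "assistant", "content": response})
        with st.chat_message("assistant"):
            st.markdown(response)


chat_page()
```

Keeping the message history in `st.session_state` is what lets the conversation survive Streamlit's rerun-on-every-interaction model.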
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from page.distribution.datasets import datasets
from page.distribution.eval_tasks import eval_tasks
from page.distribution.memory_banks import memory_banks
from page.distribution.models import models
from page.distribution.scoring_functions import scoring_functions
from page.distribution.shields import shields

from streamlit_option_menu import option_menu


def resources_page():
    options = [
        "Models",
        "Memory Banks",
        "Shields",
        "Scoring Functions",
        "Datasets",
        "Eval Tasks",
    ]
    icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"]
    # Horizontal menu for picking which resource type to inspect.
    selected_resource = option_menu(
        None,
        options,
        icons=icons,
        orientation="horizontal",
        styles={
            "nav-link": {
                "font-size": "12px",
            },
        },
    )
    # Dispatch to the page that lists the selected resource.
    if selected_resource == "Eval Tasks":
        eval_tasks()
    elif selected_resource == "Memory Banks":
        memory_banks()
    elif selected_resource == "Datasets":
        datasets()
    elif selected_resource == "Models":
        models()
    elif selected_resource == "Scoring Functions":
        scoring_functions()
    elif selected_resource == "Shields":
        shields()


resources_page()
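For reference, a hypothetical top-level `app.py` could expose this resources page alongside the Playground pages through Streamlit's multipage navigation (`st.navigation` needs a reasonably recent Streamlit release). The file paths and section grouping below are assumptions based on the page names in this PR, not the actual layout.

```python
import streamlit as st

# File paths here are assumptions about how the pages in this PR are laid
# out on disk; adjust them to the actual package structure.
resources = st.Page("page/distribution/resources.py", title="Resources", icon="🔍")
chat = st.Page("page/playground/chat.py", title="Chat", icon="💬")
rag = st.Page("page/playground/rag.py", title="RAG", icon="📚")

# Group the pages into sidebar sections and run whichever one is selected.
pg = st.navigation({"Inspect": [resources], "Playground": [chat, rag]})
pg.run()
```

Running `streamlit run app.py`, as in the test plan above, would then render the sidebar navigation and load each page script on demand.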