requirements

commit e245f459bb (parent 114595ce71)

13 changed files with 148 additions and 160 deletions
@@ -10,13 +10,13 @@ def main():
     # Evaluation pages
     application_evaluation_page = st.Page(
         "page/evaluations/app_eval.py",
-        title="Application Evaluation",
+        title="Evaluations (Scoring)",
         icon="📊",
         default=False,
     )
     native_evaluation_page = st.Page(
         "page/evaluations/native_eval.py",
-        title="Native Evaluation",
+        title="Evaluations (Generation + Scoring)",
         icon="📊",
         default=False,
     )
@@ -28,30 +28,12 @@ def main():
     rag_page = st.Page("page/playground/rag.py", title="RAG", icon="💬", default=False)
 
     # Distribution pages
+    resources_page = st.Page(
+        "page/distribution/resources.py", title="Resources", icon="🔍", default=False
+    )
     provider_page = st.Page(
-        "page/distribution/providers.py", title="Provider", icon="🔍", default=False
-    )
-    model_page = st.Page(
-        "page/distribution/models.py", title="Models", icon="🔍", default=False
-    )
-    memory_bank_page = st.Page(
-        "page/distribution/memory_banks.py",
-        title="Memory Banks",
-        icon="🔍",
-        default=False,
-    )
-    shield_page = st.Page(
-        "page/distribution/shields.py", title="Shields", icon="🔍", default=False
-    )
-    scoring_function_page = st.Page(
-        "page/distribution/scoring_functions.py",
-        title="Scoring Functions",
-        icon="🔍",
-        default=False,
-    )
-    eval_task_page = st.Page(
-        "page/distribution/eval_tasks.py",
-        title="Eval Tasks",
+        "page/distribution/providers.py",
+        title="API Providers",
         icon="🔍",
         default=False,
     )
@@ -64,14 +46,7 @@ def main():
                 application_evaluation_page,
                 native_evaluation_page,
             ],
-            "Inspect": [
-                provider_page,
-                model_page,
-                memory_bank_page,
-                shield_page,
-                scoring_function_page,
-                eval_task_page,
-            ],
+            "Inspect": [provider_page, resources_page],
         },
         expanded=False,
     )
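The three hunks above retitle the evaluation pages and collapse the six per-resource Inspect pages into a single Resources page next to API Providers. As a point of reference, here is a minimal, hedged sketch of how `st.Page` objects and a section-label dictionary are typically wired together (assuming the surrounding call is `st.navigation`, which the diff does not show directly; the standalone layout below is illustrative, not the repository's actual entry point):

```python
import streamlit as st

# Hypothetical standalone entry point mirroring the grouping used above.
provider_page = st.Page(
    "page/distribution/providers.py", title="API Providers", icon="🔍", default=False
)
resources_page = st.Page(
    "page/distribution/resources.py", title="Resources", icon="🔍", default=False
)

pg = st.navigation(
    {
        # Section label -> list of st.Page objects shown under that heading.
        "Inspect": [provider_page, resources_page],
    },
    expanded=False,
)
pg.run()  # executes the script of whichever page is currently selected
```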
llama_stack/distribution/ui/page/distribution/datasets.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+import streamlit as st
+from modules.api import llama_stack_api
+
+
+def datasets():
+    st.header("Datasets")
+
+    datasets_info = {
+        d.identifier: d.to_dict() for d in llama_stack_api.client.datasets.list()
+    }
+
+    selected_dataset = st.selectbox("Select a dataset", list(datasets_info.keys()))
+    st.json(datasets_info[selected_dataset], expanded=True)
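The new datasets page illustrates the pattern this commit applies to every distribution view: the Streamlit calls move from module level into a function, so importing the module has no side effects and `resources.py` can render only the selected view. A minimal sketch of the before/after shape (illustrative only, not repository code):

```python
import streamlit as st

# Before: module-level calls render as soon as the page module is imported.
# st.header("Datasets")


# After: rendering is deferred until a caller (here, resources.py) invokes it.
def datasets() -> None:
    st.header("Datasets")
```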
@@ -1,79 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import streamlit as st
-from modules.api import llama_stack_api
-
-st.title("🔍 Inspect Distribution")
-
-# API Section
-st.header("API Providers")
-apis_providers_info = llama_stack_api.client.providers.list()
-selected_api = st.selectbox("Select an API", list(apis_providers_info.keys()))
-st.dataframe([p.to_dict() for p in apis_providers_info[selected_api]], width=500)
-
-# Models Section
-st.header("Models")
-models_info = {m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()}
-
-selected_model = st.selectbox("Select a model", list(models_info.keys()))
-st.json(models_info[selected_model])
-
-# Memory Banks Section
-st.header("Memory Banks")
-memory_banks_info = {
-    m.identifier: m.to_dict() for m in llama_stack_api.client.memory_banks.list()
-}
-
-if len(memory_banks_info) > 0:
-    selected_memory_bank = st.selectbox(
-        "Select a memory bank", list(memory_banks_info.keys())
-    )
-    st.json(memory_banks_info[selected_memory_bank])
-else:
-    st.info("No memory banks found")
-
-# Shields Section
-st.header("Shields")
-
-shields_info = {
-    s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()
-}
-
-selected_shield = st.selectbox("Select a shield", list(shields_info.keys()))
-st.json(shields_info[selected_shield])
-
-# Datasets Section
-st.header("Datasets")
-
-datasets_info = {
-    d.identifier: d.to_dict() for d in llama_stack_api.client.datasets.list()
-}
-
-selected_dataset = st.selectbox("Select a dataset", list(datasets_info.keys()))
-st.json(datasets_info[selected_dataset], expanded=False)
-
-# Scoring Functions Section
-st.header("Scoring Functions")
-
-scoring_functions_info = {
-    s.identifier: s.to_dict() for s in llama_stack_api.client.scoring_functions.list()
-}
-
-selected_scoring_function = st.selectbox(
-    "Select a scoring function", list(scoring_functions_info.keys())
-)
-st.json(scoring_functions_info[selected_scoring_function], expanded=False)
-
-# Eval Tasks Section
-st.header("Eval Tasks")
-
-eval_tasks_info = {
-    d.identifier: d.to_dict() for d in llama_stack_api.client.eval_tasks.list()
-}
-
-selected_eval_task = st.selectbox("Select an eval task", list(eval_tasks_info.keys()))
-st.json(eval_tasks_info[selected_eval_task], expanded=True)
llama_stack/distribution/ui/page/distribution/eval_tasks.py
@@ -7,12 +7,16 @@
 import streamlit as st
 from modules.api import llama_stack_api
 
-# Eval Tasks Section
-st.header("Eval Tasks")
-
-eval_tasks_info = {
-    d.identifier: d.to_dict() for d in llama_stack_api.client.eval_tasks.list()
-}
-
-selected_eval_task = st.selectbox("Select an eval task", list(eval_tasks_info.keys()))
-st.json(eval_tasks_info[selected_eval_task], expanded=True)
+
+def eval_tasks():
+    # Eval Tasks Section
+    st.header("Eval Tasks")
+
+    eval_tasks_info = {
+        d.identifier: d.to_dict() for d in llama_stack_api.client.eval_tasks.list()
+    }
+
+    selected_eval_task = st.selectbox(
+        "Select an eval task", list(eval_tasks_info.keys()), key="eval_task_inspect"
+    )
+    st.json(eval_tasks_info[selected_eval_task], expanded=True)
llama_stack/distribution/ui/page/distribution/memory_banks.py
@@ -7,15 +7,17 @@
 import streamlit as st
 from modules.api import llama_stack_api
 
-st.header("Memory Banks")
-memory_banks_info = {
-    m.identifier: m.to_dict() for m in llama_stack_api.client.memory_banks.list()
-}
-
-if len(memory_banks_info) > 0:
-    selected_memory_bank = st.selectbox(
-        "Select a memory bank", list(memory_banks_info.keys())
-    )
-    st.json(memory_banks_info[selected_memory_bank])
-else:
-    st.info("No memory banks found")
+
+def memory_banks():
+    st.header("Memory Banks")
+    memory_banks_info = {
+        m.identifier: m.to_dict() for m in llama_stack_api.client.memory_banks.list()
+    }
+
+    if len(memory_banks_info) > 0:
+        selected_memory_bank = st.selectbox(
+            "Select a memory bank", list(memory_banks_info.keys())
+        )
+        st.json(memory_banks_info[selected_memory_bank])
+    else:
+        st.info("No memory banks found")
llama_stack/distribution/ui/page/distribution/models.py
@@ -7,9 +7,13 @@
 import streamlit as st
 from modules.api import llama_stack_api
 
-# Models Section
-st.header("Models")
-models_info = {m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()}
-
-selected_model = st.selectbox("Select a model", list(models_info.keys()))
-st.json(models_info[selected_model])
+
+def models():
+    # Models Section
+    st.header("Models")
+    models_info = {
+        m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()
+    }
+
+    selected_model = st.selectbox("Select a model", list(models_info.keys()))
+    st.json(models_info[selected_model])
llama_stack/distribution/ui/page/distribution/providers.py
@@ -7,9 +7,14 @@
 import streamlit as st
 from modules.api import llama_stack_api
 
-st.header("API Providers")
-apis_providers_info = llama_stack_api.client.providers.list()
-# selected_api = st.selectbox("Select an API", list(apis_providers_info.keys()))
-for api in apis_providers_info.keys():
-    st.markdown(f"###### {api}")
-    st.dataframe([p.to_dict() for p in apis_providers_info[api]], width=500)
+
+def providers():
+    st.header("🔍 API Providers")
+    apis_providers_info = llama_stack_api.client.providers.list()
+    # selected_api = st.selectbox("Select an API", list(apis_providers_info.keys()))
+    for api in apis_providers_info.keys():
+        st.markdown(f"###### {api}")
+        st.dataframe([p.to_dict() for p in apis_providers_info[api]], width=500)
+
+
+providers()
llama_stack/distribution/ui/page/distribution/resources.py (new file, 52 lines)
@@ -0,0 +1,52 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from page.distribution.datasets import datasets
+from page.distribution.eval_tasks import eval_tasks
+from page.distribution.memory_banks import memory_banks
+from page.distribution.models import models
+from page.distribution.scoring_functions import scoring_functions
+from page.distribution.shields import shields
+
+from streamlit_option_menu import option_menu
+
+
+def resources_page():
+    options = [
+        "Models",
+        "Memory Banks",
+        "Shields",
+        "Scoring Functions",
+        "Datasets",
+        "Eval Tasks",
+    ]
+    icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"]
+    selected_resource = option_menu(
+        None,
+        options,
+        icons=icons,
+        orientation="horizontal",
+        styles={
+            "nav-link": {
+                "font-size": "12px",
+            },
+        },
+    )
+    if selected_resource == "Eval Tasks":
+        eval_tasks()
+    elif selected_resource == "Memory Banks":
+        memory_banks()
+    elif selected_resource == "Datasets":
+        datasets()
+    elif selected_resource == "Models":
+        models()
+    elif selected_resource == "Scoring Functions":
+        scoring_functions()
+    elif selected_resource == "Shields":
+        shields()
+
+
+resources_page()
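`resources_page` dispatches on the label string returned by `option_menu` with an if/elif chain. An equivalent dictionary-based dispatch is sketched below purely as a design note; it is not part of this commit:

```python
from page.distribution.datasets import datasets
from page.distribution.eval_tasks import eval_tasks
from page.distribution.memory_banks import memory_banks
from page.distribution.models import models
from page.distribution.scoring_functions import scoring_functions
from page.distribution.shields import shields
from streamlit_option_menu import option_menu


def resources_page() -> None:
    # Label -> render function; option_menu returns the selected label.
    views = {
        "Models": models,
        "Memory Banks": memory_banks,
        "Shields": shields,
        "Scoring Functions": scoring_functions,
        "Datasets": datasets,
        "Eval Tasks": eval_tasks,
    }
    icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"]
    selected = option_menu(
        None,
        list(views.keys()),
        icons=icons,
        orientation="horizontal",
        styles={"nav-link": {"font-size": "12px"}},
    )
    views[selected]()  # render only the chosen resource view
```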
llama_stack/distribution/ui/page/distribution/scoring_functions.py
@@ -7,13 +7,16 @@
 import streamlit as st
 from modules.api import llama_stack_api
 
-st.header("Scoring Functions")
-
-scoring_functions_info = {
-    s.identifier: s.to_dict() for s in llama_stack_api.client.scoring_functions.list()
-}
-
-selected_scoring_function = st.selectbox(
-    "Select a scoring function", list(scoring_functions_info.keys())
-)
-st.json(scoring_functions_info[selected_scoring_function], expanded=True)
+
+def scoring_functions():
+    st.header("Scoring Functions")
+
+    scoring_functions_info = {
+        s.identifier: s.to_dict()
+        for s in llama_stack_api.client.scoring_functions.list()
+    }
+
+    selected_scoring_function = st.selectbox(
+        "Select a scoring function", list(scoring_functions_info.keys())
+    )
+    st.json(scoring_functions_info[selected_scoring_function], expanded=True)
llama_stack/distribution/ui/page/distribution/shields.py
@@ -7,12 +7,14 @@
 import streamlit as st
 from modules.api import llama_stack_api
 
-# Shields Section
-st.header("Shields")
-
-shields_info = {
-    s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()
-}
-
-selected_shield = st.selectbox("Select a shield", list(shields_info.keys()))
-st.json(shields_info[selected_shield])
+
+def shields():
+    # Shields Section
+    st.header("Shields")
+
+    shields_info = {
+        s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()
+    }
+
+    selected_shield = st.selectbox("Select a shield", list(shields_info.keys()))
+    st.json(shields_info[selected_shield])
llama_stack/distribution/ui/page/evaluations/app_eval.py
@@ -15,8 +15,8 @@ from modules.utils import process_dataset
 def application_evaluation_page():
 
-    st.set_page_config(page_title="Application Evaluation", page_icon="🦙")
-    st.title("🦙 Llama Stack Application Evaluations")
+    st.set_page_config(page_title="Evaluations (Scoring)", page_icon="🦙")
+    st.title("📊 Evaluations (Scoring)")
 
     # File uploader
     uploaded_file = st.file_uploader("Upload Dataset", type=["csv", "xlsx", "xls"])
llama_stack/distribution/ui/page/evaluations/native_eval.py
@@ -15,8 +15,8 @@ from modules.api import llama_stack_api
 def native_evaluation_page():
 
-    st.set_page_config(page_title="Native Evaluations", page_icon="🦙")
-    st.title("🦙 Llama Stack Native Evaluations")
+    st.set_page_config(page_title="Evaluations (Generation + Scoring)", page_icon="🦙")
+    st.title("📊 Evaluations (Generation + Scoring)")
 
     # Create tabs
     task_tab, candidate_tab, params_tab, run_tab = st.tabs(
@@ -1,3 +1,4 @@
 streamlit
 pandas
 llama-stack-client>=0.0.55
+streamlit-option-menu
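The only dependency change is the new `streamlit-option-menu` package, which provides the horizontal menu used in `resources.py`. Note the hyphen/underscore difference between the PyPI distribution name and the import name:

```python
# Installed as "streamlit-option-menu" (see the requirements hunk above),
# imported as "streamlit_option_menu".
from streamlit_option_menu import option_menu
```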