diff --git a/llama_stack/core/ui/Containerfile b/llama_stack/core/ui/Containerfile
deleted file mode 100644
index 0126d1867..000000000
--- a/llama_stack/core/ui/Containerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-# More info on playground configuration can be found here:
-# https://llama-stack.readthedocs.io/en/latest/playground
-
-FROM python:3.12-slim
-WORKDIR /app
-COPY . /app/
-RUN /usr/local/bin/python -m pip install --upgrade pip && \
-    /usr/local/bin/pip3 install -r requirements.txt
-EXPOSE 8501
-
-ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
diff --git a/llama_stack/core/ui/README.md b/llama_stack/core/ui/README.md
deleted file mode 100644
index 05b4adc26..000000000
--- a/llama_stack/core/ui/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# (Experimental) LLama Stack UI
-
-## Docker Setup
-
-:warning: This is a work in progress.
-
-## Developer Setup
-
-1. Start up Llama Stack API server. More details [here](https://llama-stack.readthedocs.io/en/latest/getting_started/index.html).
-
-```
-llama stack build --distro together --image-type venv
-
-llama stack run together
-```
-
-2. (Optional) Register datasets and eval tasks as resources. If you want to run pre-configured evaluation flows (e.g. Evaluations (Generation + Scoring) Page).
-
-```bash
-llama-stack-client datasets register \
---dataset-id "mmlu" \
---provider-id "huggingface" \
---url "https://huggingface.co/datasets/llamastack/evals" \
---metadata '{"path": "llamastack/evals", "name": "evals__mmlu__details", "split": "train"}' \
---schema '{"input_query": {"type": "string"}, "expected_answer": {"type": "string", "chat_completion_input": {"type": "string"}}}'
-```
-
-```bash
-llama-stack-client benchmarks register \
---eval-task-id meta-reference-mmlu \
---provider-id meta-reference \
---dataset-id mmlu \
---scoring-functions basic::regex_parser_multiple_choice_answer
-```
-
-3. Start Streamlit UI
-
-```bash
-uv run --with ".[ui]" streamlit run llama_stack.core/ui/app.py
-```
-
-## Environment Variables
-
-| Environment Variable | Description | Default Value |
-|----------------------------|------------------------------------|---------------------------|
-| LLAMA_STACK_ENDPOINT | The endpoint for the Llama Stack | http://localhost:8321 |
-| FIREWORKS_API_KEY | API key for Fireworks provider | (empty string) |
-| TOGETHER_API_KEY | API key for Together provider | (empty string) |
-| SAMBANOVA_API_KEY | API key for SambaNova provider | (empty string) |
-| OPENAI_API_KEY | API key for OpenAI provider | (empty string) |
diff --git a/llama_stack/core/ui/__init__.py b/llama_stack/core/ui/__init__.py
deleted file mode 100644
index 756f351d8..000000000
--- a/llama_stack/core/ui/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
diff --git a/llama_stack/core/ui/app.py b/llama_stack/core/ui/app.py
deleted file mode 100644
index 441f65d20..000000000
--- a/llama_stack/core/ui/app.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-import streamlit as st - - -def main(): - # Evaluation pages - application_evaluation_page = st.Page( - "page/evaluations/app_eval.py", - title="Evaluations (Scoring)", - icon="šŸ“Š", - default=False, - ) - native_evaluation_page = st.Page( - "page/evaluations/native_eval.py", - title="Evaluations (Generation + Scoring)", - icon="šŸ“Š", - default=False, - ) - - # Playground pages - chat_page = st.Page("page/playground/chat.py", title="Chat", icon="šŸ’¬", default=True) - rag_page = st.Page("page/playground/rag.py", title="RAG", icon="šŸ’¬", default=False) - tool_page = st.Page("page/playground/tools.py", title="Tools", icon="šŸ› ", default=False) - - # Distribution pages - resources_page = st.Page("page/distribution/resources.py", title="Resources", icon="šŸ”", default=False) - provider_page = st.Page( - "page/distribution/providers.py", - title="API Providers", - icon="šŸ”", - default=False, - ) - - pg = st.navigation( - { - "Playground": [ - chat_page, - rag_page, - tool_page, - application_evaluation_page, - native_evaluation_page, - ], - "Inspect": [provider_page, resources_page], - }, - expanded=False, - ) - pg.run() - - -if __name__ == "__main__": - main() diff --git a/llama_stack/core/ui/modules/__init__.py b/llama_stack/core/ui/modules/__init__.py deleted file mode 100644 index 756f351d8..000000000 --- a/llama_stack/core/ui/modules/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. diff --git a/llama_stack/core/ui/modules/api.py b/llama_stack/core/ui/modules/api.py deleted file mode 100644 index 9db87b280..000000000 --- a/llama_stack/core/ui/modules/api.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import os - -from llama_stack_client import LlamaStackClient - - -class LlamaStackApi: - def __init__(self): - self.client = LlamaStackClient( - base_url=os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:8321"), - provider_data={ - "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY", ""), - "together_api_key": os.environ.get("TOGETHER_API_KEY", ""), - "sambanova_api_key": os.environ.get("SAMBANOVA_API_KEY", ""), - "openai_api_key": os.environ.get("OPENAI_API_KEY", ""), - "tavily_search_api_key": os.environ.get("TAVILY_SEARCH_API_KEY", ""), - }, - ) - - def run_scoring(self, row, scoring_function_ids: list[str], scoring_params: dict | None): - """Run scoring on a single row""" - if not scoring_params: - scoring_params = dict.fromkeys(scoring_function_ids) - return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params) - - -llama_stack_api = LlamaStackApi() diff --git a/llama_stack/core/ui/modules/utils.py b/llama_stack/core/ui/modules/utils.py deleted file mode 100644 index 67cce98fa..000000000 --- a/llama_stack/core/ui/modules/utils.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -import base64 -import os - -import pandas as pd -import streamlit as st - - -def process_dataset(file): - if file is None: - return "No file uploaded", None - - try: - # Determine file type and read accordingly - file_ext = os.path.splitext(file.name)[1].lower() - if file_ext == ".csv": - df = pd.read_csv(file) - elif file_ext in [".xlsx", ".xls"]: - df = pd.read_excel(file) - else: - return "Unsupported file format. Please upload a CSV or Excel file.", None - - return df - - except Exception as e: - st.error(f"Error processing file: {str(e)}") - return None - - -def data_url_from_file(file) -> str: - file_content = file.getvalue() - base64_content = base64.b64encode(file_content).decode("utf-8") - mime_type = file.type - - data_url = f"data:{mime_type};base64,{base64_content}" - - return data_url diff --git a/llama_stack/core/ui/page/__init__.py b/llama_stack/core/ui/page/__init__.py deleted file mode 100644 index 756f351d8..000000000 --- a/llama_stack/core/ui/page/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. diff --git a/llama_stack/core/ui/page/distribution/__init__.py b/llama_stack/core/ui/page/distribution/__init__.py deleted file mode 100644 index 756f351d8..000000000 --- a/llama_stack/core/ui/page/distribution/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. diff --git a/llama_stack/core/ui/page/distribution/datasets.py b/llama_stack/core/ui/page/distribution/datasets.py deleted file mode 100644 index aab0901ac..000000000 --- a/llama_stack/core/ui/page/distribution/datasets.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def datasets(): - st.header("Datasets") - - datasets_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.datasets.list()} - if len(datasets_info) > 0: - selected_dataset = st.selectbox("Select a dataset", list(datasets_info.keys())) - st.json(datasets_info[selected_dataset], expanded=True) diff --git a/llama_stack/core/ui/page/distribution/eval_tasks.py b/llama_stack/core/ui/page/distribution/eval_tasks.py deleted file mode 100644 index 1a0ce502b..000000000 --- a/llama_stack/core/ui/page/distribution/eval_tasks.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def benchmarks(): - # Benchmarks Section - st.header("Benchmarks") - - benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()} - - if len(benchmarks_info) > 0: - selected_benchmark = st.selectbox("Select an eval task", list(benchmarks_info.keys()), key="benchmark_inspect") - st.json(benchmarks_info[selected_benchmark], expanded=True) diff --git a/llama_stack/core/ui/page/distribution/models.py b/llama_stack/core/ui/page/distribution/models.py deleted file mode 100644 index f84508746..000000000 --- a/llama_stack/core/ui/page/distribution/models.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def models(): - # Models Section - st.header("Models") - models_info = {m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()} - - selected_model = st.selectbox("Select a model", list(models_info.keys())) - st.json(models_info[selected_model]) diff --git a/llama_stack/core/ui/page/distribution/providers.py b/llama_stack/core/ui/page/distribution/providers.py deleted file mode 100644 index 3ec6026d1..000000000 --- a/llama_stack/core/ui/page/distribution/providers.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def providers(): - st.header("šŸ” API Providers") - apis_providers_lst = llama_stack_api.client.providers.list() - api_to_providers = {} - for api_provider in apis_providers_lst: - if api_provider.api in api_to_providers: - api_to_providers[api_provider.api].append(api_provider) - else: - api_to_providers[api_provider.api] = [api_provider] - - for api in api_to_providers.keys(): - st.markdown(f"###### {api}") - st.dataframe([x.to_dict() for x in api_to_providers[api]], width=500) - - -providers() diff --git a/llama_stack/core/ui/page/distribution/resources.py b/llama_stack/core/ui/page/distribution/resources.py deleted file mode 100644 index c56fcfff3..000000000 --- a/llama_stack/core/ui/page/distribution/resources.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -from streamlit_option_menu import option_menu - -from llama_stack.core.ui.page.distribution.datasets import datasets -from llama_stack.core.ui.page.distribution.eval_tasks import benchmarks -from llama_stack.core.ui.page.distribution.models import models -from llama_stack.core.ui.page.distribution.scoring_functions import scoring_functions -from llama_stack.core.ui.page.distribution.shields import shields -from llama_stack.core.ui.page.distribution.vector_dbs import vector_dbs - - -def resources_page(): - options = [ - "Models", - "Vector Databases", - "Shields", - "Scoring Functions", - "Datasets", - "Benchmarks", - ] - icons = ["magic", "memory", "shield", "file-bar-graph", "database", "list-task"] - selected_resource = option_menu( - None, - options, - icons=icons, - orientation="horizontal", - styles={ - "nav-link": { - "font-size": "12px", - }, - }, - ) - if selected_resource == "Benchmarks": - benchmarks() - elif selected_resource == "Vector Databases": - vector_dbs() - elif selected_resource == "Datasets": - datasets() - elif selected_resource == "Models": - models() - elif selected_resource == "Scoring Functions": - scoring_functions() - elif selected_resource == "Shields": - shields() - - -resources_page() diff --git a/llama_stack/core/ui/page/distribution/scoring_functions.py b/llama_stack/core/ui/page/distribution/scoring_functions.py deleted file mode 100644 index 2a5196fa9..000000000 --- a/llama_stack/core/ui/page/distribution/scoring_functions.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def scoring_functions(): - st.header("Scoring Functions") - - scoring_functions_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.scoring_functions.list()} - - selected_scoring_function = st.selectbox("Select a scoring function", list(scoring_functions_info.keys())) - st.json(scoring_functions_info[selected_scoring_function], expanded=True) diff --git a/llama_stack/core/ui/page/distribution/shields.py b/llama_stack/core/ui/page/distribution/shields.py deleted file mode 100644 index ecce2f12b..000000000 --- a/llama_stack/core/ui/page/distribution/shields.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def shields(): - # Shields Section - st.header("Shields") - - shields_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()} - - selected_shield = st.selectbox("Select a shield", list(shields_info.keys())) - st.json(shields_info[selected_shield]) diff --git a/llama_stack/core/ui/page/distribution/vector_dbs.py b/llama_stack/core/ui/page/distribution/vector_dbs.py deleted file mode 100644 index e81077d2a..000000000 --- a/llama_stack/core/ui/page/distribution/vector_dbs.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def vector_dbs(): - st.header("Vector Databases") - vector_dbs_info = {v.identifier: v.to_dict() for v in llama_stack_api.client.vector_dbs.list()} - - if len(vector_dbs_info) > 0: - selected_vector_db = st.selectbox("Select a vector database", list(vector_dbs_info.keys())) - st.json(vector_dbs_info[selected_vector_db]) - else: - st.info("No vector databases found") diff --git a/llama_stack/core/ui/page/evaluations/__init__.py b/llama_stack/core/ui/page/evaluations/__init__.py deleted file mode 100644 index 756f351d8..000000000 --- a/llama_stack/core/ui/page/evaluations/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. diff --git a/llama_stack/core/ui/page/evaluations/app_eval.py b/llama_stack/core/ui/page/evaluations/app_eval.py deleted file mode 100644 index 07e6349c9..000000000 --- a/llama_stack/core/ui/page/evaluations/app_eval.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import json - -import pandas as pd -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api -from llama_stack.core.ui.modules.utils import process_dataset - - -def application_evaluation_page(): - st.set_page_config(page_title="Evaluations (Scoring)", page_icon="šŸ¦™") - st.title("šŸ“Š Evaluations (Scoring)") - - # File uploader - uploaded_file = st.file_uploader("Upload Dataset", type=["csv", "xlsx", "xls"]) - - if uploaded_file is None: - st.error("No file uploaded") - return - - # Process uploaded file - df = process_dataset(uploaded_file) - if df is None: - st.error("Error processing file") - return - - # Display dataset information - st.success("Dataset loaded successfully!") - - # Display dataframe preview - st.subheader("Dataset Preview") - st.dataframe(df) - - # Select Scoring Functions to Run Evaluation On - st.subheader("Select Scoring Functions") - scoring_functions = llama_stack_api.client.scoring_functions.list() - scoring_functions = {sf.identifier: sf for sf in scoring_functions} - scoring_functions_names = list(scoring_functions.keys()) - selected_scoring_functions = st.multiselect( - "Choose one or more scoring functions", - options=scoring_functions_names, - help="Choose one or more scoring functions.", - ) - - available_models = llama_stack_api.client.models.list() - available_models = [m.identifier for m in available_models] - - scoring_params = {} - if selected_scoring_functions: - st.write("Selected:") - for scoring_fn_id in selected_scoring_functions: - scoring_fn = scoring_functions[scoring_fn_id] - st.write(f"- **{scoring_fn_id}**: {scoring_fn.description}") - new_params = None - if scoring_fn.params: - new_params = {} - for param_name, param_value in scoring_fn.params.to_dict().items(): - if param_name == "type": - new_params[param_name] = param_value - continue - - if param_name == "judge_model": - value = st.selectbox( - f"Select **{param_name}** for {scoring_fn_id}", - options=available_models, - index=0, - key=f"{scoring_fn_id}_{param_name}", - ) - new_params[param_name] = value - else: - value = st.text_area( - f"Enter value for **{param_name}** in {scoring_fn_id} in valid JSON 
format", - value=json.dumps(param_value, indent=2), - height=80, - ) - try: - new_params[param_name] = json.loads(value) - except json.JSONDecodeError: - st.error(f"Invalid JSON for **{param_name}** in {scoring_fn_id}") - - st.json(new_params) - scoring_params[scoring_fn_id] = new_params - - # Add run evaluation button & slider - total_rows = len(df) - num_rows = st.slider("Number of rows to evaluate", 1, total_rows, total_rows) - - if st.button("Run Evaluation"): - progress_text = "Running evaluation..." - progress_bar = st.progress(0, text=progress_text) - rows = df.to_dict(orient="records") - if num_rows < total_rows: - rows = rows[:num_rows] - - # Create separate containers for progress text and results - progress_text_container = st.empty() - results_container = st.empty() - output_res = {} - for i, r in enumerate(rows): - # Update progress - progress = i / len(rows) - progress_bar.progress(progress, text=progress_text) - - # Run evaluation for current row - score_res = llama_stack_api.run_scoring( - r, - scoring_function_ids=selected_scoring_functions, - scoring_params=scoring_params, - ) - - for k in r.keys(): - if k not in output_res: - output_res[k] = [] - output_res[k].append(r[k]) - - for fn_id in selected_scoring_functions: - if fn_id not in output_res: - output_res[fn_id] = [] - output_res[fn_id].append(score_res.results[fn_id].score_rows[0]) - - # Display current row results using separate containers - progress_text_container.write(f"Expand to see current processed result ({i + 1} / {len(rows)})") - results_container.json( - score_res.to_json(), - expanded=2, - ) - - progress_bar.progress(1.0, text="Evaluation complete!") - - # Display results in dataframe - if output_res: - output_df = pd.DataFrame(output_res) - st.subheader("Evaluation Results") - st.dataframe(output_df) - - -application_evaluation_page() diff --git a/llama_stack/core/ui/page/evaluations/native_eval.py b/llama_stack/core/ui/page/evaluations/native_eval.py deleted file mode 100644 index 2bef63b2f..000000000 --- a/llama_stack/core/ui/page/evaluations/native_eval.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import json - -import pandas as pd -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - - -def select_benchmark_1(): - # Select Benchmarks - st.subheader("1. Choose An Eval Task") - benchmarks = llama_stack_api.client.benchmarks.list() - benchmarks = {et.identifier: et for et in benchmarks} - benchmarks_names = list(benchmarks.keys()) - selected_benchmark = st.selectbox( - "Choose an eval task.", - options=benchmarks_names, - help="Choose an eval task. Each eval task is parameterized by a dataset, and list of scoring functions.", - ) - with st.expander("View Eval Task"): - st.json(benchmarks[selected_benchmark], expanded=True) - - st.session_state["selected_benchmark"] = selected_benchmark - st.session_state["benchmarks"] = benchmarks - if st.button("Confirm", key="confirm_1"): - st.session_state["selected_benchmark_1_next"] = True - - -def define_eval_candidate_2(): - if not st.session_state.get("selected_benchmark_1_next", None): - return - - st.subheader("2. Define Eval Candidate") - st.info( - """ - Define the configurations for the evaluation candidate model or agent used for generation. 
- Select "model" if you want to run generation with inference API, or "agent" if you want to run generation with agent API through specifying AgentConfig. - """ - ) - with st.expander("Define Eval Candidate", expanded=True): - # Define Eval Candidate - candidate_type = st.radio("Candidate Type", ["model", "agent"]) - - available_models = llama_stack_api.client.models.list() - available_models = [model.identifier for model in available_models] - selected_model = st.selectbox( - "Choose a model", - available_models, - index=0, - ) - - # Sampling Parameters - st.markdown("##### Sampling Parameters") - temperature = st.slider( - "Temperature", - min_value=0.0, - max_value=1.0, - value=0.0, - step=0.1, - help="Controls the randomness of the response. Higher values make the output more creative and unexpected, lower values make it more conservative and predictable", - ) - top_p = st.slider( - "Top P", - min_value=0.0, - max_value=1.0, - value=0.95, - step=0.1, - ) - max_tokens = st.slider( - "Max Tokens", - min_value=0, - max_value=4096, - value=512, - step=1, - help="The maximum number of tokens to generate", - ) - repetition_penalty = st.slider( - "Repetition Penalty", - min_value=1.0, - max_value=2.0, - value=1.0, - step=0.1, - help="Controls the likelihood for generating the same word or phrase multiple times in the same sentence or paragraph. 1 implies no penalty, 2 will strongly discourage model to repeat words or phrases.", - ) - if candidate_type == "model": - if temperature > 0.0: - strategy = { - "type": "top_p", - "temperature": temperature, - "top_p": top_p, - } - else: - strategy = {"type": "greedy"} - - eval_candidate = { - "type": "model", - "model": selected_model, - "sampling_params": { - "strategy": strategy, - "max_tokens": max_tokens, - "repetition_penalty": repetition_penalty, - }, - } - elif candidate_type == "agent": - system_prompt = st.text_area( - "System Prompt", - value="You are a helpful AI assistant.", - help="Initial instructions given to the AI to set its behavior and context", - ) - tools_json = st.text_area( - "Tools Configuration (JSON)", - value=json.dumps( - [ - { - "type": "brave_search", - "engine": "brave", - "api_key": "ENTER_BRAVE_API_KEY_HERE", - } - ] - ), - help="Enter tool configurations in JSON format. Each tool should have a name, description, and parameters.", - height=200, - ) - try: - tools = json.loads(tools_json) - except json.JSONDecodeError: - st.error("Invalid JSON format for tools configuration") - tools = [] - eval_candidate = { - "type": "agent", - "config": { - "model": selected_model, - "instructions": system_prompt, - "tools": tools, - "tool_choice": "auto", - "tool_prompt_format": "json", - "input_shields": [], - "output_shields": [], - "enable_session_persistence": False, - }, - } - st.session_state["eval_candidate"] = eval_candidate - - if st.button("Confirm", key="confirm_2"): - st.session_state["selected_eval_candidate_2_next"] = True - - -def run_evaluation_3(): - if not st.session_state.get("selected_eval_candidate_2_next", None): - return - - st.subheader("3. Run Evaluation") - # Add info box to explain configurations being used - st.info( - """ - Review the configurations that will be used for this evaluation run, make any necessary changes, and then click the "Run Evaluation" button. 
- """ - ) - selected_benchmark = st.session_state["selected_benchmark"] - benchmarks = st.session_state["benchmarks"] - eval_candidate = st.session_state["eval_candidate"] - - dataset_id = benchmarks[selected_benchmark].dataset_id - rows = llama_stack_api.client.datasets.iterrows( - dataset_id=dataset_id, - ) - total_rows = len(rows.data) - # Add number of examples control - num_rows = st.number_input( - "Number of Examples to Evaluate", - min_value=1, - max_value=total_rows, - value=5, - help="Number of examples from the dataset to evaluate. ", - ) - - benchmark_config = { - "type": "benchmark", - "eval_candidate": eval_candidate, - "scoring_params": {}, - } - - with st.expander("View Evaluation Task", expanded=True): - st.json(benchmarks[selected_benchmark], expanded=True) - with st.expander("View Evaluation Task Configuration", expanded=True): - st.json(benchmark_config, expanded=True) - - # Add run button and handle evaluation - if st.button("Run Evaluation"): - progress_text = "Running evaluation..." - progress_bar = st.progress(0, text=progress_text) - rows = rows.data - if num_rows < total_rows: - rows = rows[:num_rows] - - # Create separate containers for progress text and results - progress_text_container = st.empty() - results_container = st.empty() - output_res = {} - for i, r in enumerate(rows): - # Update progress - progress = i / len(rows) - progress_bar.progress(progress, text=progress_text) - # Run evaluation for current row - eval_res = llama_stack_api.client.eval.evaluate_rows( - benchmark_id=selected_benchmark, - input_rows=[r], - scoring_functions=benchmarks[selected_benchmark].scoring_functions, - benchmark_config=benchmark_config, - ) - - for k in r.keys(): - if k not in output_res: - output_res[k] = [] - output_res[k].append(r[k]) - - for k in eval_res.generations[0].keys(): - if k not in output_res: - output_res[k] = [] - output_res[k].append(eval_res.generations[0][k]) - - for scoring_fn in benchmarks[selected_benchmark].scoring_functions: - if scoring_fn not in output_res: - output_res[scoring_fn] = [] - output_res[scoring_fn].append(eval_res.scores[scoring_fn].score_rows[0]) - - progress_text_container.write(f"Expand to see current processed result ({i + 1} / {len(rows)})") - results_container.json(eval_res, expanded=2) - - progress_bar.progress(1.0, text="Evaluation complete!") - # Display results in dataframe - if output_res: - output_df = pd.DataFrame(output_res) - st.subheader("Evaluation Results") - st.dataframe(output_df) - - -def native_evaluation_page(): - st.set_page_config(page_title="Evaluations (Generation + Scoring)", page_icon="šŸ¦™") - st.title("šŸ“Š Evaluations (Generation + Scoring)") - - select_benchmark_1() - define_eval_candidate_2() - run_evaluation_3() - - -native_evaluation_page() diff --git a/llama_stack/core/ui/page/playground/__init__.py b/llama_stack/core/ui/page/playground/__init__.py deleted file mode 100644 index 756f351d8..000000000 --- a/llama_stack/core/ui/page/playground/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. diff --git a/llama_stack/core/ui/page/playground/chat.py b/llama_stack/core/ui/page/playground/chat.py deleted file mode 100644 index d391d0fb7..000000000 --- a/llama_stack/core/ui/page/playground/chat.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
-# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import streamlit as st - -from llama_stack.core.ui.modules.api import llama_stack_api - -# Sidebar configurations -with st.sidebar: - st.header("Configuration") - available_models = llama_stack_api.client.models.list() - available_models = [model.identifier for model in available_models if model.model_type == "llm"] - selected_model = st.selectbox( - "Choose a model", - available_models, - index=0, - ) - - temperature = st.slider( - "Temperature", - min_value=0.0, - max_value=1.0, - value=0.0, - step=0.1, - help="Controls the randomness of the response. Higher values make the output more creative and unexpected, lower values make it more conservative and predictable", - ) - - top_p = st.slider( - "Top P", - min_value=0.0, - max_value=1.0, - value=0.95, - step=0.1, - ) - - max_tokens = st.slider( - "Max Tokens", - min_value=0, - max_value=4096, - value=512, - step=1, - help="The maximum number of tokens to generate", - ) - - repetition_penalty = st.slider( - "Repetition Penalty", - min_value=1.0, - max_value=2.0, - value=1.0, - step=0.1, - help="Controls the likelihood for generating the same word or phrase multiple times in the same sentence or paragraph. 1 implies no penalty, 2 will strongly discourage model to repeat words or phrases.", - ) - - stream = st.checkbox("Stream", value=True) - system_prompt = st.text_area( - "System Prompt", - value="You are a helpful AI assistant.", - help="Initial instructions given to the AI to set its behavior and context", - ) - - # Add clear chat button to sidebar - if st.button("Clear Chat", use_container_width=True): - st.session_state.messages = [] - st.rerun() - - -# Main chat interface -st.title("šŸ¦™ Chat") - - -# Initialize chat history -if "messages" not in st.session_state: - st.session_state.messages = [] - -# Display chat messages -for message in st.session_state.messages: - with st.chat_message(message["role"]): - st.markdown(message["content"]) - -# Chat input -if prompt := st.chat_input("Example: What is Llama Stack?"): - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - - # Display user message - with st.chat_message("user"): - st.markdown(prompt) - - # Display assistant response - with st.chat_message("assistant"): - message_placeholder = st.empty() - full_response = "" - - if temperature > 0.0: - strategy = { - "type": "top_p", - "temperature": temperature, - "top_p": top_p, - } - else: - strategy = {"type": "greedy"} - - response = llama_stack_api.client.inference.chat_completion( - messages=[ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": prompt}, - ], - model_id=selected_model, - stream=stream, - sampling_params={ - "strategy": strategy, - "max_tokens": max_tokens, - "repetition_penalty": repetition_penalty, - }, - ) - - if stream: - for chunk in response: - if chunk.event.event_type == "progress": - full_response += chunk.event.delta.text - message_placeholder.markdown(full_response + "ā–Œ") - message_placeholder.markdown(full_response) - else: - full_response = response.completion_message.content - message_placeholder.markdown(full_response) - - st.session_state.messages.append({"role": "assistant", "content": full_response}) diff --git a/llama_stack/core/ui/page/playground/rag.py b/llama_stack/core/ui/page/playground/rag.py deleted file mode 100644 index 2ffae1c33..000000000 --- 
a/llama_stack/core/ui/page/playground/rag.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. - -import uuid - -import streamlit as st -from llama_stack_client import Agent, AgentEventLogger, RAGDocument - -from llama_stack.apis.common.content_types import ToolCallDelta -from llama_stack.core.ui.modules.api import llama_stack_api -from llama_stack.core.ui.modules.utils import data_url_from_file - - -def rag_chat_page(): - st.title("šŸ¦™ RAG") - - def reset_agent_and_chat(): - st.session_state.clear() - st.cache_resource.clear() - - def should_disable_input(): - return "displayed_messages" in st.session_state and len(st.session_state.displayed_messages) > 0 - - def log_message(message): - with st.chat_message(message["role"]): - if "tool_output" in message and message["tool_output"]: - with st.expander(label="Tool Output", expanded=False, icon="šŸ› "): - st.write(message["tool_output"]) - st.markdown(message["content"]) - - with st.sidebar: - # File/Directory Upload Section - st.subheader("Upload Documents", divider=True) - uploaded_files = st.file_uploader( - "Upload file(s) or directory", - accept_multiple_files=True, - type=["txt", "pdf", "doc", "docx"], # Add more file types as needed - ) - # Process uploaded files - if uploaded_files: - st.success(f"Successfully uploaded {len(uploaded_files)} files") - # Add memory bank name input field - vector_db_name = st.text_input( - "Document Collection Name", - value="rag_vector_db", - help="Enter a unique identifier for this document collection", - ) - if st.button("Create Document Collection"): - documents = [ - RAGDocument( - document_id=uploaded_file.name, - content=data_url_from_file(uploaded_file), - ) - for i, uploaded_file in enumerate(uploaded_files) - ] - - providers = llama_stack_api.client.providers.list() - vector_io_provider = None - - for x in providers: - if x.api == "vector_io": - vector_io_provider = x.provider_id - - llama_stack_api.client.vector_dbs.register( - vector_db_id=vector_db_name, # Use the user-provided name - embedding_dimension=384, - embedding_model="all-MiniLM-L6-v2", - provider_id=vector_io_provider, - ) - - # insert documents using the custom vector db name - llama_stack_api.client.tool_runtime.rag_tool.insert( - vector_db_id=vector_db_name, # Use the user-provided name - documents=documents, - chunk_size_in_tokens=512, - ) - st.success("Vector database created successfully!") - - st.subheader("RAG Parameters", divider=True) - - rag_mode = st.radio( - "RAG mode", - ["Direct", "Agent-based"], - captions=[ - "RAG is performed by directly retrieving the information and augmenting the user query", - "RAG is performed by an agent activating a dedicated knowledge search tool.", - ], - on_change=reset_agent_and_chat, - disabled=should_disable_input(), - ) - - # select memory banks - vector_dbs = llama_stack_api.client.vector_dbs.list() - vector_dbs = [vector_db.identifier for vector_db in vector_dbs] - selected_vector_dbs = st.multiselect( - label="Select Document Collections to use in RAG queries", - options=vector_dbs, - on_change=reset_agent_and_chat, - disabled=should_disable_input(), - ) - - st.subheader("Inference Parameters", divider=True) - available_models = llama_stack_api.client.models.list() - available_models = [model.identifier for model in available_models if model.model_type == "llm"] - selected_model = st.selectbox( 
- label="Choose a model", - options=available_models, - index=0, - on_change=reset_agent_and_chat, - disabled=should_disable_input(), - ) - system_prompt = st.text_area( - "System Prompt", - value="You are a helpful assistant. ", - help="Initial instructions given to the AI to set its behavior and context", - on_change=reset_agent_and_chat, - disabled=should_disable_input(), - ) - temperature = st.slider( - "Temperature", - min_value=0.0, - max_value=1.0, - value=0.0, - step=0.1, - help="Controls the randomness of the response. Higher values make the output more creative and unexpected, lower values make it more conservative and predictable", - on_change=reset_agent_and_chat, - disabled=should_disable_input(), - ) - - top_p = st.slider( - "Top P", - min_value=0.0, - max_value=1.0, - value=0.95, - step=0.1, - on_change=reset_agent_and_chat, - disabled=should_disable_input(), - ) - - # Add clear chat button to sidebar - if st.button("Clear Chat", use_container_width=True): - reset_agent_and_chat() - st.rerun() - - # Chat Interface - if "messages" not in st.session_state: - st.session_state.messages = [] - if "displayed_messages" not in st.session_state: - st.session_state.displayed_messages = [] - - # Display chat history - for message in st.session_state.displayed_messages: - log_message(message) - - if temperature > 0.0: - strategy = { - "type": "top_p", - "temperature": temperature, - "top_p": top_p, - } - else: - strategy = {"type": "greedy"} - - @st.cache_resource - def create_agent(): - return Agent( - llama_stack_api.client, - model=selected_model, - instructions=system_prompt, - sampling_params={ - "strategy": strategy, - }, - tools=[ - dict( - name="builtin::rag/knowledge_search", - args={ - "vector_db_ids": list(selected_vector_dbs), - }, - ) - ], - ) - - if rag_mode == "Agent-based": - agent = create_agent() - if "agent_session_id" not in st.session_state: - st.session_state["agent_session_id"] = agent.create_session(session_name=f"rag_demo_{uuid.uuid4()}") - - session_id = st.session_state["agent_session_id"] - - def agent_process_prompt(prompt): - # Add user message to chat history - st.session_state.messages.append({"role": "user", "content": prompt}) - - # Send the prompt to the agent - response = agent.create_turn( - messages=[ - { - "role": "user", - "content": prompt, - } - ], - session_id=session_id, - ) - - # Display assistant response - with st.chat_message("assistant"): - retrieval_message_placeholder = st.expander(label="Tool Output", expanded=False, icon="šŸ› ") - message_placeholder = st.empty() - full_response = "" - retrieval_response = "" - for log in AgentEventLogger().log(response): - log.print() - if log.role == "tool_execution": - retrieval_response += log.content.replace("====", "").strip() - retrieval_message_placeholder.write(retrieval_response) - else: - full_response += log.content - message_placeholder.markdown(full_response + "ā–Œ") - message_placeholder.markdown(full_response) - - st.session_state.messages.append({"role": "assistant", "content": full_response}) - st.session_state.displayed_messages.append( - {"role": "assistant", "content": full_response, "tool_output": retrieval_response} - ) - - def direct_process_prompt(prompt): - # Add the system prompt in the beginning of the conversation - if len(st.session_state.messages) == 0: - st.session_state.messages.append({"role": "system", "content": system_prompt}) - - # Query the vector DB - rag_response = llama_stack_api.client.tool_runtime.rag_tool.query( - content=prompt, 
vector_db_ids=list(selected_vector_dbs) - ) - prompt_context = rag_response.content - - with st.chat_message("assistant"): - with st.expander(label="Retrieval Output", expanded=False): - st.write(prompt_context) - - retrieval_message_placeholder = st.empty() - message_placeholder = st.empty() - full_response = "" - retrieval_response = "" - - # Construct the extended prompt - extended_prompt = f"Please answer the following query using the context below.\n\nCONTEXT:\n{prompt_context}\n\nQUERY:\n{prompt}" - - # Run inference directly - st.session_state.messages.append({"role": "user", "content": extended_prompt}) - response = llama_stack_api.client.inference.chat_completion( - messages=st.session_state.messages, - model_id=selected_model, - sampling_params={ - "strategy": strategy, - }, - stream=True, - ) - - # Display assistant response - for chunk in response: - response_delta = chunk.event.delta - if isinstance(response_delta, ToolCallDelta): - retrieval_response += response_delta.tool_call.replace("====", "").strip() - retrieval_message_placeholder.info(retrieval_response) - else: - full_response += chunk.event.delta.text - message_placeholder.markdown(full_response + "ā–Œ") - message_placeholder.markdown(full_response) - - response_dict = {"role": "assistant", "content": full_response, "stop_reason": "end_of_message"} - st.session_state.messages.append(response_dict) - st.session_state.displayed_messages.append(response_dict) - - # Chat input - if prompt := st.chat_input("Ask a question about your documents"): - # Add user message to chat history - st.session_state.displayed_messages.append({"role": "user", "content": prompt}) - - # Display user message - with st.chat_message("user"): - st.markdown(prompt) - - # store the prompt to process it after page refresh - st.session_state.prompt = prompt - - # force page refresh to disable the settings widgets - st.rerun() - - if "prompt" in st.session_state and st.session_state.prompt is not None: - if rag_mode == "Agent-based": - agent_process_prompt(st.session_state.prompt) - else: # rag_mode == "Direct" - direct_process_prompt(st.session_state.prompt) - st.session_state.prompt = None - - -rag_chat_page() diff --git a/llama_stack/core/ui/page/playground/tools.py b/llama_stack/core/ui/page/playground/tools.py deleted file mode 100644 index 602c9eea1..000000000 --- a/llama_stack/core/ui/page/playground/tools.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the terms described in the LICENSE file in -# the root directory of this source tree. 
- -import enum -import json -import uuid - -import streamlit as st -from llama_stack_client import Agent -from llama_stack_client.lib.agents.react.agent import ReActAgent -from llama_stack_client.lib.agents.react.tool_parser import ReActOutput - -from llama_stack.core.ui.modules.api import llama_stack_api - - -class AgentType(enum.Enum): - REGULAR = "Regular" - REACT = "ReAct" - - -def tool_chat_page(): - st.title("šŸ›  Tools") - - client = llama_stack_api.client - models = client.models.list() - model_list = [model.identifier for model in models if model.api_model_type == "llm"] - - tool_groups = client.toolgroups.list() - tool_groups_list = [tool_group.identifier for tool_group in tool_groups] - mcp_tools_list = [tool for tool in tool_groups_list if tool.startswith("mcp::")] - builtin_tools_list = [tool for tool in tool_groups_list if not tool.startswith("mcp::")] - selected_vector_dbs = [] - - def reset_agent(): - st.session_state.clear() - st.cache_resource.clear() - - with st.sidebar: - st.title("Configuration") - st.subheader("Model") - model = st.selectbox(label="Model", options=model_list, on_change=reset_agent, label_visibility="collapsed") - - st.subheader("Available ToolGroups") - - toolgroup_selection = st.pills( - label="Built-in tools", - options=builtin_tools_list, - selection_mode="multi", - on_change=reset_agent, - format_func=lambda tool: "".join(tool.split("::")[1:]), - help="List of built-in tools from your llama stack server.", - ) - - if "builtin::rag" in toolgroup_selection: - vector_dbs = llama_stack_api.client.vector_dbs.list() or [] - if not vector_dbs: - st.info("No vector databases available for selection.") - vector_dbs = [vector_db.identifier for vector_db in vector_dbs] - selected_vector_dbs = st.multiselect( - label="Select Document Collections to use in RAG queries", - options=vector_dbs, - on_change=reset_agent, - ) - - mcp_selection = st.pills( - label="MCP Servers", - options=mcp_tools_list, - selection_mode="multi", - on_change=reset_agent, - format_func=lambda tool: "".join(tool.split("::")[1:]), - help="List of MCP servers registered to your llama stack server.", - ) - - toolgroup_selection.extend(mcp_selection) - - grouped_tools = {} - total_tools = 0 - - for toolgroup_id in toolgroup_selection: - tools = client.tools.list(toolgroup_id=toolgroup_id) - grouped_tools[toolgroup_id] = [tool.identifier for tool in tools] - total_tools += len(tools) - - st.markdown(f"Active Tools: šŸ›  {total_tools}") - - for group_id, tools in grouped_tools.items(): - with st.expander(f"šŸ”§ Tools from `{group_id}`"): - for idx, tool in enumerate(tools, start=1): - st.markdown(f"{idx}. 
`{tool.split(':')[-1]}`") - - st.subheader("Agent Configurations") - st.subheader("Agent Type") - agent_type = st.radio( - label="Select Agent Type", - options=["Regular", "ReAct"], - on_change=reset_agent, - ) - - if agent_type == "ReAct": - agent_type = AgentType.REACT - else: - agent_type = AgentType.REGULAR - - max_tokens = st.slider( - "Max Tokens", - min_value=0, - max_value=4096, - value=512, - step=64, - help="The maximum number of tokens to generate", - on_change=reset_agent, - ) - - for i, tool_name in enumerate(toolgroup_selection): - if tool_name == "builtin::rag": - tool_dict = dict( - name="builtin::rag", - args={ - "vector_db_ids": list(selected_vector_dbs), - }, - ) - toolgroup_selection[i] = tool_dict - - @st.cache_resource - def create_agent(): - if "agent_type" in st.session_state and st.session_state.agent_type == AgentType.REACT: - return ReActAgent( - client=client, - model=model, - tools=toolgroup_selection, - response_format={ - "type": "json_schema", - "json_schema": ReActOutput.model_json_schema(), - }, - sampling_params={"strategy": {"type": "greedy"}, "max_tokens": max_tokens}, - ) - else: - return Agent( - client, - model=model, - instructions="You are a helpful assistant. When you use a tool always respond with a summary of the result.", - tools=toolgroup_selection, - sampling_params={"strategy": {"type": "greedy"}, "max_tokens": max_tokens}, - ) - - st.session_state.agent_type = agent_type - - agent = create_agent() - - if "agent_session_id" not in st.session_state: - st.session_state["agent_session_id"] = agent.create_session(session_name=f"tool_demo_{uuid.uuid4()}") - - session_id = st.session_state["agent_session_id"] - - if "messages" not in st.session_state: - st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}] - - for msg in st.session_state.messages: - with st.chat_message(msg["role"]): - st.markdown(msg["content"]) - - if prompt := st.chat_input(placeholder=""): - with st.chat_message("user"): - st.markdown(prompt) - - st.session_state.messages.append({"role": "user", "content": prompt}) - - turn_response = agent.create_turn( - session_id=session_id, - messages=[{"role": "user", "content": prompt}], - stream=True, - ) - - def response_generator(turn_response): - if st.session_state.get("agent_type") == AgentType.REACT: - return _handle_react_response(turn_response) - else: - return _handle_regular_response(turn_response) - - def _handle_react_response(turn_response): - current_step_content = "" - final_answer = None - tool_results = [] - - for response in turn_response: - if not hasattr(response.event, "payload"): - yield ( - "\n\n🚨 :red[_Llama Stack server Error:_]\n" - "The response received is missing an expected `payload` attribute.\n" - "This could indicate a malformed response or an internal issue within the server.\n\n" - f"Error details: {response}" - ) - return - - payload = response.event.payload - - if payload.event_type == "step_progress" and hasattr(payload.delta, "text"): - current_step_content += payload.delta.text - continue - - if payload.event_type == "step_complete": - step_details = payload.step_details - - if step_details.step_type == "inference": - yield from _process_inference_step(current_step_content, tool_results, final_answer) - current_step_content = "" - elif step_details.step_type == "tool_execution": - tool_results = _process_tool_execution(step_details, tool_results) - current_step_content = "" - else: - current_step_content = "" - - if not final_answer and tool_results: - yield from 
_format_tool_results_summary(tool_results) - - def _process_inference_step(current_step_content, tool_results, final_answer): - try: - react_output_data = json.loads(current_step_content) - thought = react_output_data.get("thought") - action = react_output_data.get("action") - answer = react_output_data.get("answer") - - if answer and answer != "null" and answer is not None: - final_answer = answer - - if thought: - with st.expander("šŸ¤” Thinking...", expanded=False): - st.markdown(f":grey[__{thought}__]") - - if action and isinstance(action, dict): - tool_name = action.get("tool_name") - tool_params = action.get("tool_params") - with st.expander(f'šŸ›  Action: Using tool "{tool_name}"', expanded=False): - st.json(tool_params) - - if answer and answer != "null" and answer is not None: - yield f"\n\nāœ… **Final Answer:**\n{answer}" - - except json.JSONDecodeError: - yield f"\n\nFailed to parse ReAct step content:\n```json\n{current_step_content}\n```" - except Exception as e: - yield f"\n\nFailed to process ReAct step: {e}\n```json\n{current_step_content}\n```" - - return final_answer - - def _process_tool_execution(step_details, tool_results): - try: - if hasattr(step_details, "tool_responses") and step_details.tool_responses: - for tool_response in step_details.tool_responses: - tool_name = tool_response.tool_name - content = tool_response.content - tool_results.append((tool_name, content)) - with st.expander(f'āš™ļø Observation (Result from "{tool_name}")', expanded=False): - try: - parsed_content = json.loads(content) - st.json(parsed_content) - except json.JSONDecodeError: - st.code(content, language=None) - else: - with st.expander("āš™ļø Observation", expanded=False): - st.markdown(":grey[_Tool execution step completed, but no response data found._]") - except Exception as e: - with st.expander("āš™ļø Error in Tool Execution", expanded=False): - st.markdown(f":red[_Error processing tool execution: {str(e)}_]") - - return tool_results - - def _format_tool_results_summary(tool_results): - yield "\n\n**Here's what I found:**\n" - for tool_name, content in tool_results: - try: - parsed_content = json.loads(content) - - if tool_name == "web_search" and "top_k" in parsed_content: - yield from _format_web_search_results(parsed_content) - elif "results" in parsed_content and isinstance(parsed_content["results"], list): - yield from _format_results_list(parsed_content["results"]) - elif isinstance(parsed_content, dict) and len(parsed_content) > 0: - yield from _format_dict_results(parsed_content) - elif isinstance(parsed_content, list) and len(parsed_content) > 0: - yield from _format_list_results(parsed_content) - except json.JSONDecodeError: - yield f"\n**{tool_name}** was used but returned complex data. 
Check the observation for details.\n" - except (TypeError, AttributeError, KeyError, IndexError) as e: - print(f"Error processing {tool_name} result: {type(e).__name__}: {e}") - - def _format_web_search_results(parsed_content): - for i, result in enumerate(parsed_content["top_k"], 1): - if i <= 3: - title = result.get("title", "Untitled") - url = result.get("url", "") - content_text = result.get("content", "").strip() - yield f"\n- **{title}**\n {content_text}\n [Source]({url})\n" - - def _format_results_list(results): - for i, result in enumerate(results, 1): - if i <= 3: - if isinstance(result, dict): - name = result.get("name", result.get("title", "Result " + str(i))) - description = result.get("description", result.get("content", result.get("summary", ""))) - yield f"\n- **{name}**\n {description}\n" - else: - yield f"\n- {result}\n" - - def _format_dict_results(parsed_content): - yield "\n```\n" - for key, value in list(parsed_content.items())[:5]: - if isinstance(value, str) and len(value) < 100: - yield f"{key}: {value}\n" - else: - yield f"{key}: [Complex data]\n" - yield "```\n" - - def _format_list_results(parsed_content): - yield "\n" - for _, item in enumerate(parsed_content[:3], 1): - if isinstance(item, str): - yield f"- {item}\n" - elif isinstance(item, dict) and "text" in item: - yield f"- {item['text']}\n" - elif isinstance(item, dict) and len(item) > 0: - first_value = next(iter(item.values())) - if isinstance(first_value, str) and len(first_value) < 100: - yield f"- {first_value}\n" - - def _handle_regular_response(turn_response): - for response in turn_response: - if hasattr(response.event, "payload"): - print(response.event.payload) - if response.event.payload.event_type == "step_progress": - if hasattr(response.event.payload.delta, "text"): - yield response.event.payload.delta.text - if response.event.payload.event_type == "step_complete": - if response.event.payload.step_details.step_type == "tool_execution": - if response.event.payload.step_details.tool_calls: - tool_name = str(response.event.payload.step_details.tool_calls[0].tool_name) - yield f'\n\nšŸ›  :grey[_Using "{tool_name}" tool:_]\n\n' - else: - yield "No tool_calls present in step_details" - else: - yield f"Error occurred in the Llama Stack Cluster: {response}" - - with st.chat_message("assistant"): - response_content = st.write_stream(response_generator(turn_response)) - - st.session_state.messages.append({"role": "assistant", "content": response_content}) - - -tool_chat_page() diff --git a/llama_stack/core/ui/requirements.txt b/llama_stack/core/ui/requirements.txt deleted file mode 100644 index 53a1e7bf3..000000000 --- a/llama_stack/core/ui/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -llama-stack>=0.2.1 -llama-stack-client>=0.2.1 -pandas -streamlit -streamlit-option-menu diff --git a/pyproject.toml b/pyproject.toml index ce95b758f..b6522fc5e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,13 +52,6 @@ dependencies = [ ] [project.optional-dependencies] -ui = [ - "streamlit", - "pandas", - "llama-stack-client>=0.2.21", - "streamlit-option-menu", -] - [dependency-groups] dev = [ "pytest>=8.4", diff --git a/uv.lock b/uv.lock index 065eb3876..7151c40d1 100644 --- a/uv.lock +++ b/uv.lock @@ -104,22 +104,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/7e/b3/6b4067be973ae96ba0d615946e314c5ae35f9f993eca561b356540bb0c2b/alabaster-1.0.0-py3-none-any.whl", hash = "sha256:fc6786402dc3fcb2de3cabd5fe455a2db534b371124f1f21de8731783dec828b", size = 13929, upload-time = 
"2024-07-26T18:15:02.05Z" }, ] -[[package]] -name = "altair" -version = "5.5.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "jinja2" }, - { name = "jsonschema" }, - { name = "narwhals" }, - { name = "packaging" }, - { name = "typing-extensions", marker = "python_full_version < '3.14'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/16/b1/f2969c7bdb8ad8bbdda031687defdce2c19afba2aa2c8e1d2a17f78376d8/altair-5.5.0.tar.gz", hash = "sha256:d960ebe6178c56de3855a68c47b516be38640b73fb3b5111c2a9ca90546dd73d", size = 705305, upload-time = "2024-11-23T23:39:58.542Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/aa/f3/0b6ced594e51cc95d8c1fc1640d3623770d01e4969d29c0bd09945fafefa/altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c", size = 731200, upload-time = "2024-11-23T23:39:56.4Z" }, -] - [[package]] name = "annotated-types" version = "0.7.0" @@ -1176,30 +1160,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/84/ca/c4e36a9b1bcce9958d8886aa4f7b262c8e9a7c43a284f2d79abfc9ba715d/geventhttpclient-2.3.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:416cc70adb3d34759e782d2e120b4432752399b85ac9758932ecd12274a104c3", size = 114999, upload-time = "2025-08-24T12:17:19.978Z" }, ] -[[package]] -name = "gitdb" -version = "4.0.12" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "smmap" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" }, -] - -[[package]] -name = "gitpython" -version = "3.1.45" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "gitdb" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/9a/c8/dd58967d119baab745caec2f9d853297cec1989ec1d63f677d3880632b88/gitpython-3.1.45.tar.gz", hash = "sha256:85b0ee964ceddf211c41b9f27a49086010a190fd8132a24e21f362a4b36a791c", size = 215076, upload-time = "2025-07-24T03:45:54.871Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/01/61/d4b89fec821f72385526e1b9d9a3a0385dda4a72b206d28049e2c7cd39b8/gitpython-3.1.45-py3-none-any.whl", hash = "sha256:8908cb2e02fb3b93b7eb0f2827125cb699869470432cc885f019b8fd0fccff77", size = 208168, upload-time = "2025-07-24T03:45:52.517Z" }, -] - [[package]] name = "google-auth" version = "1.6.3" @@ -1779,14 +1739,6 @@ dependencies = [ { name = "uvicorn" }, ] -[package.optional-dependencies] -ui = [ - { name = "llama-stack-client" }, - { name = "pandas" }, - { name = "streamlit" }, - { name = "streamlit-option-menu" }, -] - [package.dev-dependencies] benchmark = [ { name = "locust" }, @@ -1886,11 +1838,9 @@ requires-dist = [ { name = "jinja2", specifier = ">=3.1.6" }, { name = "jsonschema" }, { name = "llama-stack-client", specifier = ">=0.2.21" }, - { name = "llama-stack-client", marker = "extra == 'ui'", specifier = ">=0.2.21" }, { name = "openai", specifier = ">=1.100.0" }, { name = "opentelemetry-exporter-otlp-proto-http", specifier = ">=1.30.0" }, { name = 
"opentelemetry-sdk", specifier = ">=1.30.0" }, - { name = "pandas", marker = "extra == 'ui'" }, { name = "pillow" }, { name = "prompt-toolkit" }, { name = "pydantic", specifier = ">=2" }, @@ -1899,13 +1849,10 @@ requires-dist = [ { name = "python-multipart", specifier = ">=0.0.20" }, { name = "rich" }, { name = "starlette" }, - { name = "streamlit", marker = "extra == 'ui'" }, - { name = "streamlit-option-menu", marker = "extra == 'ui'" }, { name = "termcolor" }, { name = "tiktoken" }, { name = "uvicorn", specifier = ">=0.34.0" }, ] -provides-extras = ["ui"] [package.metadata.requires-dev] benchmark = [{ name = "locust", specifier = ">=2.39.1" }] @@ -2437,15 +2384,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5f/df/76d0321c3797b54b60fef9ec3bd6f4cfd124b9e422182156a1dd418722cf/myst_parser-4.0.1-py3-none-any.whl", hash = "sha256:9134e88959ec3b5780aedf8a99680ea242869d012e8821db3126d427edc9c95d", size = 84579, upload-time = "2025-02-12T10:53:02.078Z" }, ] -[[package]] -name = "narwhals" -version = "2.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/d9/58/0fbbfb13662297c8447d1872670da79f3f2a63fb68ae9aac9965cdc2d428/narwhals-2.0.0.tar.gz", hash = "sha256:d967bea54dfb6cd787abf3865ab4d72b8259d8f798c1c12c4eb693d5e9cebb24", size = 525527, upload-time = "2025-07-28T08:12:43.407Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/93/9d/9e2afb7d3d43bfa1a1f80d2da291064753305f9871851f1cd5a60d870893/narwhals-2.0.0-py3-none-any.whl", hash = "sha256:9c9fe8a969b090d783edbcb3b58e1d0d15f5100fdf85b53f5e76d38f4ce7f19a", size = 385206, upload-time = "2025-07-28T08:12:39.545Z" }, -] - [[package]] name = "nbformat" version = "5.10.4" @@ -3462,19 +3400,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/58/f0/427018098906416f580e3cf1366d3b1abfb408a0652e9f31600c24a1903c/pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796", size = 45235, upload-time = "2025-06-24T13:26:45.485Z" }, ] -[[package]] -name = "pydeck" -version = "0.9.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "jinja2" }, - { name = "numpy" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/a1/ca/40e14e196864a0f61a92abb14d09b3d3da98f94ccb03b49cf51688140dab/pydeck-0.9.1.tar.gz", hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605", size = 3832240, upload-time = "2024-05-10T15:36:21.153Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/ab/4c/b888e6cf58bd9db9c93f40d1c6be8283ff49d88919231afe93a6bcf61626/pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038", size = 6900403, upload-time = "2024-05-10T15:36:17.36Z" }, -] - [[package]] name = "pygments" version = "2.19.2" @@ -4159,15 +4084,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, ] -[[package]] -name = "smmap" -version = "5.0.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, 
upload-time = "2025-01-02T07:14:40.909Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" }, -] - [[package]] name = "sniffio" version = "1.3.1" @@ -4542,47 +4458,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" }, ] -[[package]] -name = "streamlit" -version = "1.47.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "altair" }, - { name = "blinker" }, - { name = "cachetools" }, - { name = "click" }, - { name = "gitpython" }, - { name = "numpy" }, - { name = "packaging" }, - { name = "pandas" }, - { name = "pillow" }, - { name = "protobuf" }, - { name = "pyarrow" }, - { name = "pydeck" }, - { name = "requests" }, - { name = "tenacity" }, - { name = "toml" }, - { name = "tornado" }, - { name = "typing-extensions" }, - { name = "watchdog", marker = "sys_platform != 'darwin'" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/19/da/cef67ed4614f04932a00068fe6291455deb884a04fd94f7ad78492b0e91a/streamlit-1.47.1.tar.gz", hash = "sha256:daed79763d1cafeb03cdd800b91aa9c7adc3688c6b2cbf4ecc2ca899aab82a2a", size = 9544057, upload-time = "2025-07-25T15:37:08.482Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/c0/4d/701f5fcf9c0d388dad9d94ba272d333c7efa6231ddee1babc59d26dc14d2/streamlit-1.47.1-py3-none-any.whl", hash = "sha256:c7881549e3ba1daecfb5541f32ee6ff70e549f1c3400c92d045897cb7a29772a", size = 9944872, upload-time = "2025-07-25T15:37:05.758Z" }, -] - -[[package]] -name = "streamlit-option-menu" -version = "0.4.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "streamlit" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/5e/27/72dc451cdaef1714fd0d75cc430e50a06c12c9046295fdf1f94af1b766eb/streamlit-option-menu-0.4.0.tar.gz", hash = "sha256:48ec69d59e547fa2fa4bfae001620df8af56a80de2f765ddbb9fcbfb84017129", size = 827290, upload-time = "2024-10-09T14:51:01.685Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fd/52/2f525ad4262dc83d67297f69ec5afcee1438b9e9ae22aa318396725ddbed/streamlit_option_menu-0.4.0-py3-none-any.whl", hash = "sha256:a55fc7554047b6db371595af2182e435b8a2c715ee6124e8543685bd4670b07e", size = 829255, upload-time = "2024-10-09T14:50:59.707Z" }, -] - [[package]] name = "sympy" version = "1.14.0" @@ -4694,15 +4569,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/41/f2/fd673d979185f5dcbac4be7d09461cbb99751554ffb6718d0013af8604cb/tokenizers-0.21.4-cp39-abi3-win_amd64.whl", hash = "sha256:475d807a5c3eb72c59ad9b5fcdb254f6e17f53dfcbb9903233b0dfa9c943b597", size = 2507568, upload-time = "2025-07-28T15:48:55.456Z" }, ] -[[package]] -name = "toml" -version = "0.10.2" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" } -wheels = [ - { url = 
"https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" }, -] - [[package]] name = "tomli" version = "2.2.1" @@ -5068,24 +4934,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/5c/c6/f8f28009920a736d0df434b52e9feebfb4d702ba942f15338cb4a83eafc1/virtualenv-20.32.0-py3-none-any.whl", hash = "sha256:2c310aecb62e5aa1b06103ed7c2977b81e042695de2697d01017ff0f1034af56", size = 6057761, upload-time = "2025-07-21T04:09:48.059Z" }, ] -[[package]] -name = "watchdog" -version = "6.0.0" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, - { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, - { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, - { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, - { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, - { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, - { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, - { url = 
"https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, - { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, -] - [[package]] name = "watchfiles" version = "1.1.0"