From b174effe050623354d738420f878bc30f29dc4d0 Mon Sep 17 00:00:00 2001
From: Angel Nunez Mencias
Date: Tue, 3 Jun 2025 20:07:06 +0200
Subject: [PATCH] fix security update

---
 llama_stack/distribution/ui/app.py            |  8 +++++++-
 llama_stack/distribution/ui/modules/api.py    |  2 --
 .../ui/page/distribution/datasets.py          |  4 ++--
 .../ui/page/distribution/eval_tasks.py        |  4 ++--
 .../ui/page/distribution/models.py            |  4 ++--
 .../ui/page/distribution/providers.py         |  4 ++--
 .../ui/page/distribution/scoring_functions.py |  4 ++--
 .../ui/page/distribution/shields.py           |  4 ++--
 .../ui/page/distribution/vector_dbs.py        |  4 ++--
 .../ui/page/evaluations/app_eval.py           |  8 ++++----
 .../ui/page/evaluations/native_eval.py        | 10 +++++-----
 .../distribution/ui/page/playground/chat.py   |  6 +++---
 .../distribution/ui/page/playground/rag.py    | 18 +++++++++---------
 .../distribution/ui/page/playground/tools.py  |  6 +++---
 14 files changed, 45 insertions(+), 41 deletions(-)

diff --git a/llama_stack/distribution/ui/app.py b/llama_stack/distribution/ui/app.py
index a7a13bd99..a9a28b445 100644
--- a/llama_stack/distribution/ui/app.py
+++ b/llama_stack/distribution/ui/app.py
@@ -73,8 +73,14 @@ if __name__ == "__main__":
         url=keycloak_url,
         realm=keycloak_realm,
         client_id=keycloak_client_id,
+        custom_labels={
+            "labelButton": "Sign in to kvant",
+            "labelLogin": "Please sign in to your kvant account.",
+            "errorNoPopup": "Unable to open the authentication popup. Allow popups and refresh the page to proceed.",
+            "errorPopupClosed": "Authentication popup was closed manually.",
+            "errorFatal": "Unable to connect to Keycloak using the current configuration."
+        },
         auto_refresh=True,
-        init_options={"checkLoginIframe": False},
     )
 
     if keycloak.authenticated:
diff --git a/llama_stack/distribution/ui/modules/api.py b/llama_stack/distribution/ui/modules/api.py
index 44969ea71..a426e59ba 100644
--- a/llama_stack/distribution/ui/modules/api.py
+++ b/llama_stack/distribution/ui/modules/api.py
@@ -30,5 +30,3 @@ class LlamaStackApi:
         scoring_params = {fn_id: None for fn_id in scoring_function_ids}
 
         return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)
-
-llama_stack_api = LlamaStackApi()
diff --git a/llama_stack/distribution/ui/page/distribution/datasets.py b/llama_stack/distribution/ui/page/distribution/datasets.py
index 6842b29a7..89f645ca8 100644
--- a/llama_stack/distribution/ui/page/distribution/datasets.py
+++ b/llama_stack/distribution/ui/page/distribution/datasets.py
@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def datasets():
     st.header("Datasets")
 
-    datasets_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.datasets.list()}
+    datasets_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.datasets.list()}
     if len(datasets_info) > 0:
         selected_dataset = st.selectbox("Select a dataset", list(datasets_info.keys()))
         st.json(datasets_info[selected_dataset], expanded=True)
diff --git a/llama_stack/distribution/ui/page/distribution/eval_tasks.py b/llama_stack/distribution/ui/page/distribution/eval_tasks.py
index 492be4700..2b70f9202 100644
--- a/llama_stack/distribution/ui/page/distribution/eval_tasks.py
+++ b/llama_stack/distribution/ui/page/distribution/eval_tasks.py
@@ -6,14 +6,14 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def benchmarks():
     # Benchmarks Section
     st.header("Benchmarks")
 
-    benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()}
+    benchmarks_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.benchmarks.list()}
 
     if len(benchmarks_info) > 0:
         selected_benchmark = st.selectbox("Select an eval task", list(benchmarks_info.keys()), key="benchmark_inspect")
diff --git a/llama_stack/distribution/ui/page/distribution/models.py b/llama_stack/distribution/ui/page/distribution/models.py
index f29459098..3b96f179f 100644
--- a/llama_stack/distribution/ui/page/distribution/models.py
+++ b/llama_stack/distribution/ui/page/distribution/models.py
@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def models():
     # Models Section
     st.header("Models")
-    models_info = {m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()}
+    models_info = {m.identifier: m.to_dict() for m in LlamaStackApi().client.models.list()}
 
     selected_model = st.selectbox("Select a model", list(models_info.keys()))
     st.json(models_info[selected_model])
diff --git a/llama_stack/distribution/ui/page/distribution/providers.py b/llama_stack/distribution/ui/page/distribution/providers.py
index c660cb986..116237b13 100644
--- a/llama_stack/distribution/ui/page/distribution/providers.py
+++ b/llama_stack/distribution/ui/page/distribution/providers.py
@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def providers():
     st.header("🔍 API Providers")
-    apis_providers_lst = llama_stack_api.client.providers.list()
+    apis_providers_lst = LlamaStackApi().client.providers.list()
     api_to_providers = {}
     for api_provider in apis_providers_lst:
         if api_provider.api in api_to_providers:
diff --git a/llama_stack/distribution/ui/page/distribution/scoring_functions.py b/llama_stack/distribution/ui/page/distribution/scoring_functions.py
index 193146356..3c3428f44 100644
--- a/llama_stack/distribution/ui/page/distribution/scoring_functions.py
+++ b/llama_stack/distribution/ui/page/distribution/scoring_functions.py
@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def scoring_functions():
     st.header("Scoring Functions")
 
-    scoring_functions_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.scoring_functions.list()}
+    scoring_functions_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.scoring_functions.list()}
 
     selected_scoring_function = st.selectbox("Select a scoring function", list(scoring_functions_info.keys()))
     st.json(scoring_functions_info[selected_scoring_function], expanded=True)
diff --git a/llama_stack/distribution/ui/page/distribution/shields.py b/llama_stack/distribution/ui/page/distribution/shields.py
index 67d66d64f..84b583980 100644
--- a/llama_stack/distribution/ui/page/distribution/shields.py
+++ b/llama_stack/distribution/ui/page/distribution/shields.py
@@ -6,14 +6,14 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def shields():
     # Shields Section
     st.header("Shields")
 
-    shields_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()}
+    shields_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.shields.list()}
 
     selected_shield = st.selectbox("Select a shield", list(shields_info.keys()))
     st.json(shields_info[selected_shield])
diff --git a/llama_stack/distribution/ui/page/distribution/vector_dbs.py b/llama_stack/distribution/ui/page/distribution/vector_dbs.py
index 49a4f25bb..e7eb7b13b 100644
--- a/llama_stack/distribution/ui/page/distribution/vector_dbs.py
+++ b/llama_stack/distribution/ui/page/distribution/vector_dbs.py
@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def vector_dbs():
     st.header("Vector Databases")
-    vector_dbs_info = {v.identifier: v.to_dict() for v in llama_stack_api.client.vector_dbs.list()}
+    vector_dbs_info = {v.identifier: v.to_dict() for v in LlamaStackApi().client.vector_dbs.list()}
 
     if len(vector_dbs_info) > 0:
         selected_vector_db = st.selectbox("Select a vector database", list(vector_dbs_info.keys()))
diff --git a/llama_stack/distribution/ui/page/evaluations/app_eval.py b/llama_stack/distribution/ui/page/evaluations/app_eval.py
index d7bc6388c..13da6071e 100644
--- a/llama_stack/distribution/ui/page/evaluations/app_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/app_eval.py
@@ -9,7 +9,7 @@ import json
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 from llama_stack.distribution.ui.modules.utils import process_dataset
 
 
@@ -39,7 +39,7 @@ def application_evaluation_page():
 
     # Select Scoring Functions to Run Evaluation On
     st.subheader("Select Scoring Functions")
-    scoring_functions = llama_stack_api.client.scoring_functions.list()
+    scoring_functions = LlamaStackApi().client.scoring_functions.list()
     scoring_functions = {sf.identifier: sf for sf in scoring_functions}
     scoring_functions_names = list(scoring_functions.keys())
     selected_scoring_functions = st.multiselect(
@@ -48,7 +48,7 @@ def application_evaluation_page():
         help="Choose one or more scoring functions.",
     )
 
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [m.identifier for m in available_models]
 
     scoring_params = {}
@@ -108,7 +108,7 @@ def application_evaluation_page():
                 progress_bar.progress(progress, text=progress_text)
 
                 # Run evaluation for current row
-                score_res = llama_stack_api.run_scoring(
+                score_res = LlamaStackApi().run_scoring(
                     r,
                     scoring_function_ids=selected_scoring_functions,
                     scoring_params=scoring_params,
diff --git a/llama_stack/distribution/ui/page/evaluations/native_eval.py b/llama_stack/distribution/ui/page/evaluations/native_eval.py
index 97f875e17..133c3b151 100644
--- a/llama_stack/distribution/ui/page/evaluations/native_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/native_eval.py
@@ -9,13 +9,13 @@ import json
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def select_benchmark_1():
     # Select Benchmarks
     st.subheader("1. Choose An Eval Task")
-    benchmarks = llama_stack_api.client.benchmarks.list()
+    benchmarks = LlamaStackApi().client.benchmarks.list()
     benchmarks = {et.identifier: et for et in benchmarks}
     benchmarks_names = list(benchmarks.keys())
     selected_benchmark = st.selectbox(
@@ -47,7 +47,7 @@ def define_eval_candidate_2():
     # Define Eval Candidate
     candidate_type = st.radio("Candidate Type", ["model", "agent"])
 
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [model.identifier for model in available_models]
     selected_model = st.selectbox(
         "Choose a model",
@@ -167,7 +167,7 @@ def run_evaluation_3():
     eval_candidate = st.session_state["eval_candidate"]
 
     dataset_id = benchmarks[selected_benchmark].dataset_id
-    rows = llama_stack_api.client.datasets.iterrows(
+    rows = LlamaStackApi().client.datasets.iterrows(
         dataset_id=dataset_id,
     )
     total_rows = len(rows.data)
@@ -208,7 +208,7 @@ def run_evaluation_3():
             progress = i / len(rows)
             progress_bar.progress(progress, text=progress_text)
             # Run evaluation for current row
-            eval_res = llama_stack_api.client.eval.evaluate_rows(
+            eval_res = LlamaStackApi().client.eval.evaluate_rows(
                 benchmark_id=selected_benchmark,
                 input_rows=[r],
                 scoring_functions=benchmarks[selected_benchmark].scoring_functions,
diff --git a/llama_stack/distribution/ui/page/playground/chat.py b/llama_stack/distribution/ui/page/playground/chat.py
index fcaf08795..053ae42de 100644
--- a/llama_stack/distribution/ui/page/playground/chat.py
+++ b/llama_stack/distribution/ui/page/playground/chat.py
@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 # Sidebar configurations
 with st.sidebar:
     st.header("Configuration")
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [model.identifier for model in available_models if model.model_type == "llm"]
     selected_model = st.selectbox(
         "Choose a model",
@@ -103,7 +103,7 @@ if prompt := st.chat_input("Example: What is Llama Stack?"):
         else:
             strategy = {"type": "greedy"}
 
-        response = llama_stack_api.client.inference.chat_completion(
+        response = LlamaStackApi().client.inference.chat_completion(
             messages=[
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": prompt},
diff --git a/llama_stack/distribution/ui/page/playground/rag.py b/llama_stack/distribution/ui/page/playground/rag.py
index 696d89bc2..94e27a255 100644
--- a/llama_stack/distribution/ui/page/playground/rag.py
+++ b/llama_stack/distribution/ui/page/playground/rag.py
@@ -10,7 +10,7 @@ import streamlit as st
 from llama_stack_client import Agent, AgentEventLogger, RAGDocument
 
 from llama_stack.apis.common.content_types import ToolCallDelta
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 from llama_stack.distribution.ui.modules.utils import data_url_from_file
 
 
@@ -57,14 +57,14 @@ def rag_chat_page():
             for i, uploaded_file in enumerate(uploaded_files)
         ]
 
-        providers = llama_stack_api.client.providers.list()
+        providers = LlamaStackApi().client.providers.list()
         vector_io_provider = None
 
         for x in providers:
             if x.api == "vector_io":
                 vector_io_provider = x.provider_id
 
-        llama_stack_api.client.vector_dbs.register(
+        LlamaStackApi().client.vector_dbs.register(
             vector_db_id=vector_db_name,  # Use the user-provided name
             embedding_dimension=384,
             embedding_model="all-MiniLM-L6-v2",
@@ -72,7 +72,7 @@ def rag_chat_page():
         )
 
         # insert documents using the custom vector db name
-        llama_stack_api.client.tool_runtime.rag_tool.insert(
+        LlamaStackApi().client.tool_runtime.rag_tool.insert(
             vector_db_id=vector_db_name,  # Use the user-provided name
             documents=documents,
             chunk_size_in_tokens=512,
@@ -93,7 +93,7 @@ def rag_chat_page():
     )
 
     # select memory banks
-    vector_dbs = llama_stack_api.client.vector_dbs.list()
+    vector_dbs = LlamaStackApi().client.vector_dbs.list()
     vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
     selected_vector_dbs = st.multiselect(
         label="Select Document Collections to use in RAG queries",
@@ -103,7 +103,7 @@ def rag_chat_page():
    )
 
    st.subheader("Inference Parameters", divider=True)
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [model.identifier for model in available_models if model.model_type == "llm"]
     selected_model = st.selectbox(
         label="Choose a model",
@@ -167,7 +167,7 @@ def rag_chat_page():
     @st.cache_resource
     def create_agent():
         return Agent(
-            llama_stack_api.client,
+            LlamaStackApi().client,
             model=selected_model,
             instructions=system_prompt,
             sampling_params={
@@ -232,7 +232,7 @@ def rag_chat_page():
             st.session_state.messages.append({"role": "system", "content": system_prompt})
 
         # Query the vector DB
-        rag_response = llama_stack_api.client.tool_runtime.rag_tool.query(
+        rag_response = LlamaStackApi().client.tool_runtime.rag_tool.query(
             content=prompt, vector_db_ids=list(selected_vector_dbs)
         )
         prompt_context = rag_response.content
@@ -251,7 +251,7 @@ def rag_chat_page():
 
             # Run inference directly
             st.session_state.messages.append({"role": "user", "content": extended_prompt})
-            response = llama_stack_api.client.inference.chat_completion(
+            response = LlamaStackApi().client.inference.chat_completion(
                 messages=st.session_state.messages,
                 model_id=selected_model,
                 sampling_params={
diff --git a/llama_stack/distribution/ui/page/playground/tools.py b/llama_stack/distribution/ui/page/playground/tools.py
index 149d8cce9..570bfb366 100644
--- a/llama_stack/distribution/ui/page/playground/tools.py
+++ b/llama_stack/distribution/ui/page/playground/tools.py
@@ -13,7 +13,7 @@ from llama_stack_client import Agent
 from llama_stack_client.lib.agents.react.agent import ReActAgent
 from llama_stack_client.lib.agents.react.tool_parser import ReActOutput
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 class AgentType(enum.Enum):
@@ -24,7 +24,7 @@ class AgentType(enum.Enum):
 def tool_chat_page():
     st.title("🛠 Tools")
 
-    client = llama_stack_api.client
+    client = LlamaStackApi().client
     models = client.models.list()
     model_list = [model.identifier for model in models if model.api_model_type == "llm"]
 
@@ -55,7 +55,7 @@ def tool_chat_page():
         )
 
         if "builtin::rag" in toolgroup_selection:
-            vector_dbs = llama_stack_api.client.vector_dbs.list() or []
+            vector_dbs = LlamaStackApi().client.vector_dbs.list() or []
             if not vector_dbs:
                 st.info("No vector databases available for selection.")
             vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
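
Note: the one edit this patch repeats across every UI page is replacing the
module-level singleton (the "llama_stack_api = LlamaStackApi()" removed from
modules/api.py) with a LlamaStackApi() constructed at each call site. A
minimal sketch of the resulting pattern, assuming the constructor picks up
the current session's credentials when it builds the client; the helper
below is illustrative only and not part of the patch:

    from llama_stack.distribution.ui.modules.api import LlamaStackApi

    def list_llm_ids() -> list[str]:
        # Build a fresh client per call so one authenticated client is
        # never shared across Streamlit sessions; keep only LLM-type
        # models, as the playground pages do.
        models = LlamaStackApi().client.models.list()
        return [m.identifier for m in models if m.model_type == "llm"]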