fix security update
All checks were successful
Build and Push playground container / build-playground (push) Successful in 1m57s
Build and Push container / build (push) Successful in 4m33s

This commit is contained in:
Angel Nunez Mencias 2025-06-03 20:07:06 +02:00
parent 8943b283e9
commit b174effe05
Signed by: angel.nunez
SSH key fingerprint: SHA256:z1nFAg1v1AfbhEHrgBetByUJUwziv2R2f4VyN75opcg
14 changed files with 45 additions and 41 deletions

View file

@@ -73,8 +73,14 @@ if __name__ == "__main__":
         url=keycloak_url,
         realm=keycloak_realm,
         client_id=keycloak_client_id,
+        custom_labels={
+            "labelButton": "Sign in to kvant",
+            "labelLogin": "Please sign in to your kvant account.",
+            "errorNoPopup": "Unable to open the authentication popup. Allow popups and refresh the page to proceed.",
+            "errorPopupClosed": "Authentication popup was closed manually.",
+            "errorFatal": "Unable to connect to Keycloak using the current configuration."
+        },
         auto_refresh=True,
+        init_options={"checkLoginIframe": False},
     )
     if keycloak.authenticated:

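For orientation, the keycloak object configured above comes from a Streamlit Keycloak login widget. A minimal sketch of the same configuration, assuming the streamlit-keycloak package's login helper and hypothetical environment-variable names for the connection values (none of which appear in this diff):

    import os

    import streamlit as st
    from streamlit_keycloak import login  # assumed helper; the call site is truncated above

    keycloak = login(
        url=os.environ.get("KEYCLOAK_URL", "https://keycloak.example.com"),  # hypothetical
        realm=os.environ.get("KEYCLOAK_REALM", "kvant"),                     # hypothetical
        client_id=os.environ.get("KEYCLOAK_CLIENT_ID", "llama-stack-ui"),    # hypothetical
        custom_labels={
            "labelButton": "Sign in to kvant",
            "labelLogin": "Please sign in to your kvant account.",
        },
        auto_refresh=True,  # refresh tokens before they expire instead of re-prompting
        init_options={"checkLoginIframe": False},  # skip the hidden-iframe session check
    )

    if keycloak.authenticated:
        st.write("Signed in.")

Disabling checkLoginIframe is a common workaround here: the Keycloak JS adapter's silent session check relies on third-party cookies, which modern browsers increasingly block.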
View file

@@ -30,5 +30,3 @@ class LlamaStackApi:
         scoring_params = {fn_id: None for fn_id in scoring_function_ids}
         return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)
-
-llama_stack_api = LlamaStackApi()

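This hunk appears to be the core of the security fix: the module-level singleton llama_stack_api was constructed once at import time and shared by every Streamlit session, so per-user client state (such as an auth token from the Keycloak login above) could bleed between users. Every remaining hunk in this commit replaces a use of the singleton with a fresh LlamaStackApi() built at the call site. A sketch of the resulting shape, with an assumed constructor (only run_scoring's body is visible in this diff):

    from llama_stack_client import LlamaStackClient


    class LlamaStackApi:
        def __init__(self):
            # Assumption: the real constructor reads host/port and per-user
            # credentials from deployment config; a fixed base_url stands in here.
            self.client = LlamaStackClient(base_url="http://localhost:8321")

        def run_scoring(self, row, scoring_function_ids, scoring_params):
            """Score a single row, mirroring the visible hunk above."""
            if not scoring_params:  # assumed guard; the diff shows only the next two lines
                scoring_params = {fn_id: None for fn_id in scoring_function_ids}
            return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)


    # Before: one shared instance for the whole process.
    #   llama_stack_api = LlamaStackApi()
    # After: callers build a short-lived instance per request, e.g.
    #   models = LlamaStackApi().client.models.list()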
View file

@@ -6,13 +6,13 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def datasets():
     st.header("Datasets")
 
-    datasets_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.datasets.list()}
+    datasets_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.datasets.list()}
 
     if len(datasets_info) > 0:
         selected_dataset = st.selectbox("Select a dataset", list(datasets_info.keys()))
         st.json(datasets_info[selected_dataset], expanded=True)

View file

@@ -6,14 +6,14 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def benchmarks():
     # Benchmarks Section
     st.header("Benchmarks")
 
-    benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()}
+    benchmarks_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.benchmarks.list()}
 
     if len(benchmarks_info) > 0:
         selected_benchmark = st.selectbox("Select an eval task", list(benchmarks_info.keys()), key="benchmark_inspect")

View file

@@ -6,13 +6,13 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def models():
     # Models Section
     st.header("Models")
 
-    models_info = {m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()}
+    models_info = {m.identifier: m.to_dict() for m in LlamaStackApi().client.models.list()}
 
     selected_model = st.selectbox("Select a model", list(models_info.keys()))
     st.json(models_info[selected_model])

View file

@@ -6,12 +6,12 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def providers():
     st.header("🔍 API Providers")
 
-    apis_providers_lst = llama_stack_api.client.providers.list()
+    apis_providers_lst = LlamaStackApi().client.providers.list()
     api_to_providers = {}
     for api_provider in apis_providers_lst:
         if api_provider.api in api_to_providers:

View file

@@ -6,13 +6,13 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def scoring_functions():
     st.header("Scoring Functions")
 
-    scoring_functions_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.scoring_functions.list()}
+    scoring_functions_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.scoring_functions.list()}
 
     selected_scoring_function = st.selectbox("Select a scoring function", list(scoring_functions_info.keys()))
     st.json(scoring_functions_info[selected_scoring_function], expanded=True)

View file

@@ -6,14 +6,14 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def shields():
     # Shields Section
     st.header("Shields")
 
-    shields_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()}
+    shields_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.shields.list()}
 
     selected_shield = st.selectbox("Select a shield", list(shields_info.keys()))
     st.json(shields_info[selected_shield])

View file

@@ -6,12 +6,12 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def vector_dbs():
     st.header("Vector Databases")
 
-    vector_dbs_info = {v.identifier: v.to_dict() for v in llama_stack_api.client.vector_dbs.list()}
+    vector_dbs_info = {v.identifier: v.to_dict() for v in LlamaStackApi().client.vector_dbs.list()}
 
     if len(vector_dbs_info) > 0:
         selected_vector_db = st.selectbox("Select a vector database", list(vector_dbs_info.keys()))

View file

@@ -9,7 +9,7 @@ import json
 
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 from llama_stack.distribution.ui.modules.utils import process_dataset
 
@@ -39,7 +39,7 @@ def application_evaluation_page():
 
     # Select Scoring Functions to Run Evaluation On
     st.subheader("Select Scoring Functions")
-    scoring_functions = llama_stack_api.client.scoring_functions.list()
+    scoring_functions = LlamaStackApi().client.scoring_functions.list()
     scoring_functions = {sf.identifier: sf for sf in scoring_functions}
    scoring_functions_names = list(scoring_functions.keys())
     selected_scoring_functions = st.multiselect(
@@ -48,7 +48,7 @@ def application_evaluation_page():
         help="Choose one or more scoring functions.",
     )
 
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [m.identifier for m in available_models]
 
     scoring_params = {}
@@ -108,7 +108,7 @@ def application_evaluation_page():
             progress_bar.progress(progress, text=progress_text)
 
             # Run evaluation for current row
-            score_res = llama_stack_api.run_scoring(
+            score_res = LlamaStackApi().run_scoring(
                 r,
                 scoring_function_ids=selected_scoring_functions,
                 scoring_params=scoring_params,

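The last hunk sits inside the page's row-by-row scoring loop. A hedged sketch of that loop under the new per-call pattern; the dataframe contents, progress text, and scoring-function identifier are illustrative stand-ins, not values from this diff:

    import pandas as pd
    import streamlit as st

    from llama_stack.distribution.ui.modules.api import LlamaStackApi

    df = pd.DataFrame({"input_query": ["What is Llama Stack?"], "expected_answer": ["..."]})
    selected_scoring_functions = ["basic::subset_of"]  # assumed identifier
    scoring_params = {fn_id: None for fn_id in selected_scoring_functions}

    rows = df.to_dict(orient="records")
    progress_bar = st.progress(0.0, text="Running evaluation...")
    for i, r in enumerate(rows):
        progress_bar.progress((i + 1) / len(rows), text="Running evaluation...")
        # A fresh client per call, matching the change above.
        score_res = LlamaStackApi().run_scoring(
            r,
            scoring_function_ids=selected_scoring_functions,
            scoring_params=scoring_params,
        )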
View file

@@ -9,13 +9,13 @@ import json
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 def select_benchmark_1():
     # Select Benchmarks
     st.subheader("1. Choose An Eval Task")
-    benchmarks = llama_stack_api.client.benchmarks.list()
+    benchmarks = LlamaStackApi().client.benchmarks.list()
     benchmarks = {et.identifier: et for et in benchmarks}
     benchmarks_names = list(benchmarks.keys())
     selected_benchmark = st.selectbox(
@@ -47,7 +47,7 @@ def define_eval_candidate_2():
     # Define Eval Candidate
     candidate_type = st.radio("Candidate Type", ["model", "agent"])
 
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [model.identifier for model in available_models]
     selected_model = st.selectbox(
         "Choose a model",
@@ -167,7 +167,7 @@ def run_evaluation_3():
     eval_candidate = st.session_state["eval_candidate"]
 
     dataset_id = benchmarks[selected_benchmark].dataset_id
-    rows = llama_stack_api.client.datasets.iterrows(
+    rows = LlamaStackApi().client.datasets.iterrows(
         dataset_id=dataset_id,
     )
     total_rows = len(rows.data)
@@ -208,7 +208,7 @@ def run_evaluation_3():
             progress = i / len(rows)
             progress_bar.progress(progress, text=progress_text)
 
             # Run evaluation for current row
-            eval_res = llama_stack_api.client.eval.evaluate_rows(
+            eval_res = LlamaStackApi().client.eval.evaluate_rows(
                 benchmark_id=selected_benchmark,
                 input_rows=[r],
                 scoring_functions=benchmarks[selected_benchmark].scoring_functions,

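Condensing the run-time hunks above into one flow: pick a benchmark, page through its dataset, and evaluate row by row. This sketch omits the selection UI and session-state plumbing; note the evaluate_rows call is truncated in the diff, so any further arguments (such as candidate configuration) are not shown:

    from llama_stack.distribution.ui.modules.api import LlamaStackApi

    benchmarks = {et.identifier: et for et in LlamaStackApi().client.benchmarks.list()}
    selected_benchmark = next(iter(benchmarks))  # illustrative: first available task

    dataset_id = benchmarks[selected_benchmark].dataset_id
    rows = LlamaStackApi().client.datasets.iterrows(dataset_id=dataset_id)
    total_rows = len(rows.data)

    for r in rows.data:
        eval_res = LlamaStackApi().client.eval.evaluate_rows(
            benchmark_id=selected_benchmark,
            input_rows=[r],
            scoring_functions=benchmarks[selected_benchmark].scoring_functions,
            # ...the call in the page continues past this point (truncated in the diff)
        )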
View file

@@ -6,12 +6,12 @@
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 # Sidebar configurations
 with st.sidebar:
     st.header("Configuration")
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [model.identifier for model in available_models if model.model_type == "llm"]
     selected_model = st.selectbox(
         "Choose a model",
@@ -103,7 +103,7 @@ if prompt := st.chat_input("Example: What is Llama Stack?"):
         else:
             strategy = {"type": "greedy"}
 
-        response = llama_stack_api.client.inference.chat_completion(
+        response = LlamaStackApi().client.inference.chat_completion(
             messages=[
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": prompt},

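The strategy dict in the second hunk is the sampling configuration that the truncated chat_completion call presumably receives. A sketch of how the two hunks fit together; the temperature/top_p wiring, the prompts, and stream=False are assumptions made to keep the example self-contained:

    from llama_stack.distribution.ui.modules.api import LlamaStackApi

    models = LlamaStackApi().client.models.list()
    selected_model = next(m.identifier for m in models if m.model_type == "llm")

    temperature = 0.0  # assumed sidebar slider value
    if temperature > 0.0:
        strategy = {"type": "top_p", "temperature": temperature, "top_p": 0.95}
    else:
        strategy = {"type": "greedy"}

    response = LlamaStackApi().client.inference.chat_completion(
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is Llama Stack?"},
        ],
        model_id=selected_model,
        sampling_params={"strategy": strategy},
        stream=False,  # the page streams; False keeps the sketch simple
    )
    print(response.completion_message.content)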
View file

@@ -10,7 +10,7 @@ import streamlit as st
 from llama_stack_client import Agent, AgentEventLogger, RAGDocument
 
 from llama_stack.apis.common.content_types import ToolCallDelta
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 from llama_stack.distribution.ui.modules.utils import data_url_from_file
 
 
@@ -57,14 +57,14 @@ def rag_chat_page():
             for i, uploaded_file in enumerate(uploaded_files)
         ]
 
-        providers = llama_stack_api.client.providers.list()
+        providers = LlamaStackApi().client.providers.list()
         vector_io_provider = None
 
         for x in providers:
             if x.api == "vector_io":
                 vector_io_provider = x.provider_id
 
-        llama_stack_api.client.vector_dbs.register(
+        LlamaStackApi().client.vector_dbs.register(
             vector_db_id=vector_db_name,  # Use the user-provided name
             embedding_dimension=384,
             embedding_model="all-MiniLM-L6-v2",
@@ -72,7 +72,7 @@ def rag_chat_page():
         )
 
         # insert documents using the custom vector db name
-        llama_stack_api.client.tool_runtime.rag_tool.insert(
+        LlamaStackApi().client.tool_runtime.rag_tool.insert(
             vector_db_id=vector_db_name,  # Use the user-provided name
             documents=documents,
             chunk_size_in_tokens=512,
@@ -93,7 +93,7 @@ def rag_chat_page():
     )
 
     # select memory banks
-    vector_dbs = llama_stack_api.client.vector_dbs.list()
+    vector_dbs = LlamaStackApi().client.vector_dbs.list()
     vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
     selected_vector_dbs = st.multiselect(
         label="Select Document Collections to use in RAG queries",
@@ -103,7 +103,7 @@ def rag_chat_page():
     )
 
     st.subheader("Inference Parameters", divider=True)
-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [model.identifier for model in available_models if model.model_type == "llm"]
     selected_model = st.selectbox(
         label="Choose a model",
@@ -167,7 +167,7 @@ def rag_chat_page():
     @st.cache_resource
     def create_agent():
         return Agent(
-            llama_stack_api.client,
+            LlamaStackApi().client,
             model=selected_model,
             instructions=system_prompt,
             sampling_params={
@@ -232,7 +232,7 @@ def rag_chat_page():
         st.session_state.messages.append({"role": "system", "content": system_prompt})
 
         # Query the vector DB
-        rag_response = llama_stack_api.client.tool_runtime.rag_tool.query(
+        rag_response = LlamaStackApi().client.tool_runtime.rag_tool.query(
            content=prompt, vector_db_ids=list(selected_vector_dbs)
         )
         prompt_context = rag_response.content
@@ -251,7 +251,7 @@ def rag_chat_page():
         # Run inference directly
         st.session_state.messages.append({"role": "user", "content": extended_prompt})
 
-        response = llama_stack_api.client.inference.chat_completion(
+        response = LlamaStackApi().client.inference.chat_completion(
            messages=st.session_state.messages,
             model_id=selected_model,
             sampling_params={

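Taken together, the hunks above walk one RAG round trip: find a vector_io provider, register a vector database, insert documents, and query it for prompt context. A condensed sketch with an assumed document and database name; the register call's provider_id argument is inferred from the surrounding code rather than visible in the diff:

    from llama_stack_client import RAGDocument

    from llama_stack.distribution.ui.modules.api import LlamaStackApi

    vector_db_name = "my_docs"  # assumed user-provided name

    # Pick the first provider that serves the vector_io API.
    vector_io_provider = next(
        p.provider_id for p in LlamaStackApi().client.providers.list() if p.api == "vector_io"
    )

    LlamaStackApi().client.vector_dbs.register(
        vector_db_id=vector_db_name,
        embedding_dimension=384,
        embedding_model="all-MiniLM-L6-v2",
        provider_id=vector_io_provider,  # inferred: the diff truncates before this line
    )

    documents = [RAGDocument(document_id="doc-0", content="Llama Stack is ...", mime_type="text/plain")]
    LlamaStackApi().client.tool_runtime.rag_tool.insert(
        vector_db_id=vector_db_name,
        documents=documents,
        chunk_size_in_tokens=512,
    )

    rag_response = LlamaStackApi().client.tool_runtime.rag_tool.query(
        content="What is Llama Stack?", vector_db_ids=[vector_db_name]
    )
    prompt_context = rag_response.content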
View file

@@ -13,7 +13,7 @@ from llama_stack_client import Agent
 from llama_stack_client.lib.agents.react.agent import ReActAgent
 from llama_stack_client.lib.agents.react.tool_parser import ReActOutput
 
-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 
 
 class AgentType(enum.Enum):
@@ -24,7 +24,7 @@ class AgentType(enum.Enum):
 
 def tool_chat_page():
     st.title("🛠 Tools")
 
-    client = llama_stack_api.client
+    client = LlamaStackApi().client
     models = client.models.list()
     model_list = [model.identifier for model in models if model.api_model_type == "llm"]
@@ -55,7 +55,7 @@ def tool_chat_page():
     )
 
     if "builtin::rag" in toolgroup_selection:
-        vector_dbs = llama_stack_api.client.vector_dbs.list() or []
+        vector_dbs = LlamaStackApi().client.vector_dbs.list() or []
         if not vector_dbs:
             st.info("No vector databases available for selection.")
         vector_dbs = [vector_db.identifier for vector_db in vector_dbs]