forked from phoenix-oss/llama-stack-mirror

Compare commits: "kvant" and "v0.1-kvant" have no commits in common; the two branches have entirely different histories, so the comparison below shows the full difference between them.

23 changed files with 2363 additions and 2428 deletions. Relative to kvant, the v0.1-kvant side drops the Keycloak login flow from the playground UI, reverts the UI pages from per-call LlamaStackApi() construction to a module-level llama_stack_api singleton, removes the oauth2_token auth block from the kvant server template, and moves the playground image build context into the UI directory.

.github/workflows/ci-playground.yaml (vendored, 2 changes)

@@ -60,7 +60,7 @@ jobs:
           push: ${{ github.event_name != 'pull_request' }}
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-          context: .
+          context: llama_stack/distribution/ui
           file: llama_stack/distribution/ui/Containerfile
           provenance: mode=max
           sbom: true

(kvant distribution launch script)

@@ -1,17 +1,25 @@
 #!/usr/bin/env bash
 
+export INFERENCE_MODEL="inference-llama4-maverick"
+export EMBEDDING_MODEL="inference-bge-m3"
+export EMBEDDING_DIMENSION="1024"
 export LLAMA_STACK_PORT=8321
-# VLLM_API_TOKEN= env file
-# KEYCLOAK_CLIENT_SECRET= env file
+export OPENAI_BASE_URL=https://maas.ai-2.kvant.cloud/v1
+export OPENAI_API_KEY=sk-ZqAWqBKFXjb6y3tVej2AaA
+export VLLM_MAX_TOKENS=125000
 
 docker run -it \
   -p $LLAMA_STACK_PORT:$LLAMA_STACK_PORT \
   -v $(pwd)/data:/root/.llama \
   --mount type=bind,source="$(pwd)"/llama_stack/templates/kvant/run.yaml,target=/root/.llama/config.yaml,readonly \
   --entrypoint python \
-  --env-file ./.env \
   distribution-kvant:dev \
   -m llama_stack.distribution.server.server --config /root/.llama/config.yaml \
   --port $LLAMA_STACK_PORT \
+  --env VLLM_URL=$OPENAI_BASE_URL \
+  --env VLLM_API_TOKEN=$OPENAI_API_KEY \
+  --env PASSTHROUGH_URL=$OPENAI_BASE_URL \
+  --env PASSTHROUGH_API_KEY=$OPENAI_API_KEY \
+  --env INFERENCE_MODEL=$INFERENCE_MODEL \
+  --env EMBEDDING_MODEL=$EMBEDDING_MODEL \
+  --env EMBEDDING_DIMENSION=$EMBEDDING_DIMENSION \
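
The script above publishes the server on $LLAMA_STACK_PORT and forwards provider settings via --env. A quick way to confirm the container came up is to list the registered models; a minimal sketch, assuming llama-stack-client is installed on the host and the default port:

# Smoke test for the server launched by the script above.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Should include the INFERENCE_MODEL and EMBEDDING_MODEL exported above.
for model in client.models.list():
    print(model.identifier, model.model_type)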

llama_stack/distribution/ui/Containerfile

@@ -5,8 +5,7 @@ FROM python:3.12-slim
 WORKDIR /app
 COPY . /app/
 RUN /usr/local/bin/python -m pip install --upgrade pip && \
-    /usr/local/bin/pip3 install -r requirements.txt && \
-    /usr/local/bin/pip3 install -r llama_stack/distribution/ui/requirements.txt
+    /usr/local/bin/pip3 install -r requirements.txt
 EXPOSE 8501
 
-ENTRYPOINT ["streamlit", "run", "llama_stack/distribution/ui/app.py", "--server.port=8501", "--server.address=0.0.0.0"]
+ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]

llama_stack/distribution/ui/README.md

@@ -48,6 +48,3 @@ uv run --with ".[ui]" streamlit run llama_stack/distribution/ui/app.py
 | TOGETHER_API_KEY | API key for Together provider | (empty string) |
 | SAMBANOVA_API_KEY | API key for SambaNova provider | (empty string) |
 | OPENAI_API_KEY | API key for OpenAI provider | (empty string) |
-| KEYCLOAK_URL | URL for keycloak authentication | (empty string) |
-| KEYCLOAK_REALM | Keycloak realm | default |
-| KEYCLOAK_CLIENT_ID | Client ID for keycloak auth | (empty string) |

llama_stack/distribution/ui/app.py

@@ -50,42 +50,6 @@ def main():
     )
     pg.run()
 
 
-def main2():
-    from dataclasses import asdict
-    st.subheader(f"Welcome {keycloak.user_info['preferred_username']}!")
-    st.write(f"Here is your user information:")
-    st.write(asdict(keycloak))
-
-
-def get_access_token() -> str|None:
-    return st.session_state.get('access_token')
-
-
 if __name__ == "__main__":
-    from streamlit_keycloak import login
-    import os
-
-    keycloak_url = os.environ.get("KEYCLOAK_URL")
-    keycloak_realm = os.environ.get("KEYCLOAK_REALM", "default")
-    keycloak_client_id = os.environ.get("KEYCLOAK_CLIENT_ID")
-
-    if keycloak_url and keycloak_client_id:
-        keycloak = login(
-            url=keycloak_url,
-            realm=keycloak_realm,
-            client_id=keycloak_client_id,
-            custom_labels={
-                "labelButton": "Sign in to kvant",
-                "labelLogin": "Please sign in to your kvant account.",
-                "errorNoPopup": "Unable to open the authentication popup. Allow popups and refresh the page to proceed.",
-                "errorPopupClosed": "Authentication popup was closed manually.",
-                "errorFatal": "Unable to connect to Keycloak using the current configuration."
-            },
-            auto_refresh=True,
-        )
-
-        if keycloak.authenticated:
-            st.session_state['access_token'] = keycloak.access_token
-            main()
-        # TBD - add other authentications
-    else:
-        main()
+    main()

llama_stack/distribution/ui/modules/api.py

@@ -7,13 +7,11 @@
 import os
 
 from llama_stack_client import LlamaStackClient
-from llama_stack.distribution.ui.app import get_access_token
 
 
 class LlamaStackApi:
     def __init__(self):
         self.client = LlamaStackClient(
-            api_key=get_access_token(),
             base_url=os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:8321"),
             provider_data={
                 "fireworks_api_key": os.environ.get("FIREWORKS_API_KEY", ""),
@@ -30,3 +28,5 @@ class LlamaStackApi:
         scoring_params = {fn_id: None for fn_id in scoring_function_ids}
         return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)
+
+
+llama_stack_api = LlamaStackApi()
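
These two hunks are the crux of the page-level churn below: kvant constructs LlamaStackApi() at every call site so that api_key=get_access_token() picks up the current session's Keycloak token, while v0.1-kvant builds one shared client at import time. A side-by-side sketch (each branch has only one of the two spellings):

# kvant: fresh client per call; the session token is re-read every time.
from llama_stack.distribution.ui.modules.api import LlamaStackApi
models = LlamaStackApi().client.models.list()

# v0.1-kvant: one client, constructed once when the module is imported.
from llama_stack.distribution.ui.modules.api import llama_stack_api
models = llama_stack_api.client.models.list()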

llama_stack/distribution/ui/page/distribution/datasets.py

@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def datasets():
     st.header("Datasets")
 
-    datasets_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.datasets.list()}
+    datasets_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.datasets.list()}
     if len(datasets_info) > 0:
         selected_dataset = st.selectbox("Select a dataset", list(datasets_info.keys()))
         st.json(datasets_info[selected_dataset], expanded=True)

llama_stack/distribution/ui/page/distribution/benchmarks.py

@@ -6,14 +6,14 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def benchmarks():
     # Benchmarks Section
     st.header("Benchmarks")
 
-    benchmarks_info = {d.identifier: d.to_dict() for d in LlamaStackApi().client.benchmarks.list()}
+    benchmarks_info = {d.identifier: d.to_dict() for d in llama_stack_api.client.benchmarks.list()}
 
     if len(benchmarks_info) > 0:
         selected_benchmark = st.selectbox("Select an eval task", list(benchmarks_info.keys()), key="benchmark_inspect")

llama_stack/distribution/ui/page/distribution/models.py

@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def models():
     # Models Section
     st.header("Models")
-    models_info = {m.identifier: m.to_dict() for m in LlamaStackApi().client.models.list()}
+    models_info = {m.identifier: m.to_dict() for m in llama_stack_api.client.models.list()}
 
     selected_model = st.selectbox("Select a model", list(models_info.keys()))
     st.json(models_info[selected_model])

llama_stack/distribution/ui/page/distribution/providers.py

@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def providers():
     st.header("🔍 API Providers")
-    apis_providers_lst = LlamaStackApi().client.providers.list()
+    apis_providers_lst = llama_stack_api.client.providers.list()
     api_to_providers = {}
     for api_provider in apis_providers_lst:
         if api_provider.api in api_to_providers:

llama_stack/distribution/ui/page/distribution/scoring_functions.py

@@ -6,13 +6,13 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def scoring_functions():
     st.header("Scoring Functions")
 
-    scoring_functions_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.scoring_functions.list()}
+    scoring_functions_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.scoring_functions.list()}
 
     selected_scoring_function = st.selectbox("Select a scoring function", list(scoring_functions_info.keys()))
     st.json(scoring_functions_info[selected_scoring_function], expanded=True)

llama_stack/distribution/ui/page/distribution/shields.py

@@ -6,14 +6,14 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def shields():
     # Shields Section
     st.header("Shields")
 
-    shields_info = {s.identifier: s.to_dict() for s in LlamaStackApi().client.shields.list()}
+    shields_info = {s.identifier: s.to_dict() for s in llama_stack_api.client.shields.list()}
 
     selected_shield = st.selectbox("Select a shield", list(shields_info.keys()))
     st.json(shields_info[selected_shield])

llama_stack/distribution/ui/page/distribution/vector_dbs.py

@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def vector_dbs():
     st.header("Vector Databases")
-    vector_dbs_info = {v.identifier: v.to_dict() for v in LlamaStackApi().client.vector_dbs.list()}
+    vector_dbs_info = {v.identifier: v.to_dict() for v in llama_stack_api.client.vector_dbs.list()}
 
     if len(vector_dbs_info) > 0:
         selected_vector_db = st.selectbox("Select a vector database", list(vector_dbs_info.keys()))
llama_stack/distribution/ui/page/evaluations/app_eval.py

@@ -9,7 +9,7 @@ import json
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 from llama_stack.distribution.ui.modules.utils import process_dataset
 
 
@@ -39,7 +39,7 @@ def application_evaluation_page():
 
     # Select Scoring Functions to Run Evaluation On
     st.subheader("Select Scoring Functions")
-    scoring_functions = LlamaStackApi().client.scoring_functions.list()
+    scoring_functions = llama_stack_api.client.scoring_functions.list()
     scoring_functions = {sf.identifier: sf for sf in scoring_functions}
     scoring_functions_names = list(scoring_functions.keys())
     selected_scoring_functions = st.multiselect(
@@ -48,7 +48,7 @@ def application_evaluation_page():
         help="Choose one or more scoring functions.",
     )
 
-    available_models = LlamaStackApi().client.models.list()
+    available_models = llama_stack_api.client.models.list()
     available_models = [m.identifier for m in available_models]
 
     scoring_params = {}
@@ -108,7 +108,7 @@ def application_evaluation_page():
             progress_bar.progress(progress, text=progress_text)
 
             # Run evaluation for current row
-            score_res = LlamaStackApi().run_scoring(
+            score_res = llama_stack_api.run_scoring(
                 r,
                 scoring_function_ids=selected_scoring_functions,
                 scoring_params=scoring_params,
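
run_scoring here is the thin wrapper shown in modules/api.py above; when no per-function params are supplied it fills in None for each function id before forwarding to client.scoring.score. A standalone sketch (the row fields and the scoring-function id are placeholders; real ids come from client.scoring_functions.list()):

# Score a single row the way the evaluation page does.
row = {
    "input_query": "What is Llama Stack?",
    "generated_answer": "An open-source stack of AI building blocks.",
    "expected_answer": "A framework of composable AI building blocks.",
}
score_res = llama_stack_api.run_scoring(
    row,
    scoring_function_ids=["basic::subset_of"],  # placeholder id
    scoring_params=None,                        # defaults to {fn_id: None}
)
print(score_res.results)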
llama_stack/distribution/ui/page/evaluations/native_eval.py

@@ -9,13 +9,13 @@ import json
 import pandas as pd
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 def select_benchmark_1():
     # Select Benchmarks
     st.subheader("1. Choose An Eval Task")
-    benchmarks = LlamaStackApi().client.benchmarks.list()
+    benchmarks = llama_stack_api.client.benchmarks.list()
     benchmarks = {et.identifier: et for et in benchmarks}
     benchmarks_names = list(benchmarks.keys())
     selected_benchmark = st.selectbox(
@@ -47,7 +47,7 @@ def define_eval_candidate_2():
     # Define Eval Candidate
     candidate_type = st.radio("Candidate Type", ["model", "agent"])
 
-    available_models = LlamaStackApi().client.models.list()
+    available_models = llama_stack_api.client.models.list()
     available_models = [model.identifier for model in available_models]
     selected_model = st.selectbox(
         "Choose a model",
@@ -167,7 +167,7 @@ def run_evaluation_3():
     eval_candidate = st.session_state["eval_candidate"]
 
     dataset_id = benchmarks[selected_benchmark].dataset_id
-    rows = LlamaStackApi().client.datasets.iterrows(
+    rows = llama_stack_api.client.datasets.iterrows(
         dataset_id=dataset_id,
     )
     total_rows = len(rows.data)
@@ -208,7 +208,7 @@ def run_evaluation_3():
             progress = i / len(rows)
             progress_bar.progress(progress, text=progress_text)
             # Run evaluation for current row
-            eval_res = LlamaStackApi().client.eval.evaluate_rows(
+            eval_res = llama_stack_api.client.eval.evaluate_rows(
                 benchmark_id=selected_benchmark,
                 input_rows=[r],
                 scoring_functions=benchmarks[selected_benchmark].scoring_functions,

llama_stack/distribution/ui/page/playground/chat.py

@@ -6,12 +6,12 @@
 
 import streamlit as st
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 # Sidebar configurations
 with st.sidebar:
     st.header("Configuration")
-    available_models = LlamaStackApi().client.models.list()
+    available_models = llama_stack_api.client.models.list()
     available_models = [model.identifier for model in available_models if model.model_type == "llm"]
     selected_model = st.selectbox(
         "Choose a model",
@@ -103,7 +103,7 @@ if prompt := st.chat_input("Example: What is Llama Stack?"):
     else:
         strategy = {"type": "greedy"}
 
-    response = LlamaStackApi().client.inference.chat_completion(
+    response = llama_stack_api.client.inference.chat_completion(
         messages=[
             {"role": "system", "content": system_prompt},
             {"role": "user", "content": prompt},
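
Lifted out of the Streamlit flow, the call in the second hunk reduces to the following sketch (the model id is a placeholder for any identifier returned by client.models.list(); the greedy strategy mirrors the else branch above):

# Minimal, non-streaming version of the page's inference call.
response = llama_stack_api.client.inference.chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": "What is Llama Stack?"},
    ],
    model_id="inference-llama4-maverick",  # placeholder model id
    sampling_params={"strategy": {"type": "greedy"}},
)
print(response.completion_message.content)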

llama_stack/distribution/ui/page/playground/rag.py

@@ -10,7 +10,7 @@ import streamlit as st
 from llama_stack_client import Agent, AgentEventLogger, RAGDocument
 
 from llama_stack.apis.common.content_types import ToolCallDelta
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 from llama_stack.distribution.ui.modules.utils import data_url_from_file
 
 
@@ -57,14 +57,14 @@ def rag_chat_page():
             for i, uploaded_file in enumerate(uploaded_files)
         ]
 
-        providers = LlamaStackApi().client.providers.list()
+        providers = llama_stack_api.client.providers.list()
         vector_io_provider = None
 
         for x in providers:
             if x.api == "vector_io":
                 vector_io_provider = x.provider_id
 
-        LlamaStackApi().client.vector_dbs.register(
+        llama_stack_api.client.vector_dbs.register(
             vector_db_id=vector_db_name,  # Use the user-provided name
             embedding_dimension=384,
             embedding_model="all-MiniLM-L6-v2",
@@ -72,7 +72,7 @@ def rag_chat_page():
         )
 
         # insert documents using the custom vector db name
-        LlamaStackApi().client.tool_runtime.rag_tool.insert(
+        llama_stack_api.client.tool_runtime.rag_tool.insert(
            vector_db_id=vector_db_name,  # Use the user-provided name
            documents=documents,
            chunk_size_in_tokens=512,
@@ -93,7 +93,7 @@ def rag_chat_page():
     )
 
     # select memory banks
-    vector_dbs = LlamaStackApi().client.vector_dbs.list()
+    vector_dbs = llama_stack_api.client.vector_dbs.list()
     vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
     selected_vector_dbs = st.multiselect(
         label="Select Document Collections to use in RAG queries",
@@ -103,7 +103,7 @@ def rag_chat_page():
     )
 
     st.subheader("Inference Parameters", divider=True)
-    available_models = LlamaStackApi().client.models.list()
+    available_models = llama_stack_api.client.models.list()
     available_models = [model.identifier for model in available_models if model.model_type == "llm"]
     selected_model = st.selectbox(
         label="Choose a model",
@@ -167,7 +167,7 @@ def rag_chat_page():
     @st.cache_resource
     def create_agent():
         return Agent(
-            LlamaStackApi().client,
+            llama_stack_api.client,
             model=selected_model,
            instructions=system_prompt,
            sampling_params={
@@ -232,7 +232,7 @@ def rag_chat_page():
         st.session_state.messages.append({"role": "system", "content": system_prompt})
 
         # Query the vector DB
-        rag_response = LlamaStackApi().client.tool_runtime.rag_tool.query(
+        rag_response = llama_stack_api.client.tool_runtime.rag_tool.query(
             content=prompt, vector_db_ids=list(selected_vector_dbs)
         )
         prompt_context = rag_response.content
@@ -251,7 +251,7 @@ def rag_chat_page():
 
         # Run inference directly
         st.session_state.messages.append({"role": "user", "content": extended_prompt})
-        response = LlamaStackApi().client.inference.chat_completion(
+        response = llama_stack_api.client.inference.chat_completion(
             messages=st.session_state.messages,
             model_id=selected_model,
             sampling_params={
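
Chained together outside the UI, the calls this file touches form a small ingest-and-query pipeline. A sketch, assuming a running server; the database name, document, and provider id are placeholders, while the embedding settings and chunk size mirror the hunks above:

# Register a vector database, ingest one document, then retrieve context.
from llama_stack_client import RAGDocument

client = llama_stack_api.client

client.vector_dbs.register(
    vector_db_id="demo-docs",            # placeholder name
    embedding_dimension=384,
    embedding_model="all-MiniLM-L6-v2",
    provider_id="faiss",                 # assumed vector_io provider id
)

client.tool_runtime.rag_tool.insert(
    vector_db_id="demo-docs",
    documents=[RAGDocument(document_id="doc-1", content="Llama Stack is ...", metadata={})],
    chunk_size_in_tokens=512,
)

rag_response = client.tool_runtime.rag_tool.query(
    content="What is Llama Stack?", vector_db_ids=["demo-docs"]
)
print(rag_response.content)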

llama_stack/distribution/ui/page/playground/tools.py

@@ -13,7 +13,7 @@ from llama_stack_client import Agent
 from llama_stack_client.lib.agents.react.agent import ReActAgent
 from llama_stack_client.lib.agents.react.tool_parser import ReActOutput
 
-from llama_stack.distribution.ui.modules.api import LlamaStackApi
+from llama_stack.distribution.ui.modules.api import llama_stack_api
 
 
 class AgentType(enum.Enum):
@@ -24,7 +24,7 @@ class AgentType(enum.Enum):
 def tool_chat_page():
     st.title("🛠 Tools")
 
-    client = LlamaStackApi().client
+    client = llama_stack_api.client
     models = client.models.list()
     model_list = [model.identifier for model in models if model.api_model_type == "llm"]
 
@@ -55,7 +55,7 @@ def tool_chat_page():
     )
 
     if "builtin::rag" in toolgroup_selection:
-        vector_dbs = LlamaStackApi().client.vector_dbs.list() or []
+        vector_dbs = llama_stack_api.client.vector_dbs.list() or []
         if not vector_dbs:
             st.info("No vector databases available for selection.")
         vector_dbs = [vector_db.identifier for vector_db in vector_dbs]
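
Both this page and rag.py ultimately drive a multi-turn Agent through the same client. Reduced to essentials, the pattern looks like this sketch (the model id and session name are placeholders; the Agent and AgentEventLogger classes are the ones imported in the hunks above):

# One agent turn, streamed through the event logger.
from llama_stack_client import Agent, AgentEventLogger

agent = Agent(
    llama_stack_api.client,
    model="inference-llama4-maverick",   # placeholder model id
    instructions="You are a helpful assistant.",
)
session_id = agent.create_session("tools-demo")
turn = agent.create_turn(
    messages=[{"role": "user", "content": "What is Llama Stack?"}],
    session_id=session_id,
)
for event in AgentEventLogger().log(turn):
    event.print()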

llama_stack/distribution/ui/requirements.txt

@@ -1,5 +1,5 @@
-llama-stack-client>=0.2.9
+llama-stack>=0.2.1
+llama-stack-client>=0.2.1
 pandas
 streamlit
 streamlit-option-menu
-streamlit-keycloak

llama_stack/templates/kvant/run.yaml

@@ -154,17 +154,3 @@ tool_groups:
   provider_id: rag-runtime
 server:
   port: 8321
-  auth:
-    provider_type: "oauth2_token"
-    config:
-      jwks:
-      introspection:
-        url: ${env.KEYCLOAK_INSTROSPECT:https://iam.phoenix-systems.ch/realms/kvant/protocol/openid-connect/token/introspect}
-        client_id: ${env.KEYCLOAK_CLIENT_ID:llama-stack}
-        client_secret: ${env.KEYCLOAK_CLIENT_SECRET}
-      claims_mapping:
-        sub: projects
-        scope: roles
-        #groups: teams
-        customer/id: teams
-        aud: namespaces
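
On the kvant side, which keeps the oauth2_token block above, every request to the server must carry a Keycloak-issued access token. A sketch of a token-bearing client (the token value is a placeholder, and treating api_key as the bearer credential is an assumption about llama-stack-client's transport):

# Call a server that enforces the auth block above.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(
    base_url="http://localhost:8321",
    api_key="<keycloak-access-token>",  # assumed to be sent as Authorization: Bearer
)
print([m.identifier for m in client.models.list()])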

(playground launch script)

@@ -1,7 +1,3 @@
 #!/usr/bin/env bash
 
-export KEYCLOAK_URL="https://iam.phoenix-systems.ch"
-export KEYCLOAK_REALM="kvant"
-export KEYCLOAK_CLIENT_ID="llama-stack-playground"
-
 uv run --with ".[ui]" streamlit run llama_stack/distribution/ui/app.py

pyproject.toml

@@ -49,7 +49,6 @@ ui = [
     "pandas",
     "llama-stack-client>=0.2.9",
     "streamlit-option-menu",
-    "streamlit-keycloak",
 ]
 
 [dependency-groups]