forked from phoenix-oss/llama-stack-mirror

fix security update

parent 8943b283e9
commit b174effe05

14 changed files with 45 additions and 41 deletions
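The hunks below show the pattern applied across the UI pages: each use of the shared module-level `llama_stack_api` object is replaced with a `LlamaStackApi()` instance created at the call site, so every call constructs its own client rather than reusing one global object. A hedged sketch of the wrapper class these call sites rely on follows the diff.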
@@ -9,7 +9,7 @@ import json

 import pandas as pd
 import streamlit as st

-from llama_stack.distribution.ui.modules.api import llama_stack_api
+from llama_stack.distribution.ui.modules.api import LlamaStackApi
 from llama_stack.distribution.ui.modules.utils import process_dataset

@@ -39,7 +39,7 @@ def application_evaluation_page():

     # Select Scoring Functions to Run Evaluation On
     st.subheader("Select Scoring Functions")
-    scoring_functions = llama_stack_api.client.scoring_functions.list()
+    scoring_functions = LlamaStackApi().client.scoring_functions.list()
     scoring_functions = {sf.identifier: sf for sf in scoring_functions}
     scoring_functions_names = list(scoring_functions.keys())
     selected_scoring_functions = st.multiselect(
@@ -48,7 +48,7 @@ def application_evaluation_page():
         help="Choose one or more scoring functions.",
     )

-    available_models = llama_stack_api.client.models.list()
+    available_models = LlamaStackApi().client.models.list()
     available_models = [m.identifier for m in available_models]

     scoring_params = {}
@@ -108,7 +108,7 @@ def application_evaluation_page():
             progress_bar.progress(progress, text=progress_text)

             # Run evaluation for current row
-            score_res = llama_stack_api.run_scoring(
+            score_res = LlamaStackApi().run_scoring(
                 r,
                 scoring_function_ids=selected_scoring_functions,
                 scoring_params=scoring_params,
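For context, the call sites above assume a small wrapper class in `llama_stack.distribution.ui.modules.api` that owns a client and a `run_scoring` helper. That module is not part of this excerpt, so the following is only a minimal sketch of the shape those calls rely on; the constructor arguments, the `LLAMA_STACK_ENDPOINT` default, and the body of `run_scoring` are assumptions, not the repository's actual implementation.

import os

from llama_stack_client import LlamaStackClient


class LlamaStackApi:
    """Sketch (assumption): thin wrapper giving each caller its own client."""

    def __init__(self):
        # Build a fresh client per instance, configured from the environment
        # (endpoint name and default port are assumptions for illustration).
        self.client = LlamaStackClient(
            base_url=os.environ.get("LLAMA_STACK_ENDPOINT", "http://localhost:8321"),
        )

    def run_scoring(self, row, scoring_function_ids, scoring_params):
        """Score a single dataset row with the selected scoring functions."""
        # If no explicit params were provided, fall back to defaults per function
        # (hypothetical behavior, mirroring how the diff passes both arguments).
        if not scoring_params:
            scoring_params = {fn_id: None for fn_id in scoring_function_ids}
        return self.client.scoring.score(
            input_rows=[row],
            scoring_functions=scoring_params,
        )

Under this reading, the security-relevant difference is simply that no client is constructed at import time; a client only exists for the duration of the page interaction that needs it.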