From 7f2ed9622ce73cd3b1ec6bdceb0068ff651fa731 Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Mon, 2 Dec 2024 13:06:36 -0800
Subject: [PATCH] cleanup

---
 llama_stack/distribution/ui/modules/api.py                | 8 --------
 llama_stack/distribution/ui/page/evaluations/app_eval.py  | 4 ++--
 llama_stack/distribution/ui/page/playground/chat.py       | 2 +-
 .../llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py  | 6 +++++-
 4 files changed, 8 insertions(+), 12 deletions(-)

diff --git a/llama_stack/distribution/ui/modules/api.py b/llama_stack/distribution/ui/modules/api.py
index 797480a92..d3852caee 100644
--- a/llama_stack/distribution/ui/modules/api.py
+++ b/llama_stack/distribution/ui/modules/api.py
@@ -22,14 +22,6 @@ class LlamaStackApi:
             },
         )
 
-    def list_scoring_functions(self):
-        """List all available scoring functions"""
-        return self.client.scoring_functions.list()
-
-    def list_models(self):
-        """List all available judge models"""
-        return self.client.models.list()
-
     def run_scoring(
         self, row, scoring_function_ids: list[str], scoring_params: Optional[dict]
     ):
diff --git a/llama_stack/distribution/ui/page/evaluations/app_eval.py b/llama_stack/distribution/ui/page/evaluations/app_eval.py
index 7c093784d..1e699462e 100644
--- a/llama_stack/distribution/ui/page/evaluations/app_eval.py
+++ b/llama_stack/distribution/ui/page/evaluations/app_eval.py
@@ -40,7 +40,7 @@ def application_evaluation_page():
 
     # Select Scoring Functions to Run Evaluation On
     st.subheader("Select Scoring Functions")
-    scoring_functions = llama_stack_api.list_scoring_functions()
+    scoring_functions = llama_stack_api.client.scoring_functions.list()
     scoring_functions = {sf.identifier: sf for sf in scoring_functions}
     scoring_functions_names = list(scoring_functions.keys())
     selected_scoring_functions = st.multiselect(
@@ -49,7 +49,7 @@ def application_evaluation_page():
         help="Choose one or more scoring functions.",
     )
 
-    available_models = llama_stack_api.list_models()
+    available_models = llama_stack_api.client.models.list()
     available_models = [m.identifier for m in available_models]
 
     scoring_params = {}
diff --git a/llama_stack/distribution/ui/page/playground/chat.py b/llama_stack/distribution/ui/page/playground/chat.py
index 210211f7a..157922d3b 100644
--- a/llama_stack/distribution/ui/page/playground/chat.py
+++ b/llama_stack/distribution/ui/page/playground/chat.py
@@ -10,7 +10,7 @@ from modules.api import llama_stack_api
 # Sidebar configurations
 with st.sidebar:
     st.header("Configuration")
-    available_models = llama_stack_api.list_models()
+    available_models = llama_stack_api.client.models.list()
     available_models = [model.identifier for model in available_models]
     selected_model = st.selectbox(
         "Choose a model",
diff --git a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py
index b00b9a7db..0b18bac01 100644
--- a/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py
+++ b/llama_stack/providers/inline/scoring/llm_as_judge/scoring_fn/fn_defs/llm_as_judge_base.py
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 
 from llama_stack.apis.common.type_system import NumberType
-from llama_stack.apis.scoring_functions import ScoringFn
+from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams, ScoringFn
 
 
 llm_as_judge_base = ScoringFn(
@@ -14,4 +14,8 @@ llm_as_judge_base = ScoringFn(
     return_type=NumberType(),
     provider_id="llm-as-judge",
     provider_resource_id="llm-as-judge-base",
+    params=LLMAsJudgeScoringFnParams(
+        judge_model="meta-llama/Llama-3.1-405B-Instruct",
+        prompt_template="Enter custom LLM as Judge Prompt Template",
+    ),
 )
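
Reviewer note (not part of the patch): a minimal sketch of the code paths this
cleanup touches, to show the helper removal leaves callers intact. It assumes
an initialized `llama_stack_api` as imported by the UI pages above, and that
`llm_as_judge_base` registers under the identifier "llm-as-judge::base"; that
identifier, the placeholder row, and the fallback-to-defaults behavior of
`scoring_params=None` are assumptions, not facts verified against the repo.

    from modules.api import llama_stack_api

    # List judge models and scoring functions directly via the client,
    # exactly as app_eval.py and chat.py now do after this patch.
    judge_models = [m.identifier for m in llama_stack_api.client.models.list()]
    scoring_fns = [sf.identifier for sf in llama_stack_api.client.scoring_functions.list()]

    # Score one row with the base llm-as-judge function; passing
    # scoring_params=None should exercise the new defaults added above.
    row = {"input_query": "What is 2+2?", "generated_answer": "4"}  # placeholder row
    result = llama_stack_api.run_scoring(
        row,
        scoring_function_ids=["llm-as-judge::base"],  # assumed identifier
        scoring_params=None,
    )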