remove evals from top-level

This commit is contained in:
Xi Yan 2025-03-18 21:37:29 -07:00
parent a475d72155
commit 86486a94ce
10 changed files with 166 additions and 263 deletions

View file

@@ -22,11 +22,16 @@ class LlamaStackApi:
},
)
def run_scoring(self, row, scoring_function_ids: list[str], scoring_params: Optional[dict]):
def run_scoring(
self, row, scoring_function_ids: list[str], scoring_params: Optional[dict]
):
"""Run scoring on a single row"""
if not scoring_params:
scoring_params = {fn_id: None for fn_id in scoring_function_ids}
return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)
# TODO(xiyan): fix this
# return self.client.scoring.score(input_rows=[row], scoring_functions=scoring_params)
raise NotImplementedError("Scoring is not implemented")
llama_stack_api = LlamaStackApi()

View file

@@ -4,14 +4,12 @@
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
from streamlit_option_menu import option_menu
from llama_stack.distribution.ui.page.distribution.datasets import datasets
from llama_stack.distribution.ui.page.distribution.eval_tasks import benchmarks
from llama_stack.distribution.ui.page.distribution.models import models
from llama_stack.distribution.ui.page.distribution.scoring_functions import scoring_functions
from llama_stack.distribution.ui.page.distribution.shields import shields
from llama_stack.distribution.ui.page.distribution.vector_dbs import vector_dbs
from streamlit_option_menu import option_menu
def resources_page():
@@ -43,8 +41,9 @@ def resources_page():
datasets()
elif selected_resource == "Models":
models()
elif selected_resource == "Scoring Functions":
scoring_functions()
# TODO(xiyan): fix this
# elif selected_resource == "Scoring Functions":
# scoring_functions()
elif selected_resource == "Shields":
shields()