diff --git a/llama_stack/apis/eval/eval.py b/llama_stack/apis/eval/eval.py
index 5ae779ca7..50fb922fe 100644
--- a/llama_stack/apis/eval/eval.py
+++ b/llama_stack/apis/eval/eval.py
@@ -70,7 +70,6 @@ class Eval(Protocol):
     async def run_eval(
         self,
         task_id: str,
-        task_def: EvalTaskDef,
         task_config: EvalTaskConfig,
     ) -> Job: ...
 
diff --git a/llama_stack/distribution/routers/routers.py b/llama_stack/distribution/routers/routers.py
index 7fc65800f..8edf950b2 100644
--- a/llama_stack/distribution/routers/routers.py
+++ b/llama_stack/distribution/routers/routers.py
@@ -267,12 +267,10 @@ class EvalRouter(Eval):
     async def run_eval(
         self,
         task_id: str,
-        task_def: EvalTaskDef,
         task_config: AppEvalTaskConfig,
     ) -> Job:
         return await self.routing_table.get_provider_impl(task_id).run_eval(
             task_id=task_id,
-            task_def=task_def,
             task_config=task_config,
         )
 
diff --git a/llama_stack/providers/inline/meta_reference/eval/eval.py b/llama_stack/providers/inline/meta_reference/eval/eval.py
index d20cf30d2..57d1d0124 100644
--- a/llama_stack/providers/inline/meta_reference/eval/eval.py
+++ b/llama_stack/providers/inline/meta_reference/eval/eval.py
@@ -51,22 +51,20 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
         # TODO: assume sync job, will need jobs API for async scheduling
         self.jobs = {}
 
+        self.eval_tasks = {}
+
     async def initialize(self) -> None: ...
 
     async def shutdown(self) -> None: ...
 
+    async def register_eval_task(self, task_def: EvalTaskDef) -> None:
+        self.eval_tasks[task_def.identifier] = task_def
+
     async def list_eval_tasks(self) -> List[EvalTaskDef]:
         # NOTE: In order to be routed to this provider, the eval task def must have
         # a EvalTaskDef with identifier defined as DEFAULT_EVAL_TASK_IDENTIFIER
         # for app eval where eval task benchmark_id is not pre-registered
-        eval_tasks = [
-            EvalTaskDef(
-                identifier=DEFAULT_EVAL_TASK_IDENTIFIER,
-                dataset_id="",
-                scoring_functions=[],
-            )
-        ]
-        return eval_tasks
+        return list(self.eval_tasks.values())
 
     async def validate_eval_input_dataset_schema(self, dataset_id: str) -> None:
         dataset_def = await self.datasets_api.get_dataset(dataset_identifier=dataset_id)
@@ -94,9 +92,9 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
     async def run_eval(
         self,
         task_id: str,
-        task_def: EvalTaskDef,
         task_config: EvalTaskConfig,
     ) -> Job:
+        task_def = self.eval_tasks[task_id]
         dataset_id = task_def.dataset_id
         candidate = task_config.eval_candidate
         scoring_functions = task_def.scoring_functions
diff --git a/llama_stack/providers/tests/eval/test_eval.py b/llama_stack/providers/tests/eval/test_eval.py
index 794026e63..242be50ca 100644
--- a/llama_stack/providers/tests/eval/test_eval.py
+++ b/llama_stack/providers/tests/eval/test_eval.py
@@ -9,7 +9,11 @@ import pytest
 
 from llama_models.llama3.api import SamplingParams
 
-from llama_stack.apis.eval.eval import AppEvalTaskConfig, EvalTaskDef, ModelCandidate
+from llama_stack.apis.eval.eval import (
+    AppEvalTaskConfig,
+    EvalTaskDefWithProvider,
+    ModelCandidate,
+)
 from llama_stack.providers.tests.datasetio.test_datasetio import register_dataset
 
 
@@ -28,7 +32,7 @@ class Testeval:
         _, eval_tasks_impl, _, _, _, _ = eval_stack
         response = await eval_tasks_impl.list_eval_tasks()
         assert isinstance(response, list)
-        assert len(response) >= 1
+        assert len(response) == 0
 
     @pytest.mark.asyncio
     async def test_eval_evaluate_rows(self, eval_stack):
@@ -48,9 +52,17 @@ class Testeval:
             "meta-reference::llm_as_judge_8b_correctness",
             "meta-reference::equality",
         ]
+        task_id = "meta-reference::app_eval"
+        task_def = EvalTaskDefWithProvider(
+            identifier=task_id,
+            dataset_id="test_dataset_for_eval",
+            scoring_functions=scoring_functions,
+            provider_id="meta-reference",
+        )
+        await eval_tasks_impl.register_eval_task(task_def)
 
         response = await eval_impl.evaluate_rows(
-            task_id="meta-reference::app_eval",
+            task_id=task_id,
             input_rows=rows.rows,
             scoring_functions=scoring_functions,
             task_config=AppEvalTaskConfig(
@@ -76,15 +88,16 @@ class Testeval:
             "meta-reference::subset_of",
         ]
 
-        task_id = "meta-reference::app_eval"
+        task_id = "meta-reference::app_eval-2"
+        task_def = EvalTaskDefWithProvider(
+            identifier=task_id,
+            dataset_id="test_dataset_for_eval",
+            scoring_functions=scoring_functions,
+            provider_id="meta-reference",
+        )
+        await eval_tasks_impl.register_eval_task(task_def)
         response = await eval_impl.run_eval(
             task_id=task_id,
-            task_def=EvalTaskDef(
-                # NOTE: this is needed to make the router work for all app evals
-                identifier=task_id,
-                dataset_id="test_dataset_for_eval",
-                scoring_functions=scoring_functions,
-            ),
             task_config=AppEvalTaskConfig(
                 eval_candidate=ModelCandidate(
                     model="Llama3.2-3B-Instruct",
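
For reference, a minimal sketch of the calling convention after this change, mirroring the updated test above. It is not part of the diff: the `eval_tasks_impl` / `eval_impl` names are assumed handles to the resolved EvalTasks and Eval APIs (e.g. unpacked from the test's `eval_stack` fixture), the code is assumed to run inside an async test, and the exact dataset/scoring-function values are placeholders. The task is registered once under an identifier, and `run_eval` then takes only that `task_id` plus a run-time config instead of an inline `task_def`.

    from llama_models.llama3.api import SamplingParams

    from llama_stack.apis.eval.eval import (
        AppEvalTaskConfig,
        EvalTaskDefWithProvider,
        ModelCandidate,
    )

    # Describe the eval task once; the identifier becomes the key the provider
    # (and router) use to look the task up later.
    task_def = EvalTaskDefWithProvider(
        identifier="meta-reference::app_eval",
        dataset_id="test_dataset_for_eval",
        scoring_functions=["meta-reference::equality"],
        provider_id="meta-reference",
    )
    # eval_tasks_impl: resolved EvalTasks API (assumed handle, e.g. from eval_stack).
    await eval_tasks_impl.register_eval_task(task_def)

    # eval_impl: resolved Eval API (assumed handle). run_eval now receives only the
    # task_id and the run-time config; the provider fetches the registered task_def.
    job = await eval_impl.run_eval(
        task_id=task_def.identifier,
        task_config=AppEvalTaskConfig(
            eval_candidate=ModelCandidate(
                model="Llama3.2-3B-Instruct",
                sampling_params=SamplingParams(),
            ),
        ),
    )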