migrate evals to resource (#421)

* migrate evals to resource

* remove listing of providers' evals

* change the order of params in register

* fix after rebase

* linter fix

---------

Co-authored-by: Dinesh Yeduguru <dineshyv@fb.com>
This commit is contained in:
Dinesh Yeduguru 2024-11-11 17:24:03 -08:00 committed by GitHub
parent b95cb5308f
commit 3802edfc50
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 63 additions and 56 deletions

View file

@@ -12,7 +12,7 @@ from llama_models.schema_utils import json_schema_type
from pydantic import BaseModel, Field
from llama_stack.apis.datasets import Dataset
from llama_stack.apis.eval_tasks import EvalTaskDef
from llama_stack.apis.eval_tasks import EvalTask
from llama_stack.apis.memory_banks.memory_banks import MemoryBank
from llama_stack.apis.models import Model
from llama_stack.apis.scoring_functions import ScoringFnDef
@@ -67,9 +67,7 @@ class ScoringFunctionsProtocolPrivate(Protocol):
class EvalTasksProtocolPrivate(Protocol):
async def list_eval_tasks(self) -> List[EvalTaskDef]: ...
async def register_eval_task(self, eval_task_def: EvalTaskDef) -> None: ...
async def register_eval_task(self, eval_task: EvalTask) -> None: ...
@json_schema_type

View file

@@ -11,7 +11,7 @@ from .....apis.eval.eval import Eval, EvalTaskConfig, EvaluateResponse, JobStatus
from llama_stack.apis.common.type_system import * # noqa: F403
from llama_stack.apis.datasetio import DatasetIO
from llama_stack.apis.datasets import Datasets
from llama_stack.apis.eval_tasks import EvalTaskDef
from llama_stack.apis.eval_tasks import EvalTask
from llama_stack.apis.inference import Inference
from llama_stack.apis.scoring import Scoring
from llama_stack.providers.datatypes import EvalTasksProtocolPrivate
@@ -53,15 +53,12 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
async def shutdown(self) -> None: ...
async def register_eval_task(self, task_def: EvalTaskDef) -> None:
async def register_eval_task(self, task_def: EvalTask) -> None:
self.eval_tasks[task_def.identifier] = task_def
async def list_eval_tasks(self) -> List[EvalTaskDef]:
return list(self.eval_tasks.values())
async def validate_eval_input_dataset_schema(self, dataset_id: str) -> None:
dataset_def = await self.datasets_api.get_dataset(dataset_identifier=dataset_id)
if not dataset_def.dataset_schema or len(dataset_def.dataset_schema) == 0:
dataset_def = await self.datasets_api.get_dataset(dataset_id=dataset_id)
if not dataset_def.schema or len(dataset_def.schema) == 0:
raise ValueError(f"Dataset {dataset_id} does not have a schema defined.")
expected_schemas = [
@@ -77,7 +74,7 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
},
]
if dataset_def.dataset_schema not in expected_schemas:
if dataset_def.schema not in expected_schemas:
raise ValueError(
f"Dataset {dataset_id} does not have a correct input schema in {expected_schemas}"
)

View file

@@ -11,12 +11,9 @@ from llama_models.llama3.api import SamplingParams, URL
from llama_stack.apis.common.type_system import ChatCompletionInputType, StringType
from llama_stack.apis.datasetio.datasetio import DatasetDefWithProvider
from llama_stack.apis.eval.eval import (
AppEvalTaskConfig,
BenchmarkEvalTaskConfig,
EvalTaskDefWithProvider,
ModelCandidate,
)
from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams
@@ -70,13 +67,11 @@ class Testeval:
"meta-reference::equality",
]
task_id = "meta-reference::app_eval"
task_def = EvalTaskDefWithProvider(
identifier=task_id,
await eval_tasks_impl.register_eval_task(
eval_task_id=task_id,
dataset_id="test_dataset_for_eval",
scoring_functions=scoring_functions,
provider_id="meta-reference",
)
await eval_tasks_impl.register_eval_task(task_def)
response = await eval_impl.evaluate_rows(
task_id=task_id,
input_rows=rows.rows,
@@ -125,13 +120,11 @@ class Testeval:
]
task_id = "meta-reference::app_eval-2"
task_def = EvalTaskDefWithProvider(
identifier=task_id,
await eval_tasks_impl.register_eval_task(
eval_task_id=task_id,
dataset_id="test_dataset_for_eval",
scoring_functions=scoring_functions,
provider_id="meta-reference",
)
await eval_tasks_impl.register_eval_task(task_def)
response = await eval_impl.run_eval(
task_id=task_id,
task_config=AppEvalTaskConfig(
@@ -169,35 +162,29 @@ class Testeval:
pytest.skip(
"Only huggingface provider supports pre-registered remote datasets"
)
# register dataset
mmlu = DatasetDefWithProvider(
identifier="mmlu",
url=URL(uri="https://huggingface.co/datasets/llamastack/evals"),
dataset_schema={
await datasets_impl.register_dataset(
dataset_id="mmlu",
schema={
"input_query": StringType(),
"expected_answer": StringType(),
"chat_completion_input": ChatCompletionInputType(),
},
url=URL(uri="https://huggingface.co/datasets/llamastack/evals"),
metadata={
"path": "llamastack/evals",
"name": "evals__mmlu__details",
"split": "train",
},
provider_id="",
)
await datasets_impl.register_dataset(mmlu)
# register eval task
meta_reference_mmlu = EvalTaskDefWithProvider(
identifier="meta-reference-mmlu",
await eval_tasks_impl.register_eval_task(
eval_task_id="meta-reference-mmlu",
dataset_id="mmlu",
scoring_functions=["meta-reference::regex_parser_multiple_choice_answer"],
provider_id="",
)
await eval_tasks_impl.register_eval_task(meta_reference_mmlu)
# list benchmarks
response = await eval_tasks_impl.list_eval_tasks()
assert len(response) > 0