Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 02:53:30 +00:00)
[Evals API][10/n] API updates for EvalTaskDef + new test migration (#379)
* wip
* scoring fn api
* eval api
* eval task
* evaluate api update
* pre commit
* unwrap context -> config
* config field doc
* typo
* naming fix
* separate benchmark / app eval
* api name
* rename
* wip tests
* wip
* datasetio test
* delete unused
* fixture
* scoring resolve
* fix scoring register
* scoring test pass
* score batch
* scoring fix
* fix eval
* test eval works
* remove type ignore
* api refactor
* add default task_eval_id for routing
* add eval_id for jobs
* remove type ignore
* only keep 1 run_eval
* fix optional
* register task required
* register task required
* delete old tests
* delete old tests
* fixture return impl
This commit is contained in:
parent 8350f2df4c
commit 6192bf43a4

32 changed files with 916 additions and 389 deletions
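At a high level, this change replaces the old evaluate-a-whole-dataset flow (`evaluate_batch` against a `dataset_id`) with explicit eval tasks: an `EvalTaskDef` is registered first, and evaluation is then driven by its `task_id`. Below is a minimal sketch of that flow, using only names that appear in the test diff further down; the `impls` mapping of API implementations is an assumption standing in for whatever the test harness provides.

from llama_models.llama3.api import SamplingParams

from llama_stack.apis.eval.eval import (
    AppEvalTaskConfig,
    EvalTaskDefWithProvider,
    ModelCandidate,
)
from llama_stack.distribution.datatypes import Api


async def run_app_eval(impls, dataset_id: str):
    # Registering the eval task is now required before it can be run.
    task_def = EvalTaskDefWithProvider(
        identifier="meta-reference::app_eval",
        dataset_id=dataset_id,
        scoring_functions=["meta-reference::llm_as_judge_8b_correctness"],
        provider_id="meta-reference",
    )
    await impls[Api.eval_tasks].register_eval_task(task_def)

    # The single run_eval entry point is keyed by task_id and takes a task config
    # (AppEvalTaskConfig for app evals, carrying the model candidate to evaluate).
    job = await impls[Api.eval].run_eval(
        task_id=task_def.identifier,
        task_config=AppEvalTaskConfig(
            eval_candidate=ModelCandidate(
                model="Llama3.2-3B-Instruct",
                sampling_params=SamplingParams(),
            ),
        ),
    )

    # Job status and results are now looked up by (task_id, job_id).
    status = await impls[Api.eval].job_status(task_def.identifier, job.job_id)
    if status and status.value == "completed":
        return await impls[Api.eval].job_result(task_def.identifier, job.job_id)
    return None

This is the same sequence `test_eval_run_eval` in the diff walks through against the meta-reference provider; `test_eval_evaluate_rows` does the equivalent for a fixed set of rows via `evaluate_rows`.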
llama_stack/providers/tests/eval/test_eval.py

@@ -3,81 +3,124 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-import pytest
-import pytest_asyncio
-
-from llama_stack.apis.common.type_system import *  # noqa: F403
-from llama_stack.apis.datasetio import *  # noqa: F403
-from llama_stack.apis.eval.eval import ModelCandidate
-from llama_stack.distribution.datatypes import *  # noqa: F403
+
 
 import pytest
 
 from llama_models.llama3.api import SamplingParams
 
+from llama_stack.apis.eval.eval import (
+    AppEvalTaskConfig,
+    EvalTaskDefWithProvider,
+    ModelCandidate,
+)
+from llama_stack.distribution.datatypes import Api
 from llama_stack.providers.tests.datasetio.test_datasetio import register_dataset
-from llama_stack.providers.tests.resolver import resolve_impls_for_test
 
 
 # How to run this test:
 #
-# 1. Ensure you have a conda with the right dependencies installed. This is a bit tricky
-#    since it depends on the provider you are testing. On top of that you need
-#    `pytest` and `pytest-asyncio` installed.
-#
-# 2. Copy and modify the provider_config_example.yaml depending on the provider you are testing.
-#
-# 3. Run:
-#
-# ```bash
-# PROVIDER_ID=<your_provider> \
-#  PROVIDER_CONFIG=provider_config.yaml \
-#  pytest -s llama_stack/providers/tests/eval/test_eval.py \
-#  --tb=short --disable-warnings
-# ```
+# pytest llama_stack/providers/tests/eval/test_eval.py
+#   -m "meta_reference"
+#   -v -s --tb=short --disable-warnings
 
 
@pytest_asyncio.fixture(scope="session")
|
||||
async def eval_settings():
|
||||
impls = await resolve_impls_for_test(
|
||||
Api.eval, deps=[Api.datasetio, Api.scoring, Api.inference]
|
||||
)
|
||||
return {
|
||||
"eval_impl": impls[Api.eval],
|
||||
"scoring_impl": impls[Api.scoring],
|
||||
"datasets_impl": impls[Api.datasets],
|
||||
}
|
||||
class Testeval:
|
||||
@pytest.mark.asyncio
|
||||
async def test_eval_tasks_list(self, eval_stack):
|
||||
# NOTE: this needs you to ensure that you are starting from a clean state
|
||||
# but so far we don't have an unregister API unfortunately, so be careful
|
||||
eval_tasks_impl = eval_stack[Api.eval_tasks]
|
||||
response = await eval_tasks_impl.list_eval_tasks()
|
||||
assert isinstance(response, list)
|
||||
assert len(response) == 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_eval_evaluate_rows(self, eval_stack):
|
||||
eval_impl, eval_tasks_impl, datasetio_impl, datasets_impl = (
|
||||
eval_stack[Api.eval],
|
||||
eval_stack[Api.eval_tasks],
|
||||
eval_stack[Api.datasetio],
|
||||
eval_stack[Api.datasets],
|
||||
)
|
||||
await register_dataset(
|
||||
datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval"
|
||||
)
|
||||
response = await datasets_impl.list_datasets()
|
||||
assert len(response) == 1
|
||||
rows = await datasetio_impl.get_rows_paginated(
|
||||
dataset_id="test_dataset_for_eval",
|
||||
rows_in_page=3,
|
||||
)
|
||||
assert len(rows.rows) == 3
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_eval(eval_settings):
|
||||
datasets_impl = eval_settings["datasets_impl"]
|
||||
await register_dataset(
|
||||
datasets_impl,
|
||||
for_generation=True,
|
||||
dataset_id="test_dataset_for_eval",
|
||||
)
|
||||
|
||||
response = await datasets_impl.list_datasets()
|
||||
assert len(response) == 1
|
||||
|
||||
eval_impl = eval_settings["eval_impl"]
|
||||
response = await eval_impl.evaluate_batch(
|
||||
dataset_id=response[0].identifier,
|
||||
candidate=ModelCandidate(
|
||||
model="Llama3.2-1B-Instruct",
|
||||
sampling_params=SamplingParams(),
|
||||
),
|
||||
scoring_functions=[
|
||||
"meta-reference::subset_of",
|
||||
scoring_functions = [
|
||||
"meta-reference::llm_as_judge_8b_correctness",
|
||||
],
|
||||
)
|
||||
assert response.job_id == "0"
|
||||
job_status = await eval_impl.job_status(response.job_id)
|
||||
"meta-reference::equality",
|
||||
]
|
||||
task_id = "meta-reference::app_eval"
|
||||
task_def = EvalTaskDefWithProvider(
|
||||
identifier=task_id,
|
||||
dataset_id="test_dataset_for_eval",
|
||||
scoring_functions=scoring_functions,
|
||||
provider_id="meta-reference",
|
||||
)
|
||||
await eval_tasks_impl.register_eval_task(task_def)
|
||||
|
||||
assert job_status and job_status.value == "completed"
|
||||
response = await eval_impl.evaluate_rows(
|
||||
task_id=task_id,
|
||||
input_rows=rows.rows,
|
||||
scoring_functions=scoring_functions,
|
||||
task_config=AppEvalTaskConfig(
|
||||
eval_candidate=ModelCandidate(
|
||||
model="Llama3.2-3B-Instruct",
|
||||
sampling_params=SamplingParams(),
|
||||
),
|
||||
),
|
||||
)
|
||||
assert len(response.generations) == 3
|
||||
assert "meta-reference::llm_as_judge_8b_correctness" in response.scores
|
||||
assert "meta-reference::equality" in response.scores
|
||||
|
||||
eval_response = await eval_impl.job_result(response.job_id)
|
||||
@pytest.mark.asyncio
|
||||
async def test_eval_run_eval(self, eval_stack):
|
||||
eval_impl, eval_tasks_impl, datasets_impl = (
|
||||
eval_stack[Api.eval],
|
||||
eval_stack[Api.eval_tasks],
|
||||
eval_stack[Api.datasets],
|
||||
)
|
||||
await register_dataset(
|
||||
datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval"
|
||||
)
|
||||
|
||||
assert eval_response is not None
|
||||
assert len(eval_response.generations) == 5
|
||||
assert "meta-reference::subset_of" in eval_response.scores
|
||||
assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores
|
||||
scoring_functions = [
|
||||
"meta-reference::llm_as_judge_8b_correctness",
|
||||
"meta-reference::subset_of",
|
||||
]
|
||||
|
||||
task_id = "meta-reference::app_eval-2"
|
||||
task_def = EvalTaskDefWithProvider(
|
||||
identifier=task_id,
|
||||
dataset_id="test_dataset_for_eval",
|
||||
scoring_functions=scoring_functions,
|
||||
provider_id="meta-reference",
|
||||
)
|
||||
await eval_tasks_impl.register_eval_task(task_def)
|
||||
response = await eval_impl.run_eval(
|
||||
task_id=task_id,
|
||||
task_config=AppEvalTaskConfig(
|
||||
eval_candidate=ModelCandidate(
|
||||
model="Llama3.2-3B-Instruct",
|
||||
sampling_params=SamplingParams(),
|
||||
),
|
||||
),
|
||||
)
|
||||
assert response.job_id == "0"
|
||||
job_status = await eval_impl.job_status(task_id, response.job_id)
|
||||
assert job_status and job_status.value == "completed"
|
||||
eval_response = await eval_impl.job_result(task_id, response.job_id)
|
||||
|
||||
assert eval_response is not None
|
||||
assert len(eval_response.generations) == 5
|
||||
assert "meta-reference::subset_of" in eval_response.scores
|
||||
assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores
|
||||
|
|
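The migrated tests take an `eval_stack` argument in place of the deleted `eval_settings` fixture; that fixture is defined in the shared pytest fixtures elsewhere in this PR rather than in this file. As a rough, hypothetical sketch of the shape the tests rely on (assuming the pre-existing `resolve_impls_for_test` helper also resolves the new `eval_tasks` routing table), it could look like:

import pytest_asyncio

from llama_stack.distribution.datatypes import Api
from llama_stack.providers.tests.resolver import resolve_impls_for_test


@pytest_asyncio.fixture(scope="session")
async def eval_stack():
    # Hypothetical stand-in for the real fixture: resolve the eval provider and
    # its dependencies, and hand the tests a mapping keyed by Api.
    impls = await resolve_impls_for_test(
        Api.eval, deps=[Api.datasetio, Api.scoring, Api.inference]
    )
    return impls

The only contract the tests above depend on is that indexing with `Api.eval`, `Api.eval_tasks`, `Api.datasetio`, and `Api.datasets` returns a resolved implementation.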