Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-15 22:47:59 +00:00)
test eval works

Parent: 413a1b6d8b
Commit: 3f1ac29d57

3 changed files with 126 additions and 13 deletions
@@ -7,6 +7,12 @@
import pytest

from llama_models.llama3.api import SamplingParams

from llama_stack.apis.eval.eval import AppEvalTaskConfig, EvalTaskDef, ModelCandidate
from llama_stack.providers.tests.datasetio.test_datasetio import register_dataset


# How to run this test:
#
# pytest llama_stack/providers/tests/eval/test_eval.py
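The tests in the second hunk unpack a six-element eval_stack fixture. The real fixture is defined in the repo's test conftest and is not part of this diff; the sketch below is a hypothetical stand-in that only mirrors the tuple shape the tests rely on (the two unused slots are not identified anywhere in this diff).

# Hypothetical stand-in for the eval_stack fixture (the real one lives
# in the repo's conftest and is not shown in this diff). Only the
# 6-tuple shape is inferred from the tests; AsyncMock replaces the
# actual provider implementations.
from unittest.mock import AsyncMock

import pytest_asyncio


@pytest_asyncio.fixture
async def eval_stack():
    # (eval, eval_tasks, <unused>, <unused>, datasetio, datasets)
    return tuple(AsyncMock() for _ in range(6))

The second hunk adds the new tests: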
@@ -22,4 +28,73 @@ class Testeval:
        _, eval_tasks_impl, _, _, _, _ = eval_stack
        response = await eval_tasks_impl.list_eval_tasks()
        assert isinstance(response, list)
        print(response)
        assert len(response) >= 1

    @pytest.mark.asyncio
    async def test_eval_evaluate_rows(self, eval_stack):
        eval_impl, eval_tasks_impl, _, _, datasetio_impl, datasets_impl = eval_stack
        await register_dataset(
            datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval"
        )
        response = await datasets_impl.list_datasets()
        assert len(response) == 1
        rows = await datasetio_impl.get_rows_paginated(
            dataset_id="test_dataset_for_eval",
            rows_in_page=3,
        )
        assert len(rows.rows) == 3

        scoring_functions = [
            "meta-reference::llm_as_judge_8b_correctness",
            "meta-reference::equality",
        ]

        response = await eval_impl.evaluate_rows(
            input_rows=rows.rows,
            scoring_functions=scoring_functions,
            eval_task_config=AppEvalTaskConfig(
                eval_candidate=ModelCandidate(
                    model="Llama3.2-3B-Instruct",
                    sampling_params=SamplingParams(),
                ),
            ),
        )
        assert len(response.generations) == 3
        assert "meta-reference::llm_as_judge_8b_correctness" in response.scores
        assert "meta-reference::equality" in response.scores
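An aside on get_rows_paginated: the test above pulls a single page of three rows. Draining a whole dataset means looping on the returned page token. A minimal sketch, assuming the result object exposes rows and next_page_token and that the method accepts a page_token argument (assumptions beyond what this diff shows):

# Sketch: draining a dataset page by page. `page_token` and
# `next_page_token` are assumptions about the datasetio API beyond
# what this diff shows.
async def fetch_all_rows(datasetio_impl, dataset_id: str, page_size: int = 3):
    all_rows = []
    token = None
    while True:
        page = await datasetio_impl.get_rows_paginated(
            dataset_id=dataset_id,
            rows_in_page=page_size,
            page_token=token,
        )
        all_rows.extend(page.rows)
        token = page.next_page_token
        if not token:
            break
    return all_rows

The diff continues with the second new test: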

    @pytest.mark.asyncio
    async def test_eval_run_eval(self, eval_stack):
        eval_impl, eval_tasks_impl, _, _, datasetio_impl, datasets_impl = eval_stack
        await register_dataset(
            datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval"
        )

        scoring_functions = [
            "meta-reference::llm_as_judge_8b_correctness",
            "meta-reference::subset_of",
        ]

        response = await eval_impl.run_eval(
            eval_task_def=EvalTaskDef(
                # NOTE: this is needed to make the router work for all app evals
                identifier="meta-reference::app_eval",
                dataset_id="test_dataset_for_eval",
                scoring_functions=scoring_functions,
            ),
            eval_task_config=AppEvalTaskConfig(
                eval_candidate=ModelCandidate(
                    model="Llama3.2-3B-Instruct",
                    sampling_params=SamplingParams(),
                ),
            ),
        )
        assert response.job_id == "0"
        job_status = await eval_impl.job_status(response.job_id)
        assert job_status and job_status.value == "completed"
        eval_response = await eval_impl.job_result(response.job_id)

        assert eval_response is not None
        assert len(eval_response.generations) == 5
        assert "meta-reference::subset_of" in eval_response.scores
        assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores
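One more note: run_eval returns a job handle, and the test can assert a "completed" status immediately only because the meta-reference eval provider executes jobs synchronously. Against a provider that runs jobs in the background, callers would poll instead. A minimal sketch, assuming the status enum's .value ends up as "completed" or "failed" (only "completed" is confirmed by the test above):

import asyncio


# Sketch: polling an eval job to completion. The test can assert
# "completed" right away only because the meta-reference provider runs
# jobs synchronously; an async provider would need a loop like this.
# The "failed" terminal value is an assumption not shown in this diff.
async def wait_for_job(eval_impl, job_id: str, interval: float = 1.0):
    while True:
        status = await eval_impl.job_status(job_id)
        if status and status.value in ("completed", "failed"):
            return status
        await asyncio.sleep(interval)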