test: revamp eval-related integration tests (#1433)

# What does this PR do?
- revamp and clean up datasets/scoring/eval integration tests
- closes https://github.com/meta-llama/llama-stack/issues/1396


## Test Plan
**dataset**
```
LLAMA_STACK_BASE_URL=http://localhost:8321 pytest -v tests/integration/datasetio/
```
<img width="842" alt="image"
src="https://github.com/user-attachments/assets/88fc2b6a-b496-47bf-bc0c-8fea48ba36ff"
/>
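
For reference, a minimal sketch of the register/unregister flow these dataset tests now exercise (method names mirror the diff below; the client construction, dataset id, schema, and data URL here are illustrative assumptions):

```python
# Illustrative sketch only -- not the test code itself.
# Assumes a stack running at the base URL above and the llama-stack-client SDK.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Pick the first datasetio provider, as the revamped tests do.
datasetio_providers = [p for p in client.providers.list() if p.api == "datasetio"]

client.datasets.register(
    dataset_id="test_dataset",
    dataset_schema={
        "input_query": {"type": "string"},
        "expected_answer": {"type": "string"},
        "generated_answer": {"type": "string"},
    },
    url=dict(uri="data:text/csv;base64,..."),  # placeholder: CSV inlined as a data URL by the test helper
    provider_id=datasetio_providers[0].provider_id,
)

assert any(d.identifier == "test_dataset" for d in client.datasets.list())
client.datasets.unregister("test_dataset")
assert all(d.identifier != "test_dataset" for d in client.datasets.list())
```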

**scoring**
```
LLAMA_STACK_CONFIG=fireworks pytest -v tests/integration/scoring --text-model meta-llama/Llama-3.1-8B-Instruct --judge-model meta-llama/Llama-3.1-8B-Instruct
```
<img width="851" alt="image"
src="https://github.com/user-attachments/assets/50f46415-b44c-4c37-a6c3-076f2767adb3"
/>
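
For orientation, a hedged sketch of the scoring call these tests drive (API names are taken from the diff below; the scoring function identifier, judge model, prompt, and regex are illustrative):

```python
# Illustrative sketch of scoring dataset rows with an LLM-as-judge scoring
# function, mirroring the calls used in tests/integration/scoring.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

rows = client.datasetio.get_rows_paginated(dataset_id="test_dataset", rows_in_page=3)

scoring_functions = {
    "llm-as-judge::base": dict(  # identifier is an assumption for this sketch
        type="llm_as_judge",
        judge_model="meta-llama/Llama-3.1-8B-Instruct",
        prompt_template="Output a number response in the following format: Score: <number>",
        judge_score_regexes=[r"Score: (\d+)"],
        aggregation_functions=["categorical_count"],
    ),
}

response = client.scoring.score(input_rows=rows.rows, scoring_functions=scoring_functions)
for fn_id in scoring_functions:
    assert len(response.results[fn_id].score_rows) == len(rows.rows)
```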


**eval**
```
LLAMA_STACK_CONFIG=fireworks pytest -v tests/integration/eval --text-model meta-llama/Llama-3.1-8B-Instruct --judge-model meta-llama/Llama-3.1-8B-Instruct
```
<img width="841" alt="image"
src="https://github.com/user-attachments/assets/8eb1c65c-3b39-4d66-8ff4-f471ca783e49"
/>
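
And a hedged sketch of the client-side eval flow the new tests follow (register a benchmark against an already-registered dataset, evaluate rows directly, or run the whole benchmark as a job); method names come from the diff, while the dataset, model, and scoring function ids are illustrative:

```python
# Illustrative sketch only; mirrors tests/integration/eval in the diff below.
import uuid

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

benchmark_id = str(uuid.uuid4())
client.benchmarks.register(
    benchmark_id=benchmark_id,
    dataset_id="test_dataset_for_eval",  # assumed to be registered for generation already
    scoring_functions=["basic::equality"],
)

benchmark_config = {
    "eval_candidate": {
        "type": "model",
        "model": "meta-llama/Llama-3.1-8B-Instruct",
        "sampling_params": {"temperature": 0.0},
    },
}

# Evaluate a handful of rows directly ...
rows = client.datasetio.get_rows_paginated(dataset_id="test_dataset_for_eval", rows_in_page=3)
response = client.eval.evaluate_rows(
    benchmark_id=benchmark_id,
    input_rows=rows.rows,
    scoring_functions=["basic::equality"],
    benchmark_config=benchmark_config,
)
assert "basic::equality" in response.scores

# ... or run the whole benchmark as a job and fetch the result.
job = client.eval.run_eval(benchmark_id=benchmark_id, benchmark_config=benchmark_config)
if client.eval.jobs.status(job_id=job.job_id, benchmark_id=benchmark_id) == "completed":
    eval_response = client.eval.jobs.retrieve(job_id=job.job_id, benchmark_id=benchmark_id)
    assert "basic::equality" in eval_response.scores
```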


Commit bcb13c492f (parent 82e94fe22f) by Xi Yan, 2025-03-06 10:51:35 -08:00, committed by GitHub
7 changed files with 184 additions and 222 deletions

---

@@ -6,7 +6,7 @@
import re
from typing import Any, Dict, Optional
from llama_stack.apis.inference.inference import Inference
from llama_stack.apis.inference.inference import Inference, UserMessage
from llama_stack.apis.scoring import ScoringResultRow
from llama_stack.apis.scoring_functions import ScoringFnParams
from llama_stack.providers.utils.scoring.base_scoring_fn import RegisteredBaseScoringFn
@@ -58,10 +58,9 @@ class LlmAsJudgeScoringFn(RegisteredBaseScoringFn):
judge_response = await self.inference_api.chat_completion(
model_id=fn_def.params.judge_model,
messages=[
{
"role": "user",
"content": judge_input_msg,
}
UserMessage(
content=judge_input_msg,
),
],
)
content = judge_response.completion_message.content
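
For context, a minimal sketch of the typed-message call the hunk above switches to (the wrapper function and argument names are illustrative; the import and call shape come from the diff):

```python
# Sketch: the judge call now passes a typed UserMessage instead of a raw role/content dict.
from llama_stack.apis.inference.inference import Inference, UserMessage


async def judge_one_row(inference_api: Inference, judge_model: str, judge_input_msg: str) -> str:
    judge_response = await inference_api.chat_completion(
        model_id=judge_model,  # fn_def.params.judge_model in the scoring function
        messages=[UserMessage(content=judge_input_msg)],
    )
    return judge_response.completion_message.content
```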

---

@@ -73,6 +73,11 @@ class RegisteredBaseScoringFn(BaseScoringFn):
raise ValueError(f"Scoring function def with identifier {scoring_fn.identifier} already exists.")
self.supported_fn_defs_registry[scoring_fn.identifier] = scoring_fn
def unregister_scoring_fn_def(self, scoring_fn_id: str) -> None:
if scoring_fn_id not in self.supported_fn_defs_registry:
raise ValueError(f"Scoring function def with identifier {scoring_fn_id} does not exist.")
del self.supported_fn_defs_registry[scoring_fn_id]
@abstractmethod
async def score_row(
self,

---

@@ -59,7 +59,7 @@ def pytest_addoption(parser):
)
parser.addoption(
"--judge-model",
help="comma-separated list of judge models. Fixture name: judge_model_id",
help="Specify the judge model to use for testing",
)
parser.addoption(
"--embedding-dimension",

---

@@ -1,6 +1,6 @@
input_query,generated_answer,expected_answer,chat_completion_input
What is the capital of France?,London,Paris,"[{'role': 'user', 'content': 'What is the capital of France?'}]"
Who is the CEO of Meta?,Mark Zuckerberg,Mark Zuckerberg,"[{'role': 'user', 'content': 'Who is the CEO of Meta?'}]"
What is the largest planet in our solar system?,Jupiter,Jupiter,"[{'role': 'user', 'content': 'What is the largest planet in our solar system?'}]"
What is the smallest country in the world?,China,Vatican City,"[{'role': 'user', 'content': 'What is the smallest country in the world?'}]"
What is the currency of Japan?,Yen,Yen,"[{'role': 'user', 'content': 'What is the currency of Japan?'}]"
What is the capital of France?,London,Paris,"[{""role"": ""user"", ""content"": ""What is the capital of France?""}]"
Who is the CEO of Meta?,Mark Zuckerberg,Mark Zuckerberg,"[{""role"": ""user"", ""content"": ""Who is the CEO of Meta?""}]"
What is the largest planet in our solar system?,Jupiter,Jupiter,"[{""role"": ""user"", ""content"": ""What is the largest planet in our solar system?""}]"
What is the smallest country in the world?,China,Vatican City,"[{""role"": ""user"", ""content"": ""What is the smallest country in the world?""}]"
What is the currency of Japan?,Yen,Yen,"[{""role"": ""user"", ""content"": ""What is the currency of Japan?""}]"


---

@@ -9,13 +9,9 @@ import mimetypes
import os
from pathlib import Path
import pytest
# How to run this test:
#
# pytest llama_stack/providers/tests/datasetio/test_datasetio.py
# -m "meta_reference"
# -v -s --tb=short --disable-warnings
# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/datasetio
def data_url_from_file(file_path: str) -> str:
@@ -60,42 +56,29 @@ def register_dataset(llama_stack_client, for_generation=False, for_rag=False, da
"generated_answer": {"type": "string"},
}
dataset_providers = [x for x in llama_stack_client.providers.list() if x.api == "datasetio"]
dataset_provider_id = dataset_providers[0].provider_id
llama_stack_client.datasets.register(
dataset_id=dataset_id,
dataset_schema=dataset_schema,
url=dict(uri=test_url),
provider_id="localfs",
provider_id=dataset_provider_id,
)
def test_datasets_list(llama_stack_client):
# NOTE: this needs you to ensure that you are starting from a clean state
# but so far we don't have an unregister API unfortunately, so be careful
response = llama_stack_client.datasets.list()
assert isinstance(response, list)
assert len(response) == 0
def test_register_dataset(llama_stack_client):
def test_register_unregister_dataset(llama_stack_client):
register_dataset(llama_stack_client)
response = llama_stack_client.datasets.list()
assert isinstance(response, list)
assert len(response) == 1
assert response[0].identifier == "test_dataset"
with pytest.raises(ValueError):
# unregister a dataset that does not exist
llama_stack_client.datasets.unregister("test_dataset2")
llama_stack_client.datasets.unregister("test_dataset")
response = llama_stack_client.datasets.list()
assert isinstance(response, list)
assert len(response) == 0
with pytest.raises(ValueError):
llama_stack_client.datasets.unregister("test_dataset")
def test_get_rows_paginated(llama_stack_client):
register_dataset(llama_stack_client)

---

@@ -3,181 +3,87 @@
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import uuid
import pytest
from llama_stack.apis.common.content_types import URL
from llama_stack.apis.common.type_system import ChatCompletionInputType, StringType
from llama_stack.apis.eval.eval import (
ModelCandidate,
)
from llama_stack.apis.inference import SamplingParams
from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams
from llama_stack.distribution.datatypes import Api
from ..datasetio.test_datasetio import register_dataset
from .constants import JUDGE_PROMPT
# How to run this test:
#
# pytest llama_stack/providers/tests/eval/test_eval.py
# -m "meta_reference_eval_together_inference_huggingface_datasetio"
# -v -s --tb=short --disable-warnings
# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/eval
@pytest.mark.skip(reason="FIXME FIXME @yanxi0830 this needs to be migrated to use the API")
class Testeval:
@pytest.mark.asyncio
async def test_benchmarks_list(self, eval_stack):
# NOTE: this needs you to ensure that you are starting from a clean state
# but so far we don't have an unregister API unfortunately, so be careful
benchmarks_impl = eval_stack[Api.benchmarks]
response = await benchmarks_impl.list_benchmarks()
assert isinstance(response, list)
@pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval")
response = llama_stack_client.datasets.list()
assert any(x.identifier == "test_dataset_for_eval" for x in response)
@pytest.mark.asyncio
async def test_eval_evaluate_rows(self, eval_stack, inference_model, judge_model):
eval_impl, benchmarks_impl, datasetio_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasetio],
eval_stack[Api.datasets],
)
await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")
response = await datasets_impl.list_datasets()
rows = await datasetio_impl.get_rows_paginated(
rows = llama_stack_client.datasetio.get_rows_paginated(
dataset_id="test_dataset_for_eval",
rows_in_page=3,
)
assert len(rows.rows) == 3
scoring_functions = [
"basic::equality",
scoring_fn_id,
]
benchmark_id = "meta-reference::app_eval"
await benchmarks_impl.register_benchmark(
benchmark_id = str(uuid.uuid4())
llama_stack_client.benchmarks.register(
benchmark_id=benchmark_id,
dataset_id="test_dataset_for_eval",
scoring_functions=scoring_functions,
)
response = await eval_impl.evaluate_rows(
list_benchmarks = llama_stack_client.benchmarks.list()
assert any(x.identifier == benchmark_id for x in list_benchmarks)
response = llama_stack_client.eval.evaluate_rows(
benchmark_id=benchmark_id,
input_rows=rows.rows,
scoring_functions=scoring_functions,
benchmark_config=dict(
eval_candidate=ModelCandidate(
model=inference_model,
sampling_params=SamplingParams(),
),
scoring_params={
"meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
judge_model=judge_model,
prompt_template=JUDGE_PROMPT,
judge_score_regexes=[
r"Total rating: (\d+)",
r"rating: (\d+)",
r"Rating: (\d+)",
],
)
benchmark_config={
"eval_candidate": {
"type": "model",
"model": text_model_id,
"sampling_params": {
"temperature": 0.0,
},
},
},
),
)
assert len(response.generations) == 3
assert "basic::equality" in response.scores
assert scoring_fn_id in response.scores
@pytest.mark.asyncio
async def test_eval_run_eval(self, eval_stack, inference_model, judge_model):
eval_impl, benchmarks_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasets],
@pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
benchmark_id = str(uuid.uuid4())
llama_stack_client.benchmarks.register(
benchmark_id=benchmark_id,
dataset_id="test_dataset_for_eval_2",
scoring_functions=[scoring_fn_id],
)
await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")
scoring_functions = [
"basic::subset_of",
]
benchmark_id = "meta-reference::app_eval-2"
await benchmarks_impl.register_benchmark(
response = llama_stack_client.eval.run_eval(
benchmark_id=benchmark_id,
dataset_id="test_dataset_for_eval",
scoring_functions=scoring_functions,
)
response = await eval_impl.run_eval(
benchmark_id=benchmark_id,
benchmark_config=dict(
eval_candidate=ModelCandidate(
model=inference_model,
sampling_params=SamplingParams(),
),
),
benchmark_config={
"eval_candidate": {
"type": "model",
"model": text_model_id,
"sampling_params": {
"temperature": 0.0,
},
},
},
)
assert response.job_id == "0"
job_status = await eval_impl.job_status(benchmark_id, response.job_id)
assert job_status and job_status.value == "completed"
eval_response = await eval_impl.job_result(benchmark_id, response.job_id)
job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
assert job_status and job_status == "completed"
eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
assert eval_response is not None
assert len(eval_response.generations) == 5
assert "basic::subset_of" in eval_response.scores
@pytest.mark.asyncio
async def test_eval_run_benchmark_eval(self, eval_stack, inference_model):
eval_impl, benchmarks_impl, datasets_impl = (
eval_stack[Api.eval],
eval_stack[Api.benchmarks],
eval_stack[Api.datasets],
)
response = await datasets_impl.list_datasets()
assert len(response) > 0
if response[0].provider_id != "huggingface":
pytest.skip("Only huggingface provider supports pre-registered remote datasets")
await datasets_impl.register_dataset(
dataset_id="mmlu",
dataset_schema={
"input_query": StringType(),
"expected_answer": StringType(),
"chat_completion_input": ChatCompletionInputType(),
},
url=URL(uri="https://huggingface.co/datasets/llamastack/evals"),
metadata={
"path": "llamastack/evals",
"name": "evals__mmlu__details",
"split": "train",
},
)
# register eval task
await benchmarks_impl.register_benchmark(
benchmark_id="meta-reference-mmlu",
dataset_id="mmlu",
scoring_functions=["basic::regex_parser_multiple_choice_answer"],
)
# list benchmarks
response = await benchmarks_impl.list_benchmarks()
assert len(response) > 0
benchmark_id = "meta-reference-mmlu"
response = await eval_impl.run_eval(
benchmark_id=benchmark_id,
benchmark_config=dict(
eval_candidate=ModelCandidate(
model=inference_model,
sampling_params=SamplingParams(),
),
num_examples=3,
),
)
job_status = await eval_impl.job_status(benchmark_id, response.job_id)
assert job_status and job_status.value == "completed"
eval_response = await eval_impl.job_result(benchmark_id, response.job_id)
assert eval_response is not None
assert len(eval_response.generations) == 3
assert scoring_fn_id in eval_response.scores

---

@@ -15,14 +15,70 @@ def sample_judge_prompt_template():
return "Output a number response in the following format: Score: <number>, where <number> is the number between 0 and 9."
@pytest.fixture
def sample_scoring_fn_id():
return "llm-as-judge-test-prompt"
def register_scoring_function(
llama_stack_client,
provider_id,
scoring_fn_id,
judge_model_id,
judge_prompt_template,
):
llama_stack_client.scoring_functions.register(
scoring_fn_id=scoring_fn_id,
provider_id=provider_id,
description="LLM as judge scoring function with test prompt",
return_type={
"type": "string",
},
params={
"type": "llm_as_judge",
"judge_model": judge_model_id,
"prompt_template": judge_prompt_template,
},
)
def test_scoring_functions_list(llama_stack_client):
# NOTE: this needs you to ensure that you are starting from a clean state
# but so far we don't have an unregister API unfortunately, so be careful
response = llama_stack_client.scoring_functions.list()
assert isinstance(response, list)
assert len(response) > 0
def test_scoring_functions_register(
llama_stack_client,
sample_scoring_fn_id,
judge_model_id,
sample_judge_prompt_template,
):
llm_as_judge_provider = [
x
for x in llama_stack_client.providers.list()
if x.api == "scoring" and x.provider_type == "inline::llm-as-judge"
]
if len(llm_as_judge_provider) == 0:
pytest.skip("No llm-as-judge provider found, cannot test registeration")
llm_as_judge_provider_id = llm_as_judge_provider[0].provider_id
register_scoring_function(
llama_stack_client,
llm_as_judge_provider_id,
sample_scoring_fn_id,
judge_model_id,
sample_judge_prompt_template,
)
list_response = llama_stack_client.scoring_functions.list()
assert isinstance(list_response, list)
assert len(list_response) > 0
assert any(x.identifier == sample_scoring_fn_id for x in list_response)
# TODO: add unregister api for scoring functions
def test_scoring_score(llama_stack_client):
register_dataset(llama_stack_client, for_rag=True)
response = llama_stack_client.datasets.list()
@@ -106,8 +162,17 @@ def test_scoring_score_with_params_llm_as_judge(llama_stack_client, sample_judge
assert len(response.results[x].score_rows) == 5
@pytest.mark.skip(reason="Skipping because this seems to be really slow")
def test_scoring_score_with_aggregation_functions(llama_stack_client, sample_judge_prompt_template, judge_model_id):
@pytest.mark.parametrize(
"provider_id",
[
"basic",
"llm-as-judge",
"braintrust",
],
)
def test_scoring_score_with_aggregation_functions(
llama_stack_client, sample_judge_prompt_template, judge_model_id, provider_id
):
register_dataset(llama_stack_client, for_rag=True)
rows = llama_stack_client.datasetio.get_rows_paginated(
dataset_id="test_dataset",
@@ -115,7 +180,10 @@ def test_scoring_score_with_aggregation_functions(llama_stack_client, sample_jud
)
assert len(rows.rows) == 3
scoring_fns_list = llama_stack_client.scoring_functions.list()
scoring_fns_list = [x for x in llama_stack_client.scoring_functions.list() if x.provider_id == provider_id]
if len(scoring_fns_list) == 0:
pytest.skip(f"No scoring functions found for provider {provider_id}, skipping")
scoring_functions = {}
aggr_fns = [
"accuracy",
@@ -123,30 +191,31 @@ def test_scoring_score_with_aggregation_functions(llama_stack_client, sample_jud
"categorical_count",
"average",
]
for x in scoring_fns_list:
if x.provider_id == "llm-as-judge":
scoring_fn = scoring_fns_list[0]
if scoring_fn.provider_id == "llm-as-judge":
aggr_fns = ["categorical_count"]
scoring_functions[x.identifier] = dict(
scoring_functions[scoring_fn.identifier] = dict(
type="llm_as_judge",
judge_model=judge_model_id,
prompt_template=sample_judge_prompt_template,
judge_score_regexes=[r"Score: (\d+)"],
aggregation_functions=aggr_fns,
)
elif x.provider_id == "basic" or x.provider_id == "braintrust":
if "regex_parser" in x.identifier:
scoring_functions[x.identifier] = dict(
elif scoring_fn.provider_id == "basic" or scoring_fn.provider_id == "braintrust":
if "regex_parser" in scoring_fn.identifier:
scoring_functions[scoring_fn.identifier] = dict(
type="regex_parser",
parsing_regexes=[r"Score: (\d+)"],
aggregation_functions=aggr_fns,
)
else:
scoring_functions[x.identifier] = dict(
scoring_functions[scoring_fn.identifier] = dict(
type="basic",
aggregation_functions=aggr_fns,
)
else:
scoring_functions[x.identifier] = None
scoring_functions[scoring_fn.identifier] = None
response = llama_stack_client.scoring.score(
input_rows=rows.rows,