Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-02 20:40:36 +00:00)
Folder restructure for evals/datasets/scoring (#419)
* rename evals related stuff
* fix datasetio
* fix scoring test
* localfs -> LocalFS
* refactor scoring
* refactor scoring
* remove 8b_correctness scoring_fn from tests
* tests w/ eval params
* scoring fn braintrust fixture
* import
This commit is contained in:
parent 2b7d70ba86
commit b4416b72fd
37 changed files with 141 additions and 100 deletions
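The change most visible to test authors in the diffs below is that the scoring_stack fixture now returns the full set of resolved implementations keyed by the Api enum, instead of a fixed four-tuple. A minimal sketch of the new access pattern (not a complete test; the fixture wiring itself is shown in the hunks that follow, and the mapping behaviour of the returned object is assumed from how the tests index it):

    from llama_stack.distribution.datatypes import Api

    # scoring_stack is the session fixture from the diffs below; it is assumed to
    # behave like a mapping from Api members to provider implementations.
    scoring_impl = scoring_stack[Api.scoring]
    scoring_functions_impl = scoring_stack[Api.scoring_functions]
    datasetio_impl = scoring_stack[Api.datasetio]
    datasets_impl = scoring_stack[Api.datasets]
    models_impl = scoring_stack[Api.models]  # reachable now without widening a tuple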
@@ -16,7 +16,7 @@ DEFAULT_PROVIDER_COMBINATIONS = [
     pytest.param(
         {
             "scoring": "meta_reference",
-            "datasetio": "meta_reference",
+            "datasetio": "localfs",
             "inference": "fireworks",
         },
         id="meta_reference_scoring_fireworks_inference",
@@ -25,12 +25,21 @@ DEFAULT_PROVIDER_COMBINATIONS = [
     pytest.param(
         {
             "scoring": "meta_reference",
-            "datasetio": "meta_reference",
+            "datasetio": "localfs",
             "inference": "together",
         },
         id="meta_reference_scoring_together_inference",
         marks=pytest.mark.meta_reference_scoring_together_inference,
     ),
+    pytest.param(
+        {
+            "scoring": "braintrust",
+            "datasetio": "localfs",
+            "inference": "together",
+        },
+        id="braintrust_scoring_together_inference",
+        marks=pytest.mark.braintrust_scoring_together_inference,
+    ),
 ]
 
 
@@ -38,6 +47,7 @@ def pytest_configure(config):
     for fixture_name in [
         "meta_reference_scoring_fireworks_inference",
         "meta_reference_scoring_together_inference",
+        "braintrust_scoring_together_inference",
     ]:
         config.addinivalue_line(
             "markers",
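Since the hunk above is cut off at the "markers" argument, here is a hedged sketch of the full pytest_configure call as it presumably reads with the new entry; the description string after each marker name is an assumption, not taken from the diff:

    def pytest_configure(config):
        for fixture_name in [
            "meta_reference_scoring_fireworks_inference",
            "meta_reference_scoring_together_inference",
            "braintrust_scoring_together_inference",
        ]:
            config.addinivalue_line(
                "markers",
                f"{fixture_name}: marks tests as {fixture_name} specific",  # assumed wording
            )

With the marker registered, the new provider combination can be selected the usual pytest way, e.g. with -m braintrust_scoring_together_inference.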
@@ -31,7 +31,20 @@ def scoring_meta_reference() -> ProviderFixture:
     )
 
 
-SCORING_FIXTURES = ["meta_reference", "remote"]
+@pytest.fixture(scope="session")
+def scoring_braintrust() -> ProviderFixture:
+    return ProviderFixture(
+        providers=[
+            Provider(
+                provider_id="braintrust",
+                provider_type="braintrust",
+                config={},
+            )
+        ],
+    )
+
+
+SCORING_FIXTURES = ["meta_reference", "remote", "braintrust"]
 
 
 @pytest_asyncio.fixture(scope="session")
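The new braintrust fixture mirrors the existing meta_reference one, and adding its name to SCORING_FIXTURES is what exposes it to the combination machinery in conftest.py. As a purely hypothetical illustration of the same pattern (the acme names below do not exist in the repo), one more scoring provider would be wired in like this:

    @pytest.fixture(scope="session")
    def scoring_acme() -> ProviderFixture:  # hypothetical provider, for illustration only
        return ProviderFixture(
            providers=[
                Provider(
                    provider_id="acme",
                    provider_type="acme",
                    config={},
                )
            ],
        )


    SCORING_FIXTURES = ["meta_reference", "remote", "braintrust", "acme"]

The plain names in SCORING_FIXTURES appear to be resolved to fixtures via the scoring_<name> naming convention, which is why the fixture above is called scoring_braintrust.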
@@ -52,9 +65,4 @@ async def scoring_stack(request):
         provider_data,
     )
 
-    return (
-        impls[Api.scoring],
-        impls[Api.scoring_functions],
-        impls[Api.datasetio],
-        impls[Api.datasets],
-    )
+    return impls
@@ -8,7 +8,7 @@
 import pytest
 
 from llama_stack.apis.scoring_functions import *  # noqa: F403
-
+from llama_stack.distribution.datatypes import Api
 from llama_stack.providers.tests.datasetio.test_datasetio import register_dataset
 
 # How to run this test:
@@ -23,20 +23,36 @@ class TestScoring:
     async def test_scoring_functions_list(self, scoring_stack):
         # NOTE: this needs you to ensure that you are starting from a clean state
         # but so far we don't have an unregister API unfortunately, so be careful
-        _, scoring_functions_impl, _, _ = scoring_stack
+        scoring_functions_impl = scoring_stack[Api.scoring_functions]
         response = await scoring_functions_impl.list_scoring_functions()
         assert isinstance(response, list)
         assert len(response) > 0
 
     @pytest.mark.asyncio
     async def test_scoring_score(self, scoring_stack):
-        scoring_impl, scoring_functions_impl, datasetio_impl, datasets_impl = (
-            scoring_stack
+        (
+            scoring_impl,
+            scoring_functions_impl,
+            datasetio_impl,
+            datasets_impl,
+            models_impl,
+        ) = (
+            scoring_stack[Api.scoring],
+            scoring_stack[Api.scoring_functions],
+            scoring_stack[Api.datasetio],
+            scoring_stack[Api.datasets],
+            scoring_stack[Api.models],
         )
         await register_dataset(datasets_impl)
         response = await datasets_impl.list_datasets()
         assert len(response) == 1
 
+        for model_id in ["Llama3.2-3B-Instruct", "Llama3.1-8B-Instruct"]:
+            await models_impl.register_model(
+                model_id=model_id,
+                provider_id="",
+            )
+
         # scoring individual rows
         rows = await datasetio_impl.get_rows_paginated(
             dataset_id="test_dataset",
@@ -44,10 +60,11 @@ class TestScoring:
         )
         assert len(rows.rows) == 3
 
+        scoring_fns_list = await scoring_functions_impl.list_scoring_functions()
         scoring_functions = {
-            "meta-reference::llm_as_judge_8b_correctness": None,
-            "meta-reference::equality": None,
+            scoring_fns_list[0].identifier: None,
         }
 
         response = await scoring_impl.score(
             input_rows=rows.rows,
             scoring_functions=scoring_functions,
@@ -69,13 +86,34 @@ class TestScoring:
 
     @pytest.mark.asyncio
     async def test_scoring_score_with_params(self, scoring_stack):
-        scoring_impl, scoring_functions_impl, datasetio_impl, datasets_impl = (
-            scoring_stack
+        (
+            scoring_impl,
+            scoring_functions_impl,
+            datasetio_impl,
+            datasets_impl,
+            models_impl,
+        ) = (
+            scoring_stack[Api.scoring],
+            scoring_stack[Api.scoring_functions],
+            scoring_stack[Api.datasetio],
+            scoring_stack[Api.datasets],
+            scoring_stack[Api.models],
         )
         await register_dataset(datasets_impl)
         response = await datasets_impl.list_datasets()
         assert len(response) == 1
 
+        for model_id in ["Llama3.1-405B-Instruct"]:
+            await models_impl.register_model(
+                model_id=model_id,
+                provider_id="",
+            )
+
+        scoring_fns_list = await scoring_functions_impl.list_scoring_functions()
+        provider_id = scoring_fns_list[0].provider_id
+        if provider_id == "braintrust":
+            pytest.skip("Braintrust provider does not support scoring with params")
+
         # scoring individual rows
         rows = await datasetio_impl.get_rows_paginated(
             dataset_id="test_dataset",
@@ -84,7 +122,7 @@ class TestScoring:
         assert len(rows.rows) == 3
 
         scoring_functions = {
-            "meta-reference::llm_as_judge_8b_correctness": LLMAsJudgeScoringFnParams(
+            "meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
                 judge_model="Llama3.1-405B-Instruct",
                 prompt_template="Output a number response in the following format: Score: <number>, where <number> is the number between 0 and 9.",
                 judge_score_regexes=[r"Score: (\d+)"],
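Taken together with the score() call shown earlier, the parameterized test now drives the generic llm_as_judge_base function with an explicit judge configuration. A condensed sketch of that flow, using only calls that appear in the hunks above (assertions on the response are omitted because its shape is not visible in this diff):

    scoring_functions = {
        "meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
            judge_model="Llama3.1-405B-Instruct",
            prompt_template="Output a number response in the following format: Score: <number>, where <number> is the number between 0 and 9.",
            judge_score_regexes=[r"Score: (\d+)"],
        ),
    }
    response = await scoring_impl.score(
        input_rows=rows.rows,
        scoring_functions=scoring_functions,
    )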