Xi Yan 2024-11-07 11:11:27 -08:00
parent 3322aa9ee4
commit 93995ecc4c
4 changed files with 54 additions and 2 deletions
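In summary: the commit removes a leftover debug print from MetaReferenceScoringImpl, adds a session-scoped huggingface datasetio provider fixture, registers a meta_reference_eval_together_inference_huggingface_datasetio provider combination and pytest marker for the eval tests, and adds a Testeval.test_eval_run_benchmark_eval case that pages rows from the llamastack_mmlu dataset and scores them with the regex-parser multiple-choice scoring function.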


@@ -58,7 +58,6 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
             for impl in self.scoring_fn_id_impls.values()
             for fn_def in impl.get_supported_scoring_fn_defs()
         ]
-        print("!!!", scoring_fn_defs_list)
         for f in scoring_fn_defs_list:
             assert f.identifier.startswith(
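This hunk only deletes a stray debugging print from the scoring-function registration loop in MetaReferenceScoringImpl; the surrounding list comprehension and identifier assertion are unchanged.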


@@ -31,7 +31,20 @@ def datasetio_meta_reference() -> ProviderFixture:
     )
 
 
-DATASETIO_FIXTURES = ["meta_reference", "remote"]
+@pytest.fixture(scope="session")
+def datasetio_huggingface() -> ProviderFixture:
+    return ProviderFixture(
+        providers=[
+            Provider(
+                provider_id="huggingface",
+                provider_type="huggingface",
+                config={},
+            )
+        ],
+    )
+
+
+DATASETIO_FIXTURES = ["meta_reference", "remote", "huggingface"]
 
 
 @pytest_asyncio.fixture(scope="session")
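The hunk above adds a session-scoped fixture for the huggingface datasetio provider and appends it to DATASETIO_FIXTURES. As an illustration of the same ProviderFixture pattern, a hypothetical fixture for another backend might look like the sketch below (the localfs name and empty config are assumptions for illustration only; Provider and ProviderFixture are the same types already used in this file, and their import path is not visible in the diff):

import pytest

# Hypothetical sketch only: a fixture for an assumed "localfs" datasetio backend,
# mirroring the datasetio_huggingface fixture added above. Provider and
# ProviderFixture imports are omitted because the module path is not shown here.
@pytest.fixture(scope="session")
def datasetio_localfs() -> ProviderFixture:
    return ProviderFixture(
        providers=[
            Provider(
                provider_id="localfs",
                provider_type="localfs",
                config={},
            )
        ],
    )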


@@ -34,6 +34,16 @@ DEFAULT_PROVIDER_COMBINATIONS = [
         id="meta_reference_eval_together_inference",
         marks=pytest.mark.meta_reference_eval_together_inference,
     ),
+    pytest.param(
+        {
+            "eval": "meta_reference",
+            "scoring": "meta_reference",
+            "datasetio": "huggingface",
+            "inference": "together",
+        },
+        id="meta_reference_eval_together_inference_huggingface_datasetio",
+        marks=pytest.mark.meta_reference_eval_together_inference_huggingface_datasetio,
+    ),
 ]
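These conftest changes add a provider combination that keeps meta_reference for eval and scoring and together for inference, but switches datasetio to the new huggingface fixture; the dict keys name the APIs and the values name the fixtures to use. The next hunk registers the matching marker in pytest_configure, so the combination can be selected on the command line with pytest -m meta_reference_eval_together_inference_huggingface_datasetio.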
@@ -41,6 +51,7 @@ def pytest_configure(config):
     for fixture_name in [
         "meta_reference_eval_fireworks_inference",
         "meta_reference_eval_together_inference",
+        "meta_reference_eval_together_inference_huggingface_datasetio",
     ]:
         config.addinivalue_line(
             "markers",


@@ -98,3 +98,32 @@ class Testeval:
         assert len(eval_response.generations) == 5
         assert "meta-reference::subset_of" in eval_response.scores
         assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores
+
+    @pytest.mark.asyncio
+    async def test_eval_run_benchmark_eval(self, eval_stack):
+        eval_impl, eval_tasks_impl, _, _, datasetio_impl, datasets_impl = eval_stack
+        response = await datasets_impl.list_datasets()
+        assert len(response) == 1
+
+        rows = await datasetio_impl.get_rows_paginated(
+            dataset_id="llamastack_mmlu",
+            rows_in_page=3,
+        )
+        assert len(rows.rows) == 3
+
+        scoring_functions = [
+            "meta-reference::regex_parser_multiple_choice_answer",
+        ]
+
+        response = await eval_impl.evaluate_rows(
+            input_rows=rows.rows,
+            scoring_functions=scoring_functions,
+            eval_task_config=AppEvalTaskConfig(
+                eval_candidate=ModelCandidate(
+                    model="Llama3.2-3B-Instruct",
+                    sampling_params=SamplingParams(),
+                ),
+            ),
+        )
+        print(response)
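The new test expects exactly one registered dataset, pulls three rows from llamastack_mmlu via get_rows_paginated, and scores them with meta-reference::regex_parser_multiple_choice_answer using a Llama3.2-3B-Instruct candidate; it currently ends with a bare print. A sketch of checks that could replace that print, assuming evaluate_rows returns the same generations/scores shape asserted by the earlier test in this class (these assertions are not part of the commit):

        # Hypothetical follow-up assertions; they assume the response exposes
        # .generations and .scores like eval_response in the test above.
        assert len(response.generations) == 3
        assert "meta-reference::regex_parser_multiple_choice_answer" in response.scores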