Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-03 09:21:45 +00:00)
fix eval tests model registration

commit 00658e02f8, parent 3b5a33d921
3 changed files with 26 additions and 24 deletions
@@ -80,6 +80,13 @@ def pytest_addoption(parser):
         help="Specify the inference model to use for testing",
     )
+
+    parser.addoption(
+        "--judge-model",
+        action="store",
+        default="meta-llama/Llama-3.1-8B-Instruct",
+        help="Specify the judge model to use for testing",
+    )


 def pytest_generate_tests(metafunc):
     if "eval_stack" in metafunc.fixturenames:
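The hunk above only registers the new --judge-model command-line option; how the option values reach the tests as the inference_model and judge_model fixtures is not shown in this diff. A minimal sketch of one common way to expose such an option as a session-scoped fixture (the fixture name and placement are assumptions, not taken from the repository):

import pytest


@pytest.fixture(scope="session")
def judge_model(request):
    # Return the value passed via --judge-model, falling back to the default
    # declared in pytest_addoption above.
    return request.config.getoption("--judge-model")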
@@ -7,7 +7,7 @@
 import pytest
 import pytest_asyncio

-from llama_stack.distribution.datatypes import Api, Provider
+from llama_stack.distribution.datatypes import Api, ModelInput, Provider

 from llama_stack.providers.tests.resolver import construct_stack_for_test
 from ..conftest import ProviderFixture, remote_stack_fixture
@@ -35,7 +35,7 @@ EVAL_FIXTURES = ["meta_reference", "remote"]


 @pytest_asyncio.fixture(scope="session")
-async def eval_stack(request):
+async def eval_stack(request, inference_model, judge_model):
     fixture_dict = request.param

     providers = {}
@@ -66,6 +66,13 @@ async def eval_stack(request):
         ],
         providers,
         provider_data,
+        models=[
+            ModelInput(model_id=model)
+            for model in [
+                inference_model,
+                judge_model,
+            ]
+        ],
     )

     return test_stack.impls
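With this change the inference and judge models are registered once when the test stack is constructed, which is why the per-test register_model loops are deleted in the hunks below. As a rough illustration, assuming --inference-model resolves to "meta-llama/Llama-3.2-3B-Instruct" (an assumed value, not shown in this diff) and --judge-model keeps its default, the comprehension passed to construct_stack_for_test expands to:

from llama_stack.distribution.datatypes import ModelInput

models = [
    ModelInput(model_id="meta-llama/Llama-3.2-3B-Instruct"),  # assumed --inference-model value
    ModelInput(model_id="meta-llama/Llama-3.1-8B-Instruct"),  # --judge-model default
]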
@@ -38,7 +38,7 @@ class Testeval:
         assert isinstance(response, list)

     @pytest.mark.asyncio
-    async def test_eval_evaluate_rows(self, eval_stack):
+    async def test_eval_evaluate_rows(self, eval_stack, inference_model, judge_model):
         eval_impl, eval_tasks_impl, datasetio_impl, datasets_impl, models_impl = (
             eval_stack[Api.eval],
             eval_stack[Api.eval_tasks],
@@ -46,11 +46,7 @@ class Testeval:
             eval_stack[Api.datasets],
             eval_stack[Api.models],
         )
-        for model_id in ["Llama3.2-3B-Instruct", "Llama3.1-8B-Instruct"]:
-            await models_impl.register_model(
-                model_id=model_id,
-                provider_id="",
-            )
+
         await register_dataset(
             datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval"
         )
@@ -77,12 +73,12 @@ class Testeval:
             scoring_functions=scoring_functions,
             task_config=AppEvalTaskConfig(
                 eval_candidate=ModelCandidate(
-                    model="Llama3.2-3B-Instruct",
+                    model=inference_model,
                     sampling_params=SamplingParams(),
                 ),
                 scoring_params={
                     "meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
-                        judge_model="Llama3.1-8B-Instruct",
+                        judge_model=judge_model,
                         prompt_template=JUDGE_PROMPT,
                         judge_score_regexes=[
                             r"Total rating: (\d+)",
@@ -97,18 +93,14 @@ class Testeval:
         assert "basic::equality" in response.scores

     @pytest.mark.asyncio
-    async def test_eval_run_eval(self, eval_stack):
+    async def test_eval_run_eval(self, eval_stack, inference_model, judge_model):
         eval_impl, eval_tasks_impl, datasets_impl, models_impl = (
             eval_stack[Api.eval],
             eval_stack[Api.eval_tasks],
             eval_stack[Api.datasets],
             eval_stack[Api.models],
         )
-        for model_id in ["Llama3.2-3B-Instruct", "Llama3.1-8B-Instruct"]:
-            await models_impl.register_model(
-                model_id=model_id,
-                provider_id="",
-            )
+
         await register_dataset(
             datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval"
         )
@@ -127,7 +119,7 @@ class Testeval:
             task_id=task_id,
             task_config=AppEvalTaskConfig(
                 eval_candidate=ModelCandidate(
-                    model="Llama3.2-3B-Instruct",
+                    model=inference_model,
                     sampling_params=SamplingParams(),
                 ),
             ),
@@ -142,18 +134,14 @@ class Testeval:
         assert "basic::subset_of" in eval_response.scores

     @pytest.mark.asyncio
-    async def test_eval_run_benchmark_eval(self, eval_stack):
+    async def test_eval_run_benchmark_eval(self, eval_stack, inference_model):
         eval_impl, eval_tasks_impl, datasets_impl, models_impl = (
             eval_stack[Api.eval],
             eval_stack[Api.eval_tasks],
             eval_stack[Api.datasets],
             eval_stack[Api.models],
         )
-        for model_id in ["Llama3.2-3B-Instruct", "Llama3.1-8B-Instruct"]:
-            await models_impl.register_model(
-                model_id=model_id,
-                provider_id="",
-            )
+
         response = await datasets_impl.list_datasets()
         assert len(response) > 0
         if response[0].provider_id != "huggingface":
@@ -192,7 +180,7 @@ class Testeval:
             task_id=benchmark_id,
             task_config=BenchmarkEvalTaskConfig(
                 eval_candidate=ModelCandidate(
-                    model="Llama3.2-3B-Instruct",
+                    model=inference_model,
                     sampling_params=SamplingParams(),
                 ),
                 num_examples=3,
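After these changes the tests no longer hard-code "Llama3.2-3B-Instruct" and "Llama3.1-8B-Instruct"; they use whichever models the command-line options select. Assuming the test layout implied by the imports above (the exact path is an assumption), a run might look like:

pytest llama_stack/providers/tests/eval/test_eval.py --inference-model=<inference-model-id> --judge-model=meta-llama/Llama-3.1-8B-Instruct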