Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-03 09:21:45 +00:00)
Commit 3b5a33d921: parameterize judge_model
Parent: a4bcfb8bba

3 changed files with 28 additions and 19 deletions
File 1 of 3 (pytest configuration):

@@ -47,6 +47,7 @@ def pytest_configure(config):
     for fixture_name in [
         "basic_scoring_together_inference",
         "braintrust_scoring_together_inference",
+        "llm_as_judge_scoring_together_inference",
     ]:
         config.addinivalue_line(
             "markers",
@@ -61,9 +62,23 @@ def pytest_addoption(parser):
         default="meta-llama/Llama-3.2-3B-Instruct",
         help="Specify the inference model to use for testing",
     )
+    parser.addoption(
+        "--judge-model",
+        action="store",
+        default="meta-llama/Llama-3.1-8B-Instruct",
+        help="Specify the judge model to use for testing",
+    )


 def pytest_generate_tests(metafunc):
+    judge_model = metafunc.config.getoption("--judge-model")
+    if "judge_model" in metafunc.fixturenames:
+        metafunc.parametrize(
+            "judge_model",
+            [pytest.param(judge_model, id="")],
+            indirect=True,
+        )
+
     if "scoring_stack" in metafunc.fixturenames:
         available_fixtures = {
             "scoring": SCORING_FIXTURES,
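A minimal usage sketch for the new option, assuming only what the hunks above add; the example model id and test body are illustrative and not part of this commit:

# Example invocation (test selection flags omitted; only the new option shown):
#   pytest --judge-model meta-llama/Llama-3.1-70B-Instruct ...
#
# Any test that requests the `judge_model` fixture is parametrized with the CLI
# value by pytest_generate_tests (default: meta-llama/Llama-3.1-8B-Instruct).
def test_judge_model_is_set(judge_model):
    assert judge_model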
File 2 of 3 (provider fixtures):

@@ -21,6 +21,13 @@ def scoring_remote() -> ProviderFixture:
     return remote_stack_fixture()


+@pytest.fixture(scope="session")
+def judge_model(request):
+    if hasattr(request, "param"):
+        return request.param
+    return request.config.getoption("--judge-model", None)
+
+
 @pytest.fixture(scope="session")
 def scoring_basic() -> ProviderFixture:
     return ProviderFixture(
@@ -66,7 +73,7 @@ SCORING_FIXTURES = ["basic", "remote", "braintrust", "llm_as_judge"]


 @pytest_asyncio.fixture(scope="session")
-async def scoring_stack(request, inference_model):
+async def scoring_stack(request, inference_model, judge_model):
     fixture_dict = request.param

     providers = {}
@@ -85,8 +92,7 @@ async def scoring_stack(request, inference_model):
             ModelInput(model_id=model)
             for model in [
                 inference_model,
-                "Llama3.1-405B-Instruct",
-                "Llama3.1-8B-Instruct",
+                judge_model,
             ]
         ],
     )
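For context, a sketch of what scoring_stack registers with no CLI overrides; it assumes only the ModelInput usage shown above plus the two option defaults from the first file:

# The list comprehension in scoring_stack expands to roughly:
models = [
    ModelInput(model_id=model)
    for model in [
        "meta-llama/Llama-3.2-3B-Instruct",  # --inference-model default
        "meta-llama/Llama-3.1-8B-Instruct",  # --judge-model default
    ]
]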
File 3 of 3 (scoring tests):

@@ -64,12 +64,6 @@ class TestScoring:
         response = await datasets_impl.list_datasets()
         assert len(response) == 1

-        for model_id in ["Llama3.2-3B-Instruct", "Llama3.1-8B-Instruct"]:
-            await models_impl.register_model(
-                model_id=model_id,
-                provider_id="",
-            )
-
         # scoring individual rows
         rows = await datasetio_impl.get_rows_paginated(
             dataset_id="test_dataset",
@@ -103,7 +97,7 @@ class TestScoring:

     @pytest.mark.asyncio
     async def test_scoring_score_with_params_llm_as_judge(
-        self, scoring_stack, sample_judge_prompt_template
+        self, scoring_stack, sample_judge_prompt_template, judge_model
     ):
         (
             scoring_impl,
@@ -122,12 +116,6 @@ class TestScoring:
         response = await datasets_impl.list_datasets()
         assert len(response) == 1

-        for model_id in ["Llama3.1-405B-Instruct"]:
-            await models_impl.register_model(
-                model_id=model_id,
-                provider_id="",
-            )
-
         scoring_fns_list = await scoring_functions_impl.list_scoring_functions()
         provider_id = scoring_fns_list[0].provider_id
         if provider_id == "braintrust" or provider_id == "basic":
@@ -142,7 +130,7 @@ class TestScoring:

         scoring_functions = {
             "llm-as-judge::base": LLMAsJudgeScoringFnParams(
-                judge_model="Llama3.1-405B-Instruct",
+                judge_model=judge_model,
                 prompt_template=sample_judge_prompt_template,
                 judge_score_regexes=[r"Score: (\d+)"],
                 aggregation_functions=[AggregationFunctionType.categorical_count],
@@ -170,7 +158,7 @@ class TestScoring:

     @pytest.mark.asyncio
     async def test_scoring_score_with_aggregation_functions(
-        self, scoring_stack, sample_judge_prompt_template
+        self, scoring_stack, sample_judge_prompt_template, judge_model
     ):
         (
             scoring_impl,
@@ -204,7 +192,7 @@ class TestScoring:
             if x.provider_id == "llm-as-judge":
                 aggr_fns = [AggregationFunctionType.categorical_count]
                 scoring_functions[x.identifier] = LLMAsJudgeScoringFnParams(
-                    judge_model="Llama3.1-405B-Instruct",
+                    judge_model=judge_model,
                     prompt_template=sample_judge_prompt_template,
                     judge_score_regexes=[r"Score: (\d+)"],
                     aggregation_functions=aggr_fns,
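As a side note on judge_score_regexes, a minimal sketch of how a score could be extracted from a judge completion; the completion text is invented, the regex is the one used above:

import re

judge_completion = "The answer covers the key points.\nScore: 4"  # made-up judge output
match = re.search(r"Score: (\d+)", judge_completion)
score = int(match.group(1)) if match else None  # -> 4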