[Evals API][6/n] meta-reference llm as judge, registration for ScoringFnDefs (#330)

* wip scoring refactor

* llm as judge, move folders

* test full generation + eval

* extract score regex to llm context

* remove prints, cleanup braintrust in this branch

* change json -> class

* remove initialize

* address nits

* check identifier prefix

* update MANIFEST
Authored by Xi Yan, 2024-10-28 14:08:42 -07:00 (committed via GitHub). Commit 7b8748c53e, parent 04a4784287.
20 changed files with 360 additions and 50 deletions
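The "llm as judge" and "extract score regex to llm context" bullets describe the core mechanic added here: the judge model's free-form answer is reduced to a numeric score by the regex patterns carried on the scoring function's LLMAsJudgeContext (the judge_score_regex field exercised in the tests below). A minimal sketch of that extraction step, assuming a hypothetical helper name (extract_judge_score) that is not part of this PR:

import re
from typing import List, Optional


def extract_judge_score(judge_response: str, judge_score_regex: List[str]) -> Optional[float]:
    """Hypothetical helper: pull a numeric score out of a judge model's answer
    using the regex list stored on an LLM-as-judge scoring function."""
    for pattern in judge_score_regex:
        match = re.search(pattern, judge_response)
        if match:
            # The first capture group is assumed to hold the score,
            # e.g. "Number: 7" with r"Number: (\d+)" yields 7.0.
            return float(match.group(1))
    # No pattern matched; the caller decides how to treat an unscorable answer.
    return None


# Example with the regex used in the registration test below:
print(extract_judge_score("Number: 7", [r"Number: (\d+)"]))  # 7.0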


@@ -70,6 +70,7 @@ async def register_dataset(
     if for_generation:
         dataset_schema = {
             "expected_answer": StringType(),
+            "input_query": StringType(),
             "chat_completion_input": ChatCompletionInputType(),
         }
     else:
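With the field added above, a dataset registered for generation carries the raw input_query next to the expected answer and the pre-built chat completion input. A hypothetical row under that schema, purely for illustration; the concrete value encodings are assumptions, not taken from this diff:

# Hypothetical example row for a dataset registered with for_generation=True;
# the keys mirror the schema above, the values are invented for illustration.
example_row = {
    "input_query": "What is the capital of France?",
    "expected_answer": "Paris",
    "chat_completion_input": [
        {"role": "user", "content": "What is the capital of France?"},
    ],
}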


@@ -16,3 +16,7 @@ providers:
     provider_type: remote::tgi
     config:
       url: http://127.0.0.1:5009
+  - provider_id: test-tgi-2
+    provider_type: remote::tgi
+    config:
+      url: http://127.0.0.1:5010


@@ -65,7 +65,10 @@ async def test_eval(eval_settings):
             model="Llama3.2-1B-Instruct",
             sampling_params=SamplingParams(),
         ),
-        scoring_functions=["subset_of"],
+        scoring_functions=[
+            "meta-reference::subset_of",
+            "meta-reference::llm_as_judge_8b_correctness",
+        ],
     )
     assert response.job_id == "0"
     job_status = await eval_impl.job_status(response.job_id)
@@ -76,4 +79,5 @@ async def test_eval(eval_settings):
     assert eval_response is not None
     assert len(eval_response.generations) == 5
-    assert "subset_of" in eval_response.scores
+    assert "meta-reference::subset_of" in eval_response.scores
+    assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores


@@ -7,3 +7,8 @@ providers:
   - provider_id: test-meta
     provider_type: meta-reference
     config: {}
+  inference:
+  - provider_id: tgi0
+    provider_type: remote::tgi
+    config:
+      url: http://127.0.0.1:5009


@@ -33,7 +33,9 @@ from llama_stack.providers.tests.resolver import resolve_impls_for_test
 @pytest_asyncio.fixture(scope="session")
 async def scoring_settings():
-    impls = await resolve_impls_for_test(Api.scoring, deps=[Api.datasetio])
+    impls = await resolve_impls_for_test(
+        Api.scoring, deps=[Api.datasetio, Api.inference]
+    )
     return {
         "scoring_impl": impls[Api.scoring],
         "scoring_functions_impl": impls[Api.scoring_functions],
@@ -48,7 +50,50 @@ async def test_scoring_functions_list(scoring_settings):
     assert isinstance(scoring_functions, list)
     assert len(scoring_functions) > 0
     function_ids = [f.identifier for f in scoring_functions]
-    assert "equality" in function_ids
+    assert "meta-reference::equality" in function_ids
+    assert "meta-reference::subset_of" in function_ids
+    assert "meta-reference::llm_as_judge_8b_correctness" in function_ids
+
+
+@pytest.mark.asyncio
+async def test_scoring_functions_register(scoring_settings):
+    scoring_impl = scoring_settings["scoring_impl"]
+    scoring_functions_impl = scoring_settings["scoring_functions_impl"]
+    datasets_impl = scoring_settings["datasets_impl"]
+    test_prompt = """Output a number between 0 to 10. Your answer must match the format \n Number: <answer>"""
+    # register the scoring function
+    await scoring_functions_impl.register_scoring_function(
+        ScoringFnDefWithProvider(
+            identifier="meta-reference::llm_as_judge_8b_random",
+            description="Llm As Judge Scoring Function",
+            parameters=[],
+            return_type=NumberType(),
+            context=LLMAsJudgeContext(
+                prompt_template=test_prompt,
+                judge_model="Llama3.1-8B-Instruct",
+                judge_score_regex=[r"Number: (\d+)"],
+            ),
+            provider_id="test-meta",
+        )
+    )
+
+    scoring_functions = await scoring_functions_impl.list_scoring_functions()
+    assert isinstance(scoring_functions, list)
+    assert len(scoring_functions) > 0
+    function_ids = [f.identifier for f in scoring_functions]
+    assert "meta-reference::llm_as_judge_8b_random" in function_ids
+
+    # test score using newly registered scoring function
+    await register_dataset(datasets_impl)
+    response = await datasets_impl.list_datasets()
+    assert len(response) == 1
+    response = await scoring_impl.score_batch(
+        dataset_id=response[0].identifier,
+        scoring_functions=[
+            "meta-reference::llm_as_judge_8b_random",
+        ],
+    )
+    assert "meta-reference::llm_as_judge_8b_random" in response.results


 @pytest.mark.asyncio
@@ -62,8 +107,14 @@ async def test_scoring_score(scoring_settings):
     response = await scoring_impl.score_batch(
         dataset_id=response[0].identifier,
-        scoring_functions=["equality"],
+        scoring_functions=[
+            "meta-reference::equality",
+            "meta-reference::subset_of",
+            "meta-reference::llm_as_judge_8b_correctness",
+        ],
     )
-    assert len(response.results) == 1
-    assert "equality" in response.results
+    assert len(response.results) == 3
+    assert "meta-reference::equality" in response.results
+    assert "meta-reference::subset_of" in response.results
+    assert "meta-reference::llm_as_judge_8b_correctness" in response.results