Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-08-12 04:50:39 +00:00
fix eval
commit 2541dcc162
parent 6e65b9282d
1 changed file with 28 additions and 153 deletions
@@ -57,158 +57,33 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
     assert scoring_fn_id in response.scores


-# @pytest.mark.skip(reason="FIXME FIXME @yanxi0830 this needs to be migrated to use the API")
-# class Testeval:
-#     @pytest.mark.asyncio
-#     async def test_benchmarks_list(self, eval_stack):
-#         # NOTE: this needs you to ensure that you are starting from a clean state
-#         # but so far we don't have an unregister API unfortunately, so be careful
-#         benchmarks_impl = eval_stack[Api.benchmarks]
-#         response = await benchmarks_impl.list_benchmarks()
-#         assert isinstance(response, list)
-
-#     @pytest.mark.asyncio
-#     async def test_eval_evaluate_rows(self, eval_stack, inference_model, judge_model):
-#         eval_impl, benchmarks_impl, datasetio_impl, datasets_impl = (
-#             eval_stack[Api.eval],
-#             eval_stack[Api.benchmarks],
-#             eval_stack[Api.datasetio],
-#             eval_stack[Api.datasets],
-#         )
-#         await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")
-#         response = await datasets_impl.list_datasets()
-
-#         rows = await datasetio_impl.get_rows_paginated(
-#             dataset_id="test_dataset_for_eval",
-#             rows_in_page=3,
-#         )
-#         assert len(rows.rows) == 3
-
-#         scoring_functions = [
-#             "basic::equality",
-#         ]
-#         benchmark_id = "meta-reference::app_eval"
-#         await benchmarks_impl.register_benchmark(
-#             benchmark_id=benchmark_id,
-#             dataset_id="test_dataset_for_eval",
-#             scoring_functions=scoring_functions,
-#         )
-#         response = await eval_impl.evaluate_rows(
-#             benchmark_id=benchmark_id,
-#             input_rows=rows.rows,
-#             scoring_functions=scoring_functions,
-#             benchmark_config=dict(
-#                 eval_candidate=ModelCandidate(
-#                     model=inference_model,
-#                     sampling_params=SamplingParams(),
-#                 ),
-#                 scoring_params={
-#                     "meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
-#                         judge_model=judge_model,
-#                         prompt_template=JUDGE_PROMPT,
-#                         judge_score_regexes=[
-#                             r"Total rating: (\d+)",
-#                             r"rating: (\d+)",
-#                             r"Rating: (\d+)",
-#                         ],
-#                     )
-#                 },
-#             ),
-#         )
-#         assert len(response.generations) == 3
-#         assert "basic::equality" in response.scores
-
-#     @pytest.mark.asyncio
-#     async def test_eval_run_eval(self, eval_stack, inference_model, judge_model):
-#         eval_impl, benchmarks_impl, datasets_impl = (
-#             eval_stack[Api.eval],
-#             eval_stack[Api.benchmarks],
-#             eval_stack[Api.datasets],
-#         )
-
-#         await register_dataset(datasets_impl, for_generation=True, dataset_id="test_dataset_for_eval")
-
-#         scoring_functions = [
-#             "basic::subset_of",
-#         ]
-
-#         benchmark_id = "meta-reference::app_eval-2"
-#         await benchmarks_impl.register_benchmark(
-#             benchmark_id=benchmark_id,
-#             dataset_id="test_dataset_for_eval",
-#             scoring_functions=scoring_functions,
-#         )
-#         response = await eval_impl.run_eval(
-#             benchmark_id=benchmark_id,
-#             benchmark_config=dict(
-#                 eval_candidate=ModelCandidate(
-#                     model=inference_model,
-#                     sampling_params=SamplingParams(),
-#                 ),
-#             ),
-#         )
-#         assert response.job_id == "0"
-#         job_status = await eval_impl.job_status(benchmark_id, response.job_id)
-#         assert job_status and job_status.value == "completed"
-#         eval_response = await eval_impl.job_result(benchmark_id, response.job_id)
-
-#         assert eval_response is not None
-#         assert len(eval_response.generations) == 5
-#         assert "basic::subset_of" in eval_response.scores
-
-#     @pytest.mark.asyncio
-#     async def test_eval_run_benchmark_eval(self, eval_stack, inference_model):
-#         eval_impl, benchmarks_impl, datasets_impl = (
-#             eval_stack[Api.eval],
-#             eval_stack[Api.benchmarks],
-#             eval_stack[Api.datasets],
-#         )
-
-#         response = await datasets_impl.list_datasets()
-#         assert len(response) > 0
-#         if response[0].provider_id != "huggingface":
-#             pytest.skip("Only huggingface provider supports pre-registered remote datasets")
-
-#         await datasets_impl.register_dataset(
-#             dataset_id="mmlu",
-#             dataset_schema={
-#                 "input_query": StringType(),
-#                 "expected_answer": StringType(),
-#                 "chat_completion_input": ChatCompletionInputType(),
-#             },
-#             url=URL(uri="https://huggingface.co/datasets/llamastack/evals"),
-#             metadata={
-#                 "path": "llamastack/evals",
-#                 "name": "evals__mmlu__details",
-#                 "split": "train",
-#             },
-#         )
-
-#         # register eval task
-#         await benchmarks_impl.register_benchmark(
-#             benchmark_id="meta-reference-mmlu",
-#             dataset_id="mmlu",
-#             scoring_functions=["basic::regex_parser_multiple_choice_answer"],
-#         )
-
-#         # list benchmarks
-#         response = await benchmarks_impl.list_benchmarks()
-#         assert len(response) > 0
-
-#         benchmark_id = "meta-reference-mmlu"
-#         response = await eval_impl.run_eval(
-#             benchmark_id=benchmark_id,
-#             benchmark_config=dict(
-#                 eval_candidate=ModelCandidate(
-#                     model=inference_model,
-#                     sampling_params=SamplingParams(),
-#                 ),
-#                 num_examples=3,
-#             ),
-#         )
-#         job_status = await eval_impl.job_status(benchmark_id, response.job_id)
-#         assert job_status and job_status.value == "completed"
-#         eval_response = await eval_impl.job_result(benchmark_id, response.job_id)
-#         assert eval_response is not None
-#         assert len(eval_response.generations) == 3
+@pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
+def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
+    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
+    benchmark_id = str(uuid.uuid4())
+    llama_stack_client.benchmarks.register(
+        benchmark_id=benchmark_id,
+        dataset_id="test_dataset_for_eval_2",
+        scoring_functions=[scoring_fn_id],
+    )
+
+    response = llama_stack_client.eval.run_eval(
+        benchmark_id=benchmark_id,
+        benchmark_config={
+            "eval_candidate": {
+                "type": "model",
+                "model": text_model_id,
+                "sampling_params": {
+                    "temperature": 0.0,
+                },
+            },
+        },
+    )
+    assert response.job_id == "0"
+    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
+    assert job_status and job_status.value == "completed"
+
+    eval_response = llama_stack_client.eval.jobs.result(job_id=response.job_id, benchmark_id=benchmark_id)
+    assert eval_response is not None
+    assert len(eval_response.generations) == 5
+    assert scoring_fn_id in eval_response.scores
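For readers skimming the change: the commented-out Testeval class, which went through the internal eval_impl / benchmarks_impl provider interfaces, is removed, and the benchmark flow is now exercised end to end through the llama_stack_client SDK. Below is a minimal sketch of that same register → run → poll → fetch sequence outside of pytest, using only the client calls that appear in the new test. The server URL, the model id, and the assumption that the dataset "test_dataset_for_eval_2" is already registered are illustrative and not part of this commit.

# Minimal sketch of the client-side eval flow exercised by the new test.
# Assumptions (not from this commit): a Llama Stack server at localhost:8321,
# a dataset already registered as "test_dataset_for_eval_2", and the model id below.
import uuid

from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed server address

# Register an ad-hoc benchmark against the pre-registered dataset.
benchmark_id = str(uuid.uuid4())
client.benchmarks.register(
    benchmark_id=benchmark_id,
    dataset_id="test_dataset_for_eval_2",
    scoring_functions=["basic::subset_of"],
)

# Kick off an eval run with a model candidate at temperature 0.
job = client.eval.run_eval(
    benchmark_id=benchmark_id,
    benchmark_config={
        "eval_candidate": {
            "type": "model",
            "model": "meta-llama/Llama-3.1-8B-Instruct",  # assumed model id
            "sampling_params": {"temperature": 0.0},
        },
    },
)

# Check the job status and, once completed, fetch generations and scores.
status = client.eval.jobs.status(job_id=job.job_id, benchmark_id=benchmark_id)
if status and status.value == "completed":
    result = client.eval.jobs.result(job_id=job.job_id, benchmark_id=benchmark_id)
    print(result.scores)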