Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-10 04:08:31 +00:00)
fix notebook

commit 1a07703d8b (parent 06f01f95c1)

2 changed files with 5 additions and 15 deletions
@@ -229,9 +229,7 @@ def run_evaluation_3():
                 output_res[scoring_fn] = []
             output_res[scoring_fn].append(eval_res.scores[scoring_fn].score_rows[0])

-            progress_text_container.write(
-                f"Expand to see current processed result ({i + 1} / {len(rows)})"
-            )
+            progress_text_container.write(f"Expand to see current processed result ({i + 1} / {len(rows)})")
             results_container.json(eval_res, expanded=2)

         progress_bar.progress(1.0, text="Evaluation complete!")
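The call collapsed above sits in a Streamlit evaluation page: progress_bar, progress_text_container, and results_container are Streamlit widgets, and eval_res.scores maps scoring-function IDs to score rows. A minimal sketch of the surrounding loop, assuming st.progress / st.empty / st.container widgets and hypothetical load_rows / run_one_row helpers (neither appears in this diff):

import streamlit as st

rows = load_rows()                    # hypothetical: rows to evaluate
progress_bar = st.progress(0.0, text="Running evaluation...")
progress_text_container = st.empty()  # one slot, overwritten each step
results_container = st.container()

output_res = {}
for i, r in enumerate(rows):
    eval_res = run_one_row(r)         # hypothetical: returns per-row scores
    for scoring_fn in eval_res.scores:
        if scoring_fn not in output_res:
            output_res[scoring_fn] = []
        output_res[scoring_fn].append(eval_res.scores[scoring_fn].score_rows[0])

    progress_bar.progress((i + 1) / len(rows))
    progress_text_container.write(f"Expand to see current processed result ({i + 1} / {len(rows)})")
    results_container.json(eval_res, expanded=2)

progress_bar.progress(1.0, text="Evaluation complete!")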
@@ -16,9 +16,7 @@ from ..datasetio.test_datasetio import register_dataset

 @pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
 def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(
-        llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval"
-    )
+    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval")
     response = llama_stack_client.datasets.list()
     assert any(x.identifier == "test_dataset_for_eval" for x in response)
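This test (and test_evaluate_benchmark below) takes llama_stack_client and text_model_id pytest fixtures that the hunk does not show. A sketch of a conftest that could supply them, assuming a distribution already serving on the default port; the base URL and model ID are assumptions, not values from this repository:

# conftest.py (hypothetical sketch)
import pytest
from llama_stack_client import LlamaStackClient

@pytest.fixture
def llama_stack_client():
    # Assumes a Llama Stack server is already running; adjust the URL to your setup.
    return LlamaStackClient(base_url="http://localhost:8321")

@pytest.fixture
def text_model_id():
    # Hypothetical model identifier; use whatever model your stack serves.
    return "meta-llama/Llama-3.2-3B-Instruct"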
@@ -61,9 +59,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):

 @pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
 def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(
-        llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2"
-    )
+    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
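The benchmarks.register call is cut off by the hunk boundary. A sketch of what a complete registration might look like; the dataset_id and scoring_functions fields are inferred from the surrounding test, not copied from the hidden lines:

import uuid

benchmark_id = str(uuid.uuid4())
llama_stack_client.benchmarks.register(
    benchmark_id=benchmark_id,
    dataset_id="test_dataset_for_eval_2",  # assumption: the dataset registered above
    scoring_functions=[scoring_fn_id],     # assumption: e.g. "basic::subset_of"
)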
@@ -84,14 +80,10 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         },
     )
     assert response.job_id == "0"
-    job_status = llama_stack_client.eval.jobs.status(
-        job_id=response.job_id, benchmark_id=benchmark_id
-    )
+    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
     assert job_status and job_status == "completed"

-    eval_response = llama_stack_client.eval.jobs.retrieve(
-        job_id=response.job_id, benchmark_id=benchmark_id
-    )
+    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
     assert eval_response is not None
     assert len(eval_response.generations) == 5
     assert scoring_fn_id in eval_response.scores
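The two collapsed calls hit the jobs API with the same keyword pair. A small polling helper built only on the eval.jobs.status / eval.jobs.retrieve calls this test exercises; the timeout loop and cadence are assumptions, while "completed" is the status string the test itself asserts:

import time

def wait_for_eval_job(client, benchmark_id, job_id, timeout_s=300, poll_s=2.0):
    # Poll until the job reports "completed", then fetch its results.
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        status = client.eval.jobs.status(job_id=job_id, benchmark_id=benchmark_id)
        if status == "completed":
            return client.eval.jobs.retrieve(job_id=job_id, benchmark_id=benchmark_id)
        time.sleep(poll_s)
    raise TimeoutError(f"eval job {job_id} did not complete in {timeout_s}s")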