commit 659f5e86ee (parent b561cfd902)

    eval

5 changed files with 1094 additions and 1108 deletions
File diff suppressed because one or more lines are too long
@@ -847,10 +847,10 @@
    },
    "outputs": [],
    "source": [
-    "eval_rows = client.datasetio.get_rows_paginated(\n",
+    "eval_rows = client.datasets.iterrows(\n",
     "    dataset_id=simpleqa_dataset_id,\n",
-    "    rows_in_page=5,\n",
-    ")\n"
+    "    limit=5,\n",
+    ")"
    ]
   },
   {
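This hunk migrates the notebook from the removed datasetio pagination API to the datasets API. A minimal sketch of the resulting cell as plain Python, assuming a Llama Stack server on the default local port and a dataset already registered under `simpleqa_dataset_id` (both illustrative assumptions, not part of this diff):

```python
from llama_stack_client import LlamaStackClient

# Assumed setup for illustration: a locally running Llama Stack server and
# an already-registered dataset id. Neither value comes from this diff.
client = LlamaStackClient(base_url="http://localhost:8321")
simpleqa_dataset_id = "huggingface::simpleqa"

# New API: datasets.iterrows(..., limit=N) replaces
# datasetio.get_rows_paginated(..., rows_in_page=N).
eval_rows = client.datasets.iterrows(
    dataset_id=simpleqa_dataset_id,
    limit=5,
)
print(len(eval_rows.data))  # the response now exposes .data, not .rows
```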
@@ -957,7 +957,7 @@
     "\n",
     "response = client.eval.evaluate_rows_alpha(\n",
     "    benchmark_id=\"meta-reference::simpleqa\",\n",
-    "    input_rows=eval_rows.rows,\n",
+    "    input_rows=eval_rows.data,\n",
     "    scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
     "    benchmark_config={\n",
     "        \"type\": \"benchmark\",\n",
@@ -1106,7 +1106,7 @@
     "\n",
     "response = client.eval.evaluate_rows_alpha(\n",
     "    benchmark_id=\"meta-reference::simpleqa\",\n",
-    "    input_rows=eval_rows.rows,\n",
+    "    input_rows=eval_rows.data,\n",
     "    scoring_functions=[\"llm-as-judge::405b-simpleqa\"],\n",
     "    benchmark_config={\n",
     "        \"type\": \"benchmark\",\n",
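Both notebook hunks make the same one-line change: `evaluate_rows_alpha` now consumes `eval_rows.data`. As plain Python, a sketch reusing `client` and `eval_rows` from the snippet above; only the config keys visible in the hunk are reproduced, since the rest of `benchmark_config` is elided in the diff:

```python
# Sketch of the updated call. input_rows now comes from the .data
# attribute of the iterrows response (previously .rows). Only fields
# visible in the hunk are included here.
response = client.eval.evaluate_rows_alpha(
    benchmark_id="meta-reference::simpleqa",
    input_rows=eval_rows.data,  # was eval_rows.rows
    scoring_functions=["llm-as-judge::405b-simpleqa"],
    benchmark_config={
        "type": "benchmark",
        # ... remaining config not shown in the diff ...
    },
)
```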
@@ -128,9 +128,9 @@ _ = client.datasets.register(
     },
 )
 
-eval_rows = client.datasetio.get_rows_paginated(
+eval_rows = client.datasets.iterrows(
     dataset_id=simpleqa_dataset_id,
-    rows_in_page=5,
+    limit=5,
 )
 ```
 
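The docs hunk above fetches a single page. If a caller needs more rows than one `limit` allows, iterrows is designed to be called repeatedly; the sketch below assumes a `start_index` parameter and a `next_start_index` cursor field, which are plausible for a cursor-based API of this shape but are not confirmed by the diff:

```python
# Hedged paging sketch. dataset_id/limit and the .data attribute are
# confirmed by this diff; start_index and next_start_index are assumed
# names for the cursor and may differ in your client version.
start = 0
all_rows = []
while True:
    page = client.datasets.iterrows(
        dataset_id=simpleqa_dataset_id,
        start_index=start,  # assumed parameter name
        limit=100,
    )
    all_rows.extend(page.data)
    if page.next_start_index is None:  # assumed cursor field
        break
    start = page.next_start_index
```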
@@ -143,7 +143,7 @@ client.benchmarks.register(
 
 response = client.eval.evaluate_rows(
     benchmark_id="meta-reference::simpleqa",
-    input_rows=eval_rows.rows,
+    input_rows=eval_rows.data,
     scoring_functions=["llm-as-judge::405b-simpleqa"],
     benchmark_config={
         "eval_candidate": {
@@ -191,7 +191,7 @@ agent_config = {
 
 response = client.eval.evaluate_rows(
     benchmark_id="meta-reference::simpleqa",
-    input_rows=eval_rows.rows,
+    input_rows=eval_rows.data,
     scoring_functions=["llm-as-judge::405b-simpleqa"],
     benchmark_config={
         "eval_candidate": {
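The two docs hunks show `benchmark_config` keyed by `eval_candidate`. For orientation, a sketch of a model candidate; the `eval_candidate` key itself is confirmed by the hunk, while the nested field names follow the usual Llama Stack shape and the concrete values are illustrative assumptions:

```python
# Hedged sketch of a model eval candidate. "eval_candidate" is confirmed
# by the hunk; nested field names and values are assumptions.
response = client.eval.evaluate_rows(
    benchmark_id="meta-reference::simpleqa",
    input_rows=eval_rows.data,
    scoring_functions=["llm-as-judge::405b-simpleqa"],
    benchmark_config={
        "eval_candidate": {
            "type": "model",  # assumed; an agent candidate is also possible
            "model": "meta-llama/Llama-3.1-405B-Instruct",  # assumed model id
            "sampling_params": {"strategy": {"type": "greedy"}},  # assumed
        },
    },
)
```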
@@ -194,7 +194,7 @@ def run_evaluation_3():
     if st.button("Run Evaluation"):
         progress_text = "Running evaluation..."
         progress_bar = st.progress(0, text=progress_text)
-        rows = rows.rows
+        rows = rows.data
         if num_rows < total_rows:
             rows = rows[:num_rows]
 
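In the Streamlit page the same rename applies to the response object before slicing. A self-contained sketch of the progress pattern this handler uses (`st.progress` takes a float in [0, 1]; `eval_rows` stands in for the iterrows response fetched earlier in the page):

```python
import streamlit as st

# Sketch of the page's progress loop after the rename: the iterrows
# response is unwrapped via .data (not .rows) before slicing/iteration.
rows = eval_rows.data  # eval_rows: response of an earlier iterrows call
progress_bar = st.progress(0, text="Running evaluation...")
for i, row in enumerate(rows):
    # ... score `row` here, as run_evaluation_3 does ...
    progress_bar.progress((i + 1) / len(rows), text="Running evaluation...")
progress_bar.progress(1.0, text="Evaluation complete!")
```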
@@ -229,7 +229,9 @@ def run_evaluation_3():
                 output_res[scoring_fn] = []
             output_res[scoring_fn].append(eval_res.scores[scoring_fn].score_rows[0])
 
-            progress_text_container.write(f"Expand to see current processed result ({i + 1} / {len(rows)})")
+            progress_text_container.write(
+                f"Expand to see current processed result ({i + 1} / {len(rows)})"
+            )
             results_container.json(eval_res, expanded=2)
 
         progress_bar.progress(1.0, text="Evaluation complete!")
@@ -16,15 +16,17 @@ from ..datasetio.test_datasetio import register_dataset
 
 @pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
 def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval")
+    register_dataset(
+        llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval"
+    )
     response = llama_stack_client.datasets.list()
     assert any(x.identifier == "test_dataset_for_eval" for x in response)
 
-    rows = llama_stack_client.datasetio.get_rows_paginated(
+    rows = llama_stack_client.datasets.iterrows(
         dataset_id="test_dataset_for_eval",
-        rows_in_page=3,
+        limit=3,
     )
-    assert len(rows.rows) == 3
+    assert len(rows.data) == 3
 
     scoring_functions = [
         scoring_fn_id,
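The test now asserts against `.data`. Condensed, the migrated fetch-and-check reads as below (a sketch assuming the repo's `llama_stack_client` fixture and `register_dataset` helper are in scope):

```python
# Condensed sketch of the migrated test steps, assuming the repo's
# llama_stack_client fixture and register_dataset helper.
register_dataset(
    llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval"
)
rows = llama_stack_client.datasets.iterrows(
    dataset_id="test_dataset_for_eval",
    limit=3,
)
assert len(rows.data) == 3  # .data replaces the old .rows attribute
```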
@@ -40,7 +42,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
 
     response = llama_stack_client.eval.evaluate_rows(
         benchmark_id=benchmark_id,
-        input_rows=rows.rows,
+        input_rows=rows.data,
         scoring_functions=scoring_functions,
         benchmark_config={
             "eval_candidate": {
@@ -59,7 +61,9 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
 
 @pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
 def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
+    register_dataset(
+        llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2"
+    )
     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
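For context, the benchmark this test registers is ad hoc (a fresh UUID per run). A hedged sketch of the registration; `benchmark_id` comes from the hunk, while the remaining keyword arguments are assumptions about the `benchmarks.register` signature, shown with illustrative values:

```python
import uuid

# Hedged sketch of the test's benchmark registration. benchmark_id comes
# from the hunk; dataset_id and scoring_functions are assumed arguments.
benchmark_id = str(uuid.uuid4())
llama_stack_client.benchmarks.register(
    benchmark_id=benchmark_id,
    dataset_id="test_dataset_for_eval_2",    # assumed
    scoring_functions=["basic::subset_of"],  # assumed
)
```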
@@ -80,10 +84,14 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         },
     )
     assert response.job_id == "0"
-    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
+    job_status = llama_stack_client.eval.jobs.status(
+        job_id=response.job_id, benchmark_id=benchmark_id
+    )
     assert job_status and job_status == "completed"
 
-    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
+    eval_response = llama_stack_client.eval.jobs.retrieve(
+        job_id=response.job_id, benchmark_id=benchmark_id
+    )
     assert eval_response is not None
     assert len(eval_response.generations) == 5
     assert scoring_fn_id in eval_response.scores
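This final hunk only reflows the status/retrieve calls, but it documents the job lifecycle: run, check status for "completed", then retrieve generations and scores. A polling sketch built from the calls visible here (`response` is the job handle returned by the benchmark run above; the loop and sleep interval are illustrative additions, since the test itself checks status only once):

```python
import time

# Polling sketch using only calls visible in this diff. The retry loop
# is an illustrative addition, not part of the test.
while True:
    job_status = llama_stack_client.eval.jobs.status(
        job_id=response.job_id, benchmark_id=benchmark_id
    )
    if job_status == "completed":
        break
    time.sleep(2)

eval_response = llama_stack_client.eval.jobs.retrieve(
    job_id=response.job_id, benchmark_id=benchmark_id
)
assert len(eval_response.generations) == 5  # one per input row, per the test
```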