Mirror of https://github.com/meta-llama/llama-stack.git (synced 2026-01-07 09:49:56 +00:00)
fix eval
commit 9066b2ac12
parent 62a844c614

1 changed file with 1 addition and 1 deletion
@@ -83,7 +83,7 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
     job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
     assert job_status and job_status == "completed"

-    eval_response = llama_stack_client.eval.jobs.result(job_id=response.job_id, benchmark_id=benchmark_id)
+    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
     assert eval_response is not None
     assert len(eval_response.generations) == 5
     assert scoring_fn_id in eval_response.scores
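For context, the hunk above is the tail of the test's job flow: poll the eval job, then fetch its results. Below is the same pattern as a plain snippet rather than a +/- diff. It is a minimal sketch under assumptions: the wrapper function name check_eval_job is hypothetical, and the client, benchmark_id, scoring_fn_id, and the response object holding the job id come from earlier test setup (an eval run that is not part of this diff). Only the status and retrieve calls are taken from the hunk itself.

from llama_stack_client import LlamaStackClient

def check_eval_job(llama_stack_client: LlamaStackClient, response, benchmark_id: str, scoring_fn_id: str) -> None:
    # `response` is assumed to be the handle returned by an earlier eval run
    # (it exposes .job_id); that setup is outside this diff.

    # Poll the job; the test expects it to report "completed".
    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
    assert job_status and job_status == "completed"

    # After this commit, results are fetched with jobs.retrieve rather than jobs.result.
    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
    assert eval_response is not None
    assert len(eval_response.generations) == 5
    assert scoring_fn_id in eval_response.scores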