From 913e6eb50f823d7cd6ddfd1593d73105a4ffab4a Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Tue, 18 Mar 2025 20:16:24 -0700
Subject: [PATCH] Update llama_stack/apis/evaluation/evaluation.py

Co-authored-by: Ashwin Bharambe
---
 llama_stack/apis/evaluation/evaluation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/apis/evaluation/evaluation.py b/llama_stack/apis/evaluation/evaluation.py
index 4573525be..e1f02dbae 100644
--- a/llama_stack/apis/evaluation/evaluation.py
+++ b/llama_stack/apis/evaluation/evaluation.py
@@ -52,7 +52,7 @@ class EvaluationTask(BaseModel):
     """
     A task for evaluation. To specify a task, one of the following must be provided:
     - `benchmark_id`: Run evaluation task against a benchmark_id. Use this when you have a curated dataset and have settled on the graders.
-    - `dataset_id` and `grader_ids`: Run evaluation task against a dataset_id and a list of grader_ids
+    - `dataset_id` and `grader_ids`: Run evaluation task against a dataset_id and a list of grader_ids. Use this when you have datasets and / or are iterating on your graders.
     - `data_source` and `grader_ids`: Run evaluation task against a data source (e.g. rows, uri, etc.) and a list of grader_ids. Prefer this when you are early in your evaluation cycle and experimenting much more with your data and graders.

     :param benchmark_id: The benchmark ID to evaluate.
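
For context, a minimal sketch of how the three task flavors described in the patched docstring might be constructed. The field names (`benchmark_id`, `dataset_id`, `grader_ids`, `data_source`) come from the docstring; the import path mirrors the patched file, and the concrete IDs and the shape of `data_source` are illustrative assumptions, not the project's documented usage.

# Hypothetical usage sketch for the EvaluationTask docstring above.
# Import path is assumed from the patched file; IDs and the data_source
# payload shape are placeholders, not real project values.
from llama_stack.apis.evaluation.evaluation import EvaluationTask

# 1. Curated benchmark: dataset and graders are already settled.
task_from_benchmark = EvaluationTask(benchmark_id="my-benchmark")

# 2. Registered dataset plus graders you are still iterating on.
task_from_dataset = EvaluationTask(
    dataset_id="my-eval-dataset",
    grader_ids=["answer-correctness", "faithfulness"],
)

# 3. Ad-hoc data source for early experimentation (e.g. inline rows);
#    the dict shape here is an assumption for illustration only.
task_from_rows = EvaluationTask(
    data_source={"type": "rows", "rows": [{"input": "2+2?", "expected": "4"}]},
    grader_ids=["answer-correctness"],
)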