From 820b9a00c7803ab7d1bc4d55b7988e29de347c9b Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Tue, 18 Mar 2025 20:16:15 -0700
Subject: [PATCH] Update llama_stack/apis/evaluation/evaluation.py

Co-authored-by: Ashwin Bharambe
---
 llama_stack/apis/evaluation/evaluation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_stack/apis/evaluation/evaluation.py b/llama_stack/apis/evaluation/evaluation.py
index 51f5c371c..4573525be 100644
--- a/llama_stack/apis/evaluation/evaluation.py
+++ b/llama_stack/apis/evaluation/evaluation.py
@@ -53,7 +53,7 @@ class EvaluationTask(BaseModel):
     A task for evaluation. To specify a task, one of the following must be provided:
     - `benchmark_id`: Run evaluation task against a benchmark_id. Use this when you have a curated dataset and have settled on the graders.
     - `dataset_id` and `grader_ids`: Run evaluation task against a dataset_id and a list of grader_ids
-    - `data_source` and `grader_ids`: Run evaluation task against a data source (e.g. rows, uri, etc.) and a list of grader_ids
+    - `data_source` and `grader_ids`: Run evaluation task against a data source (e.g. rows, uri, etc.) and a list of grader_ids. Prefer this when you are early in your evaluation cycle and still experimenting with your data and graders.

     :param benchmark_id: The benchmark ID to evaluate.
     :param dataset_id: The dataset ID to evaluate.
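
For context, the three ways of specifying a task can be illustrated with a minimal, self-contained sketch. The model and validator below are assumptions that mirror the docstring's rules, not the actual `EvaluationTask` implementation in llama_stack:

```python
from typing import Any, Optional

from pydantic import BaseModel, model_validator


class EvaluationTaskSketch(BaseModel):
    """Hypothetical stand-in mirroring the documented EvaluationTask fields."""

    benchmark_id: Optional[str] = None
    dataset_id: Optional[str] = None
    grader_ids: Optional[list[str]] = None
    data_source: Optional[Any] = None  # e.g. inline rows, a uri, etc.

    @model_validator(mode="after")
    def _check_exactly_one_spec(self) -> "EvaluationTaskSketch":
        # Enforce the docstring's rule: exactly one of the three
        # documented combinations must be provided.
        provided = [
            self.benchmark_id is not None,
            self.dataset_id is not None and self.grader_ids is not None,
            self.data_source is not None and self.grader_ids is not None,
        ]
        if sum(provided) != 1:
            raise ValueError(
                "Provide exactly one of: benchmark_id, "
                "(dataset_id, grader_ids), or (data_source, grader_ids)"
            )
        return self


# The early-iteration case this patch documents: inline rows plus graders,
# with no registered dataset or benchmark yet.
task = EvaluationTaskSketch(
    data_source={"rows": [{"input": "2 + 2 =", "expected": "4"}]},
    grader_ids=["exact-match"],
)
```

As the docstring suggests, the natural progression is to start from an ad hoc `data_source` while iterating, move to a registered `dataset_id` once the data stabilizes, and finally settle on a `benchmark_id` once the graders are fixed as well.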