feat(dataset api): (1.6/n) fix all iterrows callsites (#1660)

# What does this PR do?
- As the title says: update all `get_rows_paginated` / `iterrows` call sites to the new dataset API (`datasets.iterrows`, `limit=`, `rows.data`).

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan
CI

```
pytest -v -s --nbval-lax ./docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
```
<img width="587" alt="image"
src="https://github.com/user-attachments/assets/4a25f493-501e-43f4-9836-d9802223a93a"
/>


[//]: # (## Documentation)
This commit is contained in:
Xi Yan 2025-03-15 17:24:16 -07:00 committed by GitHub
parent f2d93324e9
commit a6fa3aa5a2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 1370 additions and 1219 deletions

View file

@@ -20,11 +20,11 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
response = llama_stack_client.datasets.list()
assert any(x.identifier == "test_dataset_for_eval" for x in response)
-    rows = llama_stack_client.datasetio.get_rows_paginated(
+    rows = llama_stack_client.datasets.iterrows(
dataset_id="test_dataset_for_eval",
-        rows_in_page=3,
+        limit=3,
)
-    assert len(rows.rows) == 3
+    assert len(rows.data) == 3
scoring_functions = [
scoring_fn_id,
@@ -40,7 +40,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
response = llama_stack_client.eval.evaluate_rows(
benchmark_id=benchmark_id,
-        input_rows=rows.rows,
+        input_rows=rows.data,
scoring_functions=scoring_functions,
benchmark_config={
"eval_candidate": {