feat(api): (1/n) datasets api clean up (#1573)

## PR Stack
- https://github.com/meta-llama/llama-stack/pull/1573
- https://github.com/meta-llama/llama-stack/pull/1625
- https://github.com/meta-llama/llama-stack/pull/1656
- https://github.com/meta-llama/llama-stack/pull/1657
- https://github.com/meta-llama/llama-stack/pull/1658
- https://github.com/meta-llama/llama-stack/pull/1659
- https://github.com/meta-llama/llama-stack/pull/1660

**Client SDK**
- https://github.com/meta-llama/llama-stack-client-python/pull/203

**CI**
- 1391130488
<img width="1042" alt="image"
src="https://github.com/user-attachments/assets/69636067-376d-436b-9204-896e2dd490ca"
/>
  - Note: the `test_rag_agent_with_attachments` failure is flaky and unrelated to this PR.

## Doc
<img width="789" alt="image"
src="https://github.com/user-attachments/assets/b88390f3-73d6-4483-b09a-a192064e32d9"
/>


## Client Usage
```python
client.datasets.register(
    source={
        "type": "uri",
        "uri": "lsfs://mydata.jsonl",
    },
    schema="jsonl_messages",
    # optional
    dataset_id="my_first_train_data",
)

# quick prototype debugging
client.datasets.register(
    source={
        "type": "rows",
        "rows": [
            {"messages": [...]},
        ],
    },
    schema="jsonl_messages",
)
```
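
To read data back after registration, the reworked client also exposes `iterrows` and `unregister` (both exercised in the integration tests below). A minimal sketch of the round trip, reusing the `dataset_id` from the example above; the `limit` value is illustrative:

```python
# Page through the first rows of the dataset registered above.
# The limit value here is illustrative.
response = client.datasets.iterrows(
    dataset_id="my_first_train_data",
    limit=10,
)
for row in response.data:
    print(row)

# Unregister the dataset once it is no longer needed.
client.datasets.unregister("my_first_train_data")
```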

## Test Plan
- CI: 1387805545

```
LLAMA_STACK_CONFIG=fireworks pytest -v tests/integration/datasets/test_datasets.py
```

```
LLAMA_STACK_CONFIG=fireworks pytest -v tests/integration/scoring/test_scoring.py
```

```
pytest -v -s --nbval-lax ./docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
```
Commit 5287b437ae (parent 3b35a39b8b), authored by Xi Yan on 2025-03-17 16:55:45 -07:00, committed by GitHub.
29 changed files with 2593 additions and 2296 deletions


@@ -1,114 +0,0 @@
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import base64
import mimetypes
import os
from pathlib import Path

import pytest

# How to run this test:
#
# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/datasetio


@pytest.fixture
def dataset_for_test(llama_stack_client):
    dataset_id = "test_dataset"
    register_dataset(llama_stack_client, dataset_id=dataset_id)
    yield
    # Teardown - this always runs, even if the test fails
    try:
        llama_stack_client.datasets.unregister(dataset_id)
    except Exception as e:
        print(f"Warning: Failed to unregister test_dataset: {e}")


def data_url_from_file(file_path: str) -> str:
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found: {file_path}")

    with open(file_path, "rb") as file:
        file_content = file.read()

    base64_content = base64.b64encode(file_content).decode("utf-8")
    mime_type, _ = mimetypes.guess_type(file_path)

    data_url = f"data:{mime_type};base64,{base64_content}"

    return data_url


def register_dataset(llama_stack_client, for_generation=False, for_rag=False, dataset_id="test_dataset"):
    if for_rag:
        test_file = Path(os.path.abspath(__file__)).parent / "test_rag_dataset.csv"
    else:
        test_file = Path(os.path.abspath(__file__)).parent / "test_dataset.csv"
    test_url = data_url_from_file(str(test_file))

    if for_generation:
        dataset_schema = {
            "expected_answer": {"type": "string"},
            "input_query": {"type": "string"},
            "chat_completion_input": {"type": "chat_completion_input"},
        }
    elif for_rag:
        dataset_schema = {
            "expected_answer": {"type": "string"},
            "input_query": {"type": "string"},
            "generated_answer": {"type": "string"},
            "context": {"type": "string"},
        }
    else:
        dataset_schema = {
            "expected_answer": {"type": "string"},
            "input_query": {"type": "string"},
            "generated_answer": {"type": "string"},
        }

    dataset_providers = [x for x in llama_stack_client.providers.list() if x.api == "datasetio"]
    dataset_provider_id = dataset_providers[0].provider_id

    llama_stack_client.datasets.register(
        dataset_id=dataset_id,
        dataset_schema=dataset_schema,
        url=dict(uri=test_url),
        provider_id=dataset_provider_id,
    )


def test_register_unregister_dataset(llama_stack_client):
    register_dataset(llama_stack_client)
    response = llama_stack_client.datasets.list()
    assert isinstance(response, list)
    assert len(response) == 1
    assert response[0].identifier == "test_dataset"

    llama_stack_client.datasets.unregister("test_dataset")
    response = llama_stack_client.datasets.list()
    assert isinstance(response, list)
    assert len(response) == 0


def test_get_rows_paginated(llama_stack_client, dataset_for_test):
    response = llama_stack_client.datasetio.get_rows_paginated(
        dataset_id="test_dataset",
        rows_in_page=3,
    )
    assert isinstance(response.rows, list)
    assert len(response.rows) == 3
    assert response.next_page_token == "3"

    # iterate over all rows
    response = llama_stack_client.datasetio.get_rows_paginated(
        dataset_id="test_dataset",
        rows_in_page=2,
        page_token=response.next_page_token,
    )
    assert isinstance(response.rows, list)
    assert len(response.rows) == 2
    assert response.next_page_token == "5"
```


@@ -0,0 +1,95 @@
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import base64
import mimetypes
import os

import pytest

# How to run this test:
#
# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/datasets


def data_url_from_file(file_path: str) -> str:
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found: {file_path}")

    with open(file_path, "rb") as file:
        file_content = file.read()

    base64_content = base64.b64encode(file_content).decode("utf-8")
    mime_type, _ = mimetypes.guess_type(file_path)

    data_url = f"data:{mime_type};base64,{base64_content}"

    return data_url


@pytest.mark.parametrize(
    "purpose, source, provider_id, limit",
    [
        (
            "eval/messages-answer",
            {
                "type": "uri",
                "uri": "huggingface://datasets/llamastack/simpleqa?split=train",
            },
            "huggingface",
            10,
        ),
        (
            "eval/messages-answer",
            {
                "type": "rows",
                "rows": [
                    {
                        "messages": [{"role": "user", "content": "Hello, world!"}],
                        "answer": "Hello, world!",
                    },
                    {
                        "messages": [
                            {
                                "role": "user",
                                "content": "What is the capital of France?",
                            }
                        ],
                        "answer": "Paris",
                    },
                ],
            },
            "localfs",
            2,
        ),
        (
            "eval/messages-answer",
            {
                "type": "uri",
                "uri": data_url_from_file(os.path.join(os.path.dirname(__file__), "test_dataset.csv")),
            },
            "localfs",
            5,
        ),
    ],
)
def test_register_and_iterrows(llama_stack_client, purpose, source, provider_id, limit):
    dataset = llama_stack_client.datasets.register(
        purpose=purpose,
        source=source,
    )
    assert dataset.identifier is not None
    assert dataset.provider_id == provider_id

    iterrow_response = llama_stack_client.datasets.iterrows(dataset.identifier, limit=limit)
    assert len(iterrow_response.data) == limit

    dataset_list = llama_stack_client.datasets.list()
    assert dataset.identifier in [d.identifier for d in dataset_list]

    llama_stack_client.datasets.unregister(dataset.identifier)
    dataset_list = llama_stack_client.datasets.list()
    assert dataset.identifier not in [d.identifier for d in dataset_list]
```


```diff
@@ -4,10 +4,11 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

 import uuid
+from pathlib import Path

 import pytest

-from ..datasetio.test_datasetio import register_dataset
+from ..datasets.test_datasets import data_url_from_file

 # How to run this test:
 #
@@ -16,15 +17,21 @@ from ..datasetio.test_datasetio import register_dataset
 @pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
 def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval")
-    response = llama_stack_client.datasets.list()
-    assert any(x.identifier == "test_dataset_for_eval" for x in response)
-
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset_for_eval",
-        rows_in_page=3,
+    dataset = llama_stack_client.datasets.register(
+        purpose="eval/messages-answer",
+        source={
+            "type": "uri",
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
+        },
     )
-    assert len(rows.rows) == 3
+    response = llama_stack_client.datasets.list()
+    assert any(x.identifier == dataset.identifier for x in response)
+
+    rows = llama_stack_client.datasets.iterrows(
+        dataset_id=dataset.identifier,
+        limit=3,
+    )
+    assert len(rows.data) == 3

     scoring_functions = [
         scoring_fn_id,
@@ -32,7 +39,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
-        dataset_id="test_dataset_for_eval",
+        dataset_id=dataset.identifier,
         scoring_functions=scoring_functions,
     )
     list_benchmarks = llama_stack_client.benchmarks.list()
@@ -40,7 +47,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
     response = llama_stack_client.eval.evaluate_rows(
         benchmark_id=benchmark_id,
-        input_rows=rows.rows,
+        input_rows=rows.data,
         scoring_functions=scoring_functions,
         benchmark_config={
             "eval_candidate": {
@@ -59,11 +66,17 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
 @pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
 def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
+    dataset = llama_stack_client.datasets.register(
+        purpose="eval/messages-answer",
+        source={
+            "type": "uri",
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
+        },
+    )

     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
-        dataset_id="test_dataset_for_eval_2",
+        dataset_id=dataset.identifier,
         scoring_functions=[scoring_fn_id],
     )
```


```diff
@@ -5,23 +5,11 @@
 # the root directory of this source tree.

+from pathlib import Path
+
+import pandas as pd
 import pytest

-from ..datasetio.test_datasetio import register_dataset
-
-
-@pytest.fixture
-def rag_dataset_for_test(llama_stack_client):
-    dataset_id = "test_dataset"
-    register_dataset(llama_stack_client, for_rag=True, dataset_id=dataset_id)
-    yield  # This is where the test function will run
-
-    # Teardown - this always runs, even if the test fails
-    try:
-        llama_stack_client.datasets.unregister(dataset_id)
-    except Exception as e:
-        print(f"Warning: Failed to unregister test_dataset: {e}")
-

 @pytest.fixture
 def sample_judge_prompt_template():
@@ -92,49 +80,34 @@ def test_scoring_functions_register(
     # TODO: add unregister api for scoring functions


-def test_scoring_score(llama_stack_client, rag_dataset_for_test):
+@pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
+def test_scoring_score(llama_stack_client, scoring_fn_id):
     # scoring individual rows
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=3,
-    )
-    assert len(rows.rows) == 3
+    df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
+    rows = df.to_dict(orient="records")

-    scoring_fns_list = llama_stack_client.scoring_functions.list()
     scoring_functions = {
-        scoring_fns_list[0].identifier: None,
+        scoring_fn_id: None,
     }

     response = llama_stack_client.scoring.score(
-        input_rows=rows.rows,
+        input_rows=rows,
         scoring_functions=scoring_functions,
     )
     assert len(response.results) == len(scoring_functions)
     for x in scoring_functions:
         assert x in response.results
-        assert len(response.results[x].score_rows) == len(rows.rows)
-
-    # score batch
-    response = llama_stack_client.scoring.score_batch(
-        dataset_id="test_dataset",
-        scoring_functions=scoring_functions,
-        save_results_dataset=False,
-    )
-    assert len(response.results) == len(scoring_functions)
-    for x in scoring_functions:
-        assert x in response.results
-        assert len(response.results[x].score_rows) == 5
+        assert len(response.results[x].score_rows) == len(rows)


 def test_scoring_score_with_params_llm_as_judge(
-    llama_stack_client, sample_judge_prompt_template, judge_model_id, rag_dataset_for_test
+    llama_stack_client,
+    sample_judge_prompt_template,
+    judge_model_id,
 ):
     # scoring individual rows
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=3,
-    )
-    assert len(rows.rows) == 3
+    df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
+    rows = df.to_dict(orient="records")

     scoring_functions = {
         "llm-as-judge::base": dict(
@@ -149,24 +122,13 @@ def test_scoring_score_with_params_llm_as_judge(
     }

     response = llama_stack_client.scoring.score(
-        input_rows=rows.rows,
+        input_rows=rows,
         scoring_functions=scoring_functions,
     )
     assert len(response.results) == len(scoring_functions)
     for x in scoring_functions:
         assert x in response.results
-        assert len(response.results[x].score_rows) == len(rows.rows)
-
-    # score batch
-    response = llama_stack_client.scoring.score_batch(
-        dataset_id="test_dataset",
-        scoring_functions=scoring_functions,
-        save_results_dataset=False,
-    )
-    assert len(response.results) == len(scoring_functions)
-    for x in scoring_functions:
-        assert x in response.results
-        assert len(response.results[x].score_rows) == 5
+        assert len(response.results[x].score_rows) == len(rows)


 @pytest.mark.parametrize(
@@ -178,13 +140,14 @@ def test_scoring_score_with_params_llm_as_judge(
     ],
 )
 def test_scoring_score_with_aggregation_functions(
-    llama_stack_client, sample_judge_prompt_template, judge_model_id, provider_id, rag_dataset_for_test
+    llama_stack_client,
+    sample_judge_prompt_template,
+    judge_model_id,
+    provider_id,
+    rag_dataset_for_test,
 ):
-    rows = llama_stack_client.datasetio.get_rows_paginated(
-        dataset_id="test_dataset",
-        rows_in_page=3,
-    )
-    assert len(rows.rows) == 3
+    df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
+    rows = df.to_dict(orient="records")

     scoring_fns_list = [x for x in llama_stack_client.scoring_functions.list() if x.provider_id == provider_id]
     if len(scoring_fns_list) == 0:
@@ -224,12 +187,12 @@ def test_scoring_score_with_aggregation_functions(
         scoring_functions[scoring_fn.identifier] = None

     response = llama_stack_client.scoring.score(
-        input_rows=rows.rows,
+        input_rows=rows,
         scoring_functions=scoring_functions,
     )
     assert len(response.results) == len(scoring_functions)
     for x in scoring_functions:
         assert x in response.results
-        assert len(response.results[x].score_rows) == len(rows.rows)
+        assert len(response.results[x].score_rows) == len(rows)
         assert len(response.results[x].aggregated_results) == len(aggr_fns)
```