Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 18:50:41 +00:00)
## PR Stack
- https://github.com/meta-llama/llama-stack/pull/1573
- https://github.com/meta-llama/llama-stack/pull/1625
- https://github.com/meta-llama/llama-stack/pull/1656
- https://github.com/meta-llama/llama-stack/pull/1657
- https://github.com/meta-llama/llama-stack/pull/1658
- https://github.com/meta-llama/llama-stack/pull/1659
- https://github.com/meta-llama/llama-stack/pull/1660

**Client SDK**
- https://github.com/meta-llama/llama-stack-client-python/pull/203

**CI**
- 1391130488
<img width="1042" alt="image" src="https://github.com/user-attachments/assets/69636067-376d-436b-9204-896e2dd490ca" /> -- the test_rag_agent_with_attachments is flaky and not related to this PR ## Doc <img width="789" alt="image" src="https://github.com/user-attachments/assets/b88390f3-73d6-4483-b09a-a192064e32d9" /> ## Client Usage ```python client.datasets.register( source={ "type": "uri", "uri": "lsfs://mydata.jsonl", }, schema="jsonl_messages", # optional dataset_id="my_first_train_data" ) # quick prototype debugging client.datasets.register( data_reference={ "type": "rows", "rows": [ "messages": [...], ], }, schema="jsonl_messages", ) ``` ## Test Plan - CI:1387805545
## Test Plan
- CI: 1387805545

```
LLAMA_STACK_CONFIG=fireworks pytest -v tests/integration/datasets/test_datasets.py
```

```
LLAMA_STACK_CONFIG=fireworks pytest -v tests/integration/scoring/test_scoring.py
```

```
pytest -v -s --nbval-lax ./docs/notebooks/Llama_Stack_Benchmark_Evals.ipynb
```
95 lines · 2.8 KiB · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import base64
import mimetypes
import os

import pytest

# How to run this test:
#
# LLAMA_STACK_CONFIG="template-name" pytest -v tests/integration/datasets


def data_url_from_file(file_path: str) -> str:
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found: {file_path}")

    with open(file_path, "rb") as file:
        file_content = file.read()

    base64_content = base64.b64encode(file_content).decode("utf-8")
    mime_type, _ = mimetypes.guess_type(file_path)

    data_url = f"data:{mime_type};base64,{base64_content}"

    return data_url


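# Three source types are exercised below:
#   1. a Hugging Face dataset URI (routed to the huggingface provider)
#   2. inline rows (routed to the localfs provider)
#   3. a base64 data URL built from a local CSV via data_url_from_file (localfs)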
@pytest.mark.parametrize(
    "purpose, source, provider_id, limit",
    [
        (
            "eval/messages-answer",
            {
                "type": "uri",
                "uri": "huggingface://datasets/llamastack/simpleqa?split=train",
            },
            "huggingface",
            10,
        ),
        (
            "eval/messages-answer",
            {
                "type": "rows",
                "rows": [
                    {
                        "messages": [{"role": "user", "content": "Hello, world!"}],
                        "answer": "Hello, world!",
                    },
                    {
                        "messages": [
                            {
                                "role": "user",
                                "content": "What is the capital of France?",
                            }
                        ],
                        "answer": "Paris",
                    },
                ],
            },
            "localfs",
            2,
        ),
        (
            "eval/messages-answer",
            {
                "type": "uri",
                "uri": data_url_from_file(os.path.join(os.path.dirname(__file__), "test_dataset.csv")),
            },
            "localfs",
            5,
        ),
    ],
)
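# Register a dataset from the parametrized source, page through it with iterrows,
# check that it appears in the dataset list, then unregister it and verify removal.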
def test_register_and_iterrows(llama_stack_client, purpose, source, provider_id, limit):
    dataset = llama_stack_client.datasets.register(
        purpose=purpose,
        source=source,
    )
    assert dataset.identifier is not None
    assert dataset.provider_id == provider_id
    iterrow_response = llama_stack_client.datasets.iterrows(dataset.identifier, limit=limit)
    assert len(iterrow_response.data) == limit

    dataset_list = llama_stack_client.datasets.list()
    assert dataset.identifier in [d.identifier for d in dataset_list]

    llama_stack_client.datasets.unregister(dataset.identifier)
    dataset_list = llama_stack_client.datasets.list()
    assert dataset.identifier not in [d.identifier for d in dataset_list]
```