Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 07:14:20 +00:00)

add dataset datatypes

This commit is contained in:
parent c8de439d9f
commit 99ed1425fc

5 changed files with 155 additions and 67 deletions
@@ -4,46 +4,105 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from typing import Any, Dict, Optional, Protocol
-
-from llama_models.llama3.api.datatypes import URL
+from abc import ABC, abstractmethod
+
+from enum import Enum
+
+from typing import Any, Dict, Generic, Iterator, Literal, Protocol, TypeVar, Union
 
 from llama_models.schema_utils import json_schema_type, webmethod
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
+
+from typing_extensions import Annotated
+
+
+TDatasetRow = TypeVar("TDatasetRow")
 
 
 @json_schema_type
-class TrainEvalDataset(BaseModel):
-    """Dataset to be used for training or evaluating language models."""
-
-    # unique identifier associated with the dataset
-    dataset_id: str
-    content_url: URL
-    metadata: Optional[Dict[str, Any]] = None
+class DatasetRow(BaseModel): ...
 
 
 @json_schema_type
-class CreateDatasetRequest(BaseModel):
-    """Request to create a dataset."""
-
-    uuid: str
-    dataset: TrainEvalDataset
+class DictSample(DatasetRow):
+    data: Dict[str, Any]
+
+
+@json_schema_type
+class Generation(BaseModel): ...
+
+
+@json_schema_type
+class DatasetType(Enum):
+    custom = "custom"
+    huggingface = "huggingface"
+
+
+@json_schema_type
+class HuggingfaceDatasetDef(BaseModel):
+    type: Literal[DatasetType.huggingface.value] = DatasetType.huggingface.value
+    identifier: str = Field(
+        description="A unique name for the dataset",
+    )
+    dataset_name: str = Field(
+        description="The name of the dataset into HF (e.g. hellawag)",
+    )
+    kwargs: Dict[str, Any] = Field(
+        description="Any additional arguments to get Huggingface (e.g. split, trust_remote_code)",
+        default_factory=dict,
+    )
+
+
+@json_schema_type
+class CustomDatasetDef(BaseModel):
+    type: Literal[DatasetType.custom.value] = DatasetType.custom.value
+    identifier: str = Field(
+        description="A unique name for the dataset",
+    )
+    url: str = Field(
+        description="The URL to the dataset",
+    )
+
+
+DatasetDef = Annotated[
+    Union[
+        HuggingfaceDatasetDef,
+        CustomDatasetDef,
+    ],
+    Field(discriminator="type"),
+]
+
+
+class BaseDataset(ABC, Generic[TDatasetRow]):
+    def __init__(self) -> None:
+        self.type: str = self.__class__.__name__
+
+    @abstractmethod
+    def __iter__(self) -> Iterator[TDatasetRow]:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def load(self) -> None:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def __str__(self) -> str:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def __len__(self) -> int:
+        raise NotImplementedError()
 
 
 class Datasets(Protocol):
     @webmethod(route="/datasets/create")
     def create_dataset(
         self,
-        uuid: str,
-        dataset: TrainEvalDataset,
+        dataset: DatasetDef,
     ) -> None: ...
 
     @webmethod(route="/datasets/get")
     def get_dataset(
         self,
-        dataset_uuid: str,
-    ) -> TrainEvalDataset: ...
+        dataset_identifier: str,
+    ) -> DatasetDef: ...
 
     @webmethod(route="/datasets/delete")
     def delete_dataset(
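For context, a minimal usage sketch (not part of the commit) of how the new discriminated union resolves a plain dict into the right definition class. It assumes pydantic v2's TypeAdapter and that DatasetDef and HuggingfaceDatasetDef are importable from llama_stack.apis.dataset; with pydantic v1, parse_obj_as would play the same role.

```python
# Hypothetical usage sketch; the names below mirror the definitions added in this hunk.
from pydantic import TypeAdapter

from llama_stack.apis.dataset import DatasetDef, HuggingfaceDatasetDef

dataset_def = TypeAdapter(DatasetDef).validate_python(
    {
        "type": "huggingface",  # "type" is the discriminator declared on DatasetDef
        "identifier": "hellaswag",
        "dataset_name": "hellaswag",
        "kwargs": {"split": "validation", "trust_remote_code": True},
    }
)
assert isinstance(dataset_def, HuggingfaceDatasetDef)
```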
@@ -33,6 +33,7 @@ class EvaluateTaskConfig(BaseModel):
 class EvaluateResponse(BaseModel):
     """Scores for evaluation."""
 
+    preprocess_output: GenerationOutput
     metrics: Dict[str, str]
 
 
@@ -5,19 +5,19 @@
 # the root directory of this source tree.
 
 # TODO: make these import config based
-from .dataset import CustomDataset, HFDataset
-from .dataset_registry import DatasetRegistry
+# from .dataset import CustomDataset, HFDataset
+# from .dataset_registry import DatasetRegistry
 
-DATASETS_REGISTRY = {
-    "mmlu-simple-eval-en": CustomDataset(
-        name="mmlu_eval",
-        url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
-    ),
-    "hellaswag": HFDataset(
-        name="hellaswag",
-        url="hf://hellaswag?split=validation&trust_remote_code=True",
-    ),
-}
+# DATASETS_REGISTRY = {
+#     "mmlu-simple-eval-en": CustomDataset(
+#         name="mmlu_eval",
+#         url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
+#     ),
+#     "hellaswag": HFDataset(
+#         name="hellaswag",
+#         url="hf://hellaswag?split=validation&trust_remote_code=True",
+#     ),
+# }
 
-for k, v in DATASETS_REGISTRY.items():
-    DatasetRegistry.register(k, v)
+# for k, v in DATASETS_REGISTRY.items():
+#     DatasetRegistry.register(k, v)
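The registry seeding above is commented out rather than ported. As a purely hypothetical sketch of what a config-driven replacement might look like (reusing the *DatasetDef types from the API hunk above and the dataset classes from the provider change below), it could be rebuilt along these lines:

```python
# Hypothetical, not part of this commit: re-seed the registry from the new config types.
from llama_stack.apis.dataset import CustomDatasetDef, HuggingfaceDatasetDef

from .dataset import CustomDataset, HuggingfaceDataset
from .dataset_registry import DatasetRegistry

DATASETS_REGISTRY = {
    "mmlu-simple-eval-en": CustomDataset(
        CustomDatasetDef(
            identifier="mmlu-simple-eval-en",
            url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
        )
    ),
    "hellaswag": HuggingfaceDataset(
        HuggingfaceDatasetDef(
            identifier="hellaswag",
            dataset_name="hellaswag",
            kwargs={"split": "validation", "trust_remote_code": True},
        )
    ),
}

for name, dataset in DATASETS_REGISTRY.items():
    DatasetRegistry.register(name, dataset)
```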
@@ -3,60 +3,88 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from abc import ABC, abstractmethod
-from urllib.parse import parse_qs, urlparse
-
 import pandas
 from datasets import Dataset, load_dataset
 
+from llama_stack.apis.dataset import *  # noqa: F403
 
-
-class BaseDataset(ABC):
-    def __init__(self, name: str):
-        self.dataset = None
-        self.dataset_id = name
-        self.type = self.__class__.__name__
-
-    def __iter__(self):
-        return iter(self.dataset)
-
-    @abstractmethod
-    def load(self):
-        pass
-
-
-class CustomDataset(BaseDataset):
-    def __init__(self, name, url):
-        super().__init__(name)
-        self.url = url
+
+class CustomDataset(BaseDataset[DictSample]):
+    def __init__(self, config: CustomDatasetDef) -> None:
+        super().__init__()
+        self.config = config
+        self.dataset = None
+        self.index = 0
+
+    def __iter__(self) -> Iterator[DictSample]:
+        return self
+
+    def __next__(self) -> DictSample:
+        if not self.dataset:
+            self.load()
+        if self.index >= len(self.dataset):
+            raise StopIteration
+        sample = DictSample(data=self.dataset[self.index])
+        self.index += 1
+        return sample
+
+    def __str__(self):
+        return f"CustomDataset({self.config})"
+
+    def __len__(self):
+        if not self.dataset:
+            self.load()
+        return len(self.dataset)
 
     def load(self):
         if self.dataset:
             return
         # TODO: better support w/ data url
-        if self.url.endswith(".csv"):
-            df = pandas.read_csv(self.url)
-        elif self.url.endswith(".xlsx"):
-            df = pandas.read_excel(self.url)
+        if self.config.url.endswith(".csv"):
+            df = pandas.read_csv(self.config.url)
+        elif self.config.url.endswith(".xlsx"):
+            df = pandas.read_excel(self.config.url)
 
         self.dataset = Dataset.from_pandas(df)
 
 
-class HFDataset(BaseDataset):
-    def __init__(self, name, url):
-        super().__init__(name)
-        self.url = url
+class HuggingfaceDataset(BaseDataset[DictSample]):
+    def __init__(self, config: HuggingfaceDatasetDef):
+        super().__init__()
+        self.config = config
+        self.dataset = None
+        self.index = 0
+
+    def __iter__(self) -> Iterator[DictSample]:
+        return self
+
+    def __next__(self) -> DictSample:
+        if not self.dataset:
+            self.load()
+        if self.index >= len(self.dataset):
+            raise StopIteration
+        sample = DictSample(data=self.dataset[self.index])
+        self.index += 1
+        return sample
+
+    def __str__(self):
+        return f"HuggingfaceDataset({self.config})"
+
+    def __len__(self):
+        if not self.dataset:
+            self.load()
+        return len(self.dataset)
 
     def load(self):
         if self.dataset:
             return
-
-        parsed = urlparse(self.url)
-
-        if parsed.scheme != "hf":
-            raise ValueError(f"Unknown HF dataset: {self.url}")
-
-        query = parse_qs(parsed.query)
-        query = {k: v[0] for k, v in query.items()}
-        path = parsed.netloc
-        self.dataset = load_dataset(path, **query)
+        self.dataset = load_dataset(self.config.dataset_name, **self.config.kwargs)
+        # parsed = urlparse(self.url)
+
+        # if parsed.scheme != "hf":
+        #     raise ValueError(f"Unknown HF dataset: {self.url}")
+
+        # query = parse_qs(parsed.query)
+        # query = {k: v[0] for k, v in query.items()}
+        # path = parsed.netloc
+        # self.dataset = load_dataset(path, **query)
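A short driver sketch (hypothetical, assuming the classes above are importable alongside the API types) exercising the new iteration protocol:

```python
# Hypothetical driver for the provider classes in this hunk.
from llama_stack.apis.dataset import CustomDatasetDef

config = CustomDatasetDef(
    identifier="mmlu-simple-eval-en",
    url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
)
ds = CustomDataset(config)

print(len(ds))           # __len__ triggers a lazy load() on first use
for sample in ds:        # __next__ yields DictSample objects, one per row
    print(sample.data)
    break
```

Note that __iter__ returns self without resetting self.index, so as written each dataset instance is a single-pass iterator; a second loop over the same object resumes where the first one stopped.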
@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 from typing import AbstractSet, Dict
 
-from .dataset import BaseDataset
+from llama_stack.apis.dataset import BaseDataset
 
 
 class DatasetRegistry: