add dataset datatypes

commit 99ed1425fc (parent c8de439d9f)
Author: Xi Yan
Date: 2024-10-10 17:19:18 -07:00

5 changed files with 155 additions and 67 deletions

View file

@@ -5,19 +5,19 @@
 # the root directory of this source tree.
 # TODO: make these import config based
-from .dataset import CustomDataset, HFDataset
-from .dataset_registry import DatasetRegistry
+# from .dataset import CustomDataset, HFDataset
+# from .dataset_registry import DatasetRegistry
 
-DATASETS_REGISTRY = {
-    "mmlu-simple-eval-en": CustomDataset(
-        name="mmlu_eval",
-        url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
-    ),
-    "hellaswag": HFDataset(
-        name="hellaswag",
-        url="hf://hellaswag?split=validation&trust_remote_code=True",
-    ),
-}
+# DATASETS_REGISTRY = {
+#     "mmlu-simple-eval-en": CustomDataset(
+#         name="mmlu_eval",
+#         url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
+#     ),
+#     "hellaswag": HFDataset(
+#         name="hellaswag",
+#         url="hf://hellaswag?split=validation&trust_remote_code=True",
+#     ),
+# }
 
-for k, v in DATASETS_REGISTRY.items():
-    DatasetRegistry.register(k, v)
+# for k, v in DATASETS_REGISTRY.items():
+#     DatasetRegistry.register(k, v)
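The rewritten code in the next file star-imports its datatypes from llama_stack.apis.dataset, which this commit view does not show. As orientation, a minimal sketch of what those types plausibly look like, assuming pydantic-style models; only the fields actually read in the diff (data, url, dataset_name, kwargs) are grounded, the rest is guesswork:

from abc import ABC, abstractmethod
from typing import Any, Dict, Generic, Iterator, TypeVar

from pydantic import BaseModel

TSample = TypeVar("TSample")


class DictSample(BaseModel):
    # One row of a dataset; the diff wraps rows as DictSample(data=...).
    data: Dict[str, Any]


class CustomDatasetDef(BaseModel):
    # CustomDataset.load() reads config.url.
    name: str
    url: str


class HuggingfaceDatasetDef(BaseModel):
    # HuggingfaceDataset.load() reads config.dataset_name and config.kwargs.
    name: str
    dataset_name: str
    kwargs: Dict[str, Any] = {}


class BaseDataset(ABC, Generic[TSample]):
    # Generic lazy dataset; concrete subclasses implement load() and iteration.
    def __init__(self) -> None:
        self.type = self.__class__.__name__

    @abstractmethod
    def __iter__(self) -> Iterator[TSample]: ...

    @abstractmethod
    def load(self) -> None: ...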

View file

@@ -3,60 +3,88 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from abc import ABC, abstractmethod
 from urllib.parse import parse_qs, urlparse
 
 import pandas
 from datasets import Dataset, load_dataset
 
+from llama_stack.apis.dataset import *  # noqa: F403
 
-class BaseDataset(ABC):
-    def __init__(self, name: str):
-        self.dataset = None
-        self.dataset_id = name
-        self.type = self.__class__.__name__
-
-    def __iter__(self):
-        return iter(self.dataset)
-
-    @abstractmethod
-    def load(self):
-        pass
-
-
-class CustomDataset(BaseDataset):
-    def __init__(self, name, url):
-        super().__init__(name)
-        self.url = url
+class CustomDataset(BaseDataset[DictSample]):
+    def __init__(self, config: CustomDatasetDef) -> None:
+        super().__init__()
+        self.config = config
+        self.dataset = None
+        self.index = 0
+
+    def __iter__(self) -> Iterator[DictSample]:
+        return self
+
+    def __next__(self) -> DictSample:
+        if not self.dataset:
+            self.load()
+        if self.index >= len(self.dataset):
+            raise StopIteration
+        sample = DictSample(data=self.dataset[self.index])
+        self.index += 1
+        return sample
+
+    def __str__(self):
+        return f"CustomDataset({self.config})"
+
+    def __len__(self):
+        if not self.dataset:
+            self.load()
+        return len(self.dataset)
 
     def load(self):
         if self.dataset:
             return
         # TODO: better support w/ data url
-        if self.url.endswith(".csv"):
-            df = pandas.read_csv(self.url)
-        elif self.url.endswith(".xlsx"):
-            df = pandas.read_excel(self.url)
+        if self.config.url.endswith(".csv"):
+            df = pandas.read_csv(self.config.url)
+        elif self.config.url.endswith(".xlsx"):
+            df = pandas.read_excel(self.config.url)
         self.dataset = Dataset.from_pandas(df)
 
 
-class HFDataset(BaseDataset):
-    def __init__(self, name, url):
-        super().__init__(name)
-        self.url = url
+class HuggingfaceDataset(BaseDataset[DictSample]):
+    def __init__(self, config: HuggingfaceDatasetDef):
+        super().__init__()
+        self.config = config
+        self.dataset = None
+        self.index = 0
+
+    def __iter__(self) -> Iterator[DictSample]:
+        return self
+
+    def __next__(self) -> DictSample:
+        if not self.dataset:
+            self.load()
+        if self.index >= len(self.dataset):
+            raise StopIteration
+        sample = DictSample(data=self.dataset[self.index])
+        self.index += 1
+        return sample
+
+    def __str__(self):
+        return f"HuggingfaceDataset({self.config})"
+
+    def __len__(self):
+        if not self.dataset:
+            self.load()
+        return len(self.dataset)
 
     def load(self):
         if self.dataset:
             return
-        parsed = urlparse(self.url)
-        if parsed.scheme != "hf":
-            raise ValueError(f"Unknown HF dataset: {self.url}")
-        query = parse_qs(parsed.query)
-        query = {k: v[0] for k, v in query.items()}
-        path = parsed.netloc
-        self.dataset = load_dataset(path, **query)
+        self.dataset = load_dataset(self.config.dataset_name, **self.config.kwargs)
+        # parsed = urlparse(self.url)
+        # if parsed.scheme != "hf":
+        #     raise ValueError(f"Unknown HF dataset: {self.url}")
+        # query = parse_qs(parsed.query)
+        # query = {k: v[0] for k, v in query.items()}
+        # path = parsed.netloc
+        # self.dataset = load_dataset(path, **query)
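With the classes above, a usage sketch of the new iterator behavior; the config values mirror the hellaswag entry commented out in the first file, and the shape of HuggingfaceDatasetDef is the assumption from the earlier sketch:

config = HuggingfaceDatasetDef(
    name="hellaswag",
    dataset_name="hellaswag",
    kwargs={"split": "validation", "trust_remote_code": True},
)
ds = HuggingfaceDataset(config)

print(len(ds))        # __len__ triggers load() on first access
for sample in ds:     # __next__ returns DictSample objects until StopIteration
    print(sample.data)
    break

One design consequence visible in the diff: __iter__ returns self without resetting self.index, so a given dataset instance is exhausted after one full pass.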

View file

@@ -5,7 +5,7 @@
 # the root directory of this source tree.
 from typing import AbstractSet, Dict
 
-from .dataset import BaseDataset
+from llama_stack.apis.dataset import BaseDataset
 
 
 class DatasetRegistry:
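The hunk shows only the changed import and the class line. For orientation, a sketch of a registry of this shape, using the imports from the hunk; register is grounded in the DatasetRegistry.register(k, v) call from the first file, while names and get_dataset are assumed method names:

class DatasetRegistry:
    _REGISTRY: Dict[str, BaseDataset] = {}

    @staticmethod
    def names() -> AbstractSet[str]:
        # A dict keys view satisfies AbstractSet[str].
        return DatasetRegistry._REGISTRY.keys()

    @staticmethod
    def register(name: str, dataset: BaseDataset) -> None:
        if name in DatasetRegistry._REGISTRY:
            raise ValueError(f"Dataset {name} already exists.")
        DatasetRegistry._REGISTRY[name] = dataset

    @staticmethod
    def get_dataset(name: str) -> BaseDataset:
        if name not in DatasetRegistry._REGISTRY:
            raise ValueError(f"Dataset {name} not found.")
        return DatasetRegistry._REGISTRY[name]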