mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-10 11:39:47 +00:00
datasets api
This commit is contained in:
parent
18fe966e96
commit
f046899a1c
15 changed files with 281 additions and 80 deletions
|
|
@ -3,3 +3,20 @@
|
|||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
from typing import Any
|
||||
|
||||
from llama_stack.providers.datatypes import Api
|
||||
from .datasets.dataset import DatasetRegistryImpl
|
||||
|
||||
|
||||
async def get_registry_impl(api: Api, _deps) -> Any:
    """Construct and initialize the registry implementation for *api*.

    Raises:
        ValueError: if no registry implementation is mapped to ``api.value``.
    """
    registry_classes = {
        "datasets": DatasetRegistryImpl,
    }

    registry_cls = registry_classes.get(api.value)
    if registry_cls is None:
        raise ValueError(f"API {api.value} not found in registry map")

    registry = registry_cls()
    await registry.initialize()
    return registry
|
||||
|
|
|
|||
|
|
@ -5,9 +5,9 @@
|
|||
# the root directory of this source tree.
|
||||
|
||||
# TODO: make these import config based
|
||||
from llama_stack.apis.dataset import * # noqa: F403
|
||||
from llama_stack.apis.datasets import * # noqa: F403
|
||||
from ..registry import Registry
|
||||
from .dataset import CustomDataset, HuggingfaceDataset
|
||||
from .dataset_wrappers import CustomDataset, HuggingfaceDataset
|
||||
|
||||
|
||||
class DatasetRegistry(Registry[BaseDataset]):
|
||||
|
|
|
|||
|
|
@ -3,76 +3,38 @@
|
|||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
import pandas
|
||||
from datasets import Dataset, load_dataset
|
||||
|
||||
from llama_stack.apis.dataset import * # noqa: F403
|
||||
# from llama_stack.apis.datasets import *
|
||||
# from llama_stack.distribution.registry.datasets import DatasetRegistry # noqa: F403
|
||||
# from ..registry import Registry
|
||||
# from .dataset_wrappers import CustomDataset, HuggingfaceDataset
|
||||
|
||||
|
||||
class CustomDataset(BaseDataset[DictSample]):
|
||||
def __init__(self, config: CustomDatasetDef) -> None:
|
||||
super().__init__()
|
||||
self.config = config
|
||||
self.dataset = None
|
||||
self.index = 0
|
||||
class DatasetRegistryImpl(Datasets):
    """API Impl to interact with underlying dataset registry"""

    def __init__(
        self,
    ) -> None:
        pass

    async def initialize(self) -> None:
        # No startup work needed yet; kept for provider lifecycle parity.
        pass

    async def shutdown(self) -> None:
        # No resources to release yet.
        pass

    async def create_dataset(
        self,
        dataset_def: DatasetDef,
    ) -> None:
        # BUG FIX: the original f-string referenced the undefined name
        # `dataset`; the parameter is `dataset_def`, so every call raised
        # NameError before printing anything.
        print(f"Creating dataset {dataset_def.identifier}")

    async def get_dataset(
        self,
        dataset_identifier: str,
    ) -> DatasetDef:
        # TODO: not implemented — currently returns None despite the
        # DatasetDef annotation.
        pass

    async def delete_dataset(self, dataset_identifier: str) -> None:
        # TODO: not implemented.
        pass
|
||||
|
|
|
|||
|
|
@ -0,0 +1,78 @@
|
|||
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
# All rights reserved.
|
||||
#
|
||||
# This source code is licensed under the terms described in the LICENSE file in
|
||||
# the root directory of this source tree.
|
||||
import pandas
|
||||
from datasets import Dataset, load_dataset
|
||||
|
||||
from llama_stack.apis.datasets import * # noqa: F403
|
||||
|
||||
|
||||
class CustomDataset(BaseDataset[DictSample]):
    """Dataset backed by a tabular file (.csv / .xlsx) fetched from ``config.url``.

    Rows are loaded lazily into a HuggingFace ``Dataset`` on first access
    (iteration or ``len``) and yielded as ``DictSample`` objects.
    """

    def __init__(self, config: CustomDatasetDef) -> None:
        super().__init__()
        self.config = config
        self.dataset = None  # populated lazily by load()
        self.index = 0

    @property
    def dataset_id(self) -> str:
        return self.config.identifier

    def __iter__(self) -> Iterator[DictSample]:
        if not self.dataset:
            self.load()
        return (DictSample(data=x) for x in self.dataset)

    def __str__(self) -> str:
        return f"CustomDataset({self.config})"

    def __len__(self) -> int:
        if not self.dataset:
            self.load()
        return len(self.dataset)

    def load(self, n_samples: Optional[int] = None) -> None:
        """Fetch the file at ``config.url`` and materialize it as a Dataset.

        No-op if already loaded. When ``n_samples`` is given, a random sample
        of that many rows is kept.

        Raises:
            ValueError: if the URL does not end in a supported extension.
        """
        if self.dataset:
            return

        # TODO: better support w/ data url
        if self.config.url.endswith(".csv"):
            df = pandas.read_csv(self.config.url)
        elif self.config.url.endswith(".xlsx"):
            df = pandas.read_excel(self.config.url)
        else:
            # BUG FIX: the original fell through with `df` unbound and
            # crashed below with UnboundLocalError; fail with a clear error.
            raise ValueError(f"Unsupported file format for URL: {self.config.url}")

        if n_samples is not None:
            df = df.sample(n=n_samples)

        self.dataset = Dataset.from_pandas(df)
|
||||
|
||||
|
||||
class HuggingfaceDataset(BaseDataset[DictSample]):
    """Wraps a dataset from the HuggingFace hub as a stream of ``DictSample``s.

    The underlying dataset is downloaded lazily on first iteration or ``len``.
    """

    def __init__(self, config: HuggingfaceDatasetDef):
        super().__init__()
        self.config = config
        self.dataset = None  # fetched lazily by load()

    @property
    def dataset_id(self) -> str:
        return self.config.identifier

    def __iter__(self) -> Iterator[DictSample]:
        if not self.dataset:
            self.load()
        return (DictSample(data=row) for row in self.dataset)

    def __str__(self):
        return f"HuggingfaceDataset({self.config})"

    def __len__(self):
        if not self.dataset:
            self.load()
        return len(self.dataset)

    def load(self):
        """Download the hub dataset once; later calls are no-ops."""
        if self.dataset:
            return
        self.dataset = load_dataset(self.config.dataset_name, **self.config.kwargs)
|
||||
Loading…
Add table
Add a link
Reference in a new issue