mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-07-29 15:23:51 +00:00
get task
This commit is contained in:
parent
8339b2cef3
commit
4f07aca309
6 changed files with 188 additions and 1 deletions
|
@ -0,0 +1,40 @@
|
||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the terms described in the LICENSE file in
|
||||||
|
# the root directory of this source tree.
|
||||||
|
|
||||||
|
from urllib.parse import parse_qs, urlparse
|
||||||
|
|
||||||
|
import pandas
|
||||||
|
from datasets import Dataset, load_dataset
|
||||||
|
|
||||||
|
|
||||||
|
class BaseDataset:
    """Minimal container tracking a dataset id and its loaded data."""

    def __init__(self, name: str):
        # Subclasses are responsible for populating ``dataset`` with a
        # loaded ``datasets.Dataset`` instance.
        self.dataset = None
        self.dataset_id = name
        # Record which concrete wrapper class produced this dataset.
        self.type = self.__class__.__name__
|
||||||
|
|
||||||
|
|
||||||
|
class CustomDataset(BaseDataset):
    """Dataset backed by a CSV file fetched from a URL."""

    def __init__(self, name, url):
        super().__init__(name)
        self.url = url
        # Fetch the CSV eagerly and wrap it as a HF ``Dataset``.
        # NOTE(review): this performs network I/O in the constructor.
        frame = pandas.read_csv(self.url)
        self.dataset = Dataset.from_pandas(frame)
|
||||||
|
|
||||||
|
|
||||||
|
class HFDataset(BaseDataset):
    """Dataset loaded from the Hugging Face hub via an ``hf://`` URL.

    URL format follows OpenAI's evals, e.g.
    hf://hendrycks_test?name=business_ethics&split=validation
    """

    def __init__(self, name, url):
        super().__init__(name)
        self.url = url

        parsed = urlparse(url)
        if parsed.scheme != "hf":
            raise ValueError(f"Unknown HF dataset: {url}")

        # parse_qs yields {key: [value, ...]}; keep the first value so the
        # query params can be forwarded as keyword args (name=..., split=...).
        query = parse_qs(parsed.query)
        query = {k: v[0] for k, v in query.items()}

        # BUG FIX: ``path`` was referenced but never defined (NameError at
        # runtime). The dataset path is the netloc of the hf:// URL,
        # e.g. "hendrycks_test" in the example above.
        path = parsed.netloc
        self.dataset = load_dataset(path, **query)
|
|
@ -0,0 +1,19 @@
|
||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the terms described in the LICENSE file in
|
||||||
|
# the root directory of this source tree.
|
||||||
|
from .datasets import CustomDataset
|
||||||
|
|
||||||
|
# TODO: make this into a config based registry
# Maps a dataset id to a ready-to-use dataset instance.
# NOTE(review): CustomDataset downloads its CSV inside __init__, so importing
# this module performs network I/O eagerly — consider lazy construction;
# verify against callers before changing.
DATASETS_REGISTRY = {
    "mmlu_eval": CustomDataset(
        name="mmlu_eval",
        url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
    ),
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_dataset(dataset_id: str):
    """Return the concrete dataset implementation registered under dataset_id.

    Raises:
        KeyError: if dataset_id is not registered (message lists known ids).
    """
    try:
        return DATASETS_REGISTRY[dataset_id]
    except KeyError:
        # Still a KeyError for backward compatibility, but with an
        # actionable message instead of just the missing key.
        raise KeyError(
            f"Unknown dataset '{dataset_id}'. Available: {sorted(DATASETS_REGISTRY)}"
        ) from None
|
|
@ -6,6 +6,14 @@
|
||||||
|
|
||||||
from llama_stack.apis.inference import * # noqa: F403
|
from llama_stack.apis.inference import * # noqa: F403
|
||||||
from llama_stack.apis.evals import * # noqa: F403
|
from llama_stack.apis.evals import * # noqa: F403
|
||||||
|
from termcolor import cprint
|
||||||
|
|
||||||
|
from llama_stack.providers.impls.meta_reference.evals.datas.utils import ( # noqa: F403
|
||||||
|
get_dataset,
|
||||||
|
)
|
||||||
|
from llama_stack.providers.impls.meta_reference.evals.tasks.utils import ( # noqa: F403
|
||||||
|
get_task,
|
||||||
|
)
|
||||||
|
|
||||||
from .config import MetaReferenceEvalsImplConfig
|
from .config import MetaReferenceEvalsImplConfig
|
||||||
|
|
||||||
|
@ -26,7 +34,29 @@ class MetaReferenceEvalsImpl(Evals):
|
||||||
dataset: str,
|
dataset: str,
|
||||||
task: str,
|
task: str,
|
||||||
) -> EvaluateResponse:
|
) -> EvaluateResponse:
|
||||||
print("hi")
|
cprint(f"model={model}, dataset={dataset}, task={task}", "red")
|
||||||
|
|
||||||
|
# resolve dataset
|
||||||
|
# - either a custom URL dataset or HF URL dataset
|
||||||
|
dataset = get_dataset("mmlu_eval")
|
||||||
|
print(dataset.dataset)
|
||||||
|
|
||||||
|
# # resolve task and execute task
|
||||||
|
task_impl = get_task(task, dataset)
|
||||||
|
print(task_impl)
|
||||||
|
|
||||||
|
# # F1: this will generate a preprocessed list of input messages for model
|
||||||
|
# x1 = task_impl.preprocess(dataset)
|
||||||
|
|
||||||
|
# # call inference API w/ model
|
||||||
|
# generation_outputs = ["response1", "response2", "response3"]
|
||||||
|
|
||||||
|
# # F2: post process
|
||||||
|
# x2 = task_impl.postprocess(generation_outputs)
|
||||||
|
|
||||||
|
# # F3: score generation outputs
|
||||||
|
# scores = task_impl.score(x2)
|
||||||
|
|
||||||
return EvaluateResponse(
|
return EvaluateResponse(
|
||||||
metrics={
|
metrics={
|
||||||
"accuracy": 0.5,
|
"accuracy": 0.5,
|
||||||
|
|
|
@ -0,0 +1,81 @@
|
||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the terms described in the LICENSE file in
|
||||||
|
# the root directory of this source tree.
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
|
||||||
|
|
||||||
|
class BaseTask(ABC):
    """
    Base class for all evaluation tasks.

    Each task needs to implement the following methods:
    - F1: preprocess_sample(self)
    - F2: postprocess_sample(self)
    - F3: score_sample(self)
    """

    def __init__(self, dataset, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Task name defaults to the concrete subclass name.
        self._name = self.__class__.__name__
        self.dataset = dataset

    @abstractmethod
    def preprocess_sample(self, sample):
        """F1: preprocess a single sample."""
        raise NotImplementedError()

    @abstractmethod
    def postprocess_sample(self, sample):
        """F2: postprocess a single sample."""
        raise NotImplementedError()

    @abstractmethod
    def score_sample(self, sample, ground_truth):
        """F3: score a single sample."""
        raise NotImplementedError()

    def preprocess(self):
        # Dataset-level F1 hook; no-op here, concrete tasks may override.
        return None

    def postprocess(self):
        # Dataset-level F2 hook; no-op here, concrete tasks may override.
        return None

    def score(self, generation):
        # Dataset-level F3 hook; no-op here, concrete tasks may override.
        return None
|
||||||
|
|
||||||
|
|
||||||
|
class MMLUTask(BaseTask):
    """
    MMLU Task. Each task needs to implement the following methods:
    - F1: preprocess_sample(self)
    - F2: postprocess_sample(self)
    - F3: score_sample(self)
    """

    def __init__(self, dataset, *args, **kwargs):
        super().__init__(dataset, *args, **kwargs)

    def preprocess_sample(self, sample):
        """F1: preprocess sample. TODO: not yet implemented."""
        pass

    def postprocess_sample(self, sample):
        """F2: postprocess sample. TODO: not yet implemented."""
        pass

    def score_sample(self, sample, ground_truth=None):
        """F3: score sample against its ground truth. TODO: not yet implemented.

        BUG FIX: the override previously took only ``sample`` and did not
        match the abstract ``BaseTask.score_sample(self, sample, ground_truth)``
        signature; the ``None`` default keeps any existing single-argument
        callers working.
        """
        pass
|
|
@ -0,0 +1,16 @@
|
||||||
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||||
|
# All rights reserved.
|
||||||
|
#
|
||||||
|
# This source code is licensed under the terms described in the LICENSE file in
|
||||||
|
# the root directory of this source tree.
|
||||||
|
from .tasks import * # noqa: F403
|
||||||
|
|
||||||
|
# TODO: make this into a config based registry
# Maps a task id to the task *class* (not an instance); instantiation with a
# dataset happens on demand in get_task().
TASKS_REGISTRY = {
    "mmlu": MMLUTask,
}
|
||||||
|
|
||||||
|
|
||||||
|
def get_task(task_id: str, dataset):
    """Instantiate and return the task registered under task_id.

    Args:
        task_id: key into TASKS_REGISTRY (e.g. "mmlu").
        dataset: dataset instance passed to the task constructor.

    Raises:
        KeyError: if task_id is not registered (message lists known ids).
    """
    try:
        task_impl = TASKS_REGISTRY[task_id]
    except KeyError:
        # Still a KeyError for backward compatibility, but with an
        # actionable message instead of just the missing key.
        raise KeyError(
            f"Unknown task '{task_id}'. Available: {sorted(TASKS_REGISTRY)}"
        ) from None
    return task_impl(dataset)
|
|
@ -19,6 +19,7 @@ def available_providers() -> List[ProviderSpec]:
|
||||||
"pillow",
|
"pillow",
|
||||||
"pandas",
|
"pandas",
|
||||||
"scikit-learn",
|
"scikit-learn",
|
||||||
|
"datasets",
|
||||||
],
|
],
|
||||||
module="llama_stack.providers.impls.meta_reference.evals",
|
module="llama_stack.providers.impls.meta_reference.evals",
|
||||||
config_class="llama_stack.providers.impls.meta_reference.evals.MetaReferenceEvalsImplConfig",
|
config_class="llama_stack.providers.impls.meta_reference.evals.MetaReferenceEvalsImplConfig",
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue