Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-09 11:20:58 +00:00

Commit ad18dc94ac: add data structure to tasks
Parent: 9816c9aae6
7 changed files with 100 additions and 168 deletions
@@ -9,11 +9,12 @@ from enum import Enum
 from typing import Any, Dict, Generic, Iterator, Literal, Protocol, TypeVar, Union
 
 from llama_models.schema_utils import json_schema_type, webmethod
 from llama_models.llama3.api.datatypes import *  # noqa: F403
 
 from pydantic import BaseModel, Field
 from typing_extensions import Annotated
 
-# A sample (row) from raw dataset
+# A sample (row) from dataset
 TDatasetSample = TypeVar("TDatasetSample")
 
@@ -26,46 +27,20 @@ class DictSample(DatasetSample):
     data: Dict[str, Any]
 
 
 # A sample (row) from evals intermediate dataset
 TProcessedSample = TypeVar("TProcessedSample")
 
 
-@json_schema_type
-class ProcessedDictSample(DatasetSample):
-    data: Dict[str, Any]
-    preprocessed: Dict[str, Any]
-    prediction: Dict[str, Any]
-    postprocessed: Dict[str, Any]
+class PredictionSample(BaseModel):
+    completion_message: str
 
 
-# # A sample (row) after preprocessing the raw dataset
-# TPreprocessedSample = TypeVar("TPreprocessedSample")
-
-# @json_schema_type
-# class PreprocessedSample(BaseModel): ...
-
-# @json_schema_type
-# class InferencePreprocessedSample(PreprocessedSample):
-#     # TODO: either keep it generic or specific to inference API
-#     # messages: List[Message]
-#     data: Dict[str, Any]
-
-# # A sample (row) from model prediction output
-# TPredictionSample = TypeVar("TPredictionSample")
-
-# @json_schema_type
-# class PredictionSample(BaseModel): ...
-
-# @json_schema_type
-# class InferencePredictionSample(PredictionSample):
-#     data: Dict[str, Any]
-
-
-# # A sample (row) from post-processed output
-# TPostprocessedSample = TypeVar("TPostprocessedSample")
-
-# @json_schema_type
-# class PostprocessedSample(BaseModel): ...
-
-# @json_schema_type
-# class InferencePostprocessedSample(PredictionSample):
-#     data: Dict[str, Any]
+@json_schema_type
+class ProcessedDictSample(DictSample):
+    preprocessed: Optional[Dict[str, Any]] = None
+    prediction: Optional[PredictionSample] = None
+    postprocessed: Optional[Dict[str, Any]] = None
 
 
 @json_schema_type
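Taken together, the new models replace four parallel Dict payloads with a single row type that accumulates state as it moves through an eval run. A minimal sketch of the intent, assuming the classes above are importable; the dict keys ("question", "answer", etc.) are illustrative, only the field names come from this diff:

# Hypothetical walkthrough of one row; key names inside the dicts are assumptions.
row = ProcessedDictSample(data={"question": "What is 1 + 1?", "expected_answer": "2"})

# After preprocessing: the prompt rendered for the model.
row.preprocessed = {"prompt": "What is 1 + 1?"}

# After inference: the raw completion, now typed instead of a bare dict.
row.prediction = PredictionSample(completion_message="The answer is 2.")

# After postprocessing: the extracted answer, ready for scoring.
row.postprocessed = {"answer": "2"}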
@@ -71,16 +71,7 @@ class EvaluateTaskConfig(BaseModel):
     sampling_params: SamplingParams = SamplingParams()
 
 
-class BaseTask(
-    ABC,
-    Generic[
-        TDatasetSample,
-        TPreprocessedSample,
-        TPredictionSample,
-        TPostprocessedSample,
-        TSingleEvalResult,
-    ],
-):
+class BaseTask(ABC, Generic[TDatasetSample, TProcessedSample]):
     """
     A task represents a single evaluation benchmark, including its dataset, preprocessing, postprocessing and scoring methods.
     Base class for all evaluation tasks. Each task needs to implement the following methods:
@@ -94,17 +85,15 @@ class BaseTask(
         self._name = self.__class__.__name__
 
     @abstractmethod
-    def preprocess_sample(self, sample: TDatasetSample) -> TPreprocessedSample:
+    def preprocess_sample(self, sample: TDatasetSample) -> TProcessedSample:
         raise NotImplementedError()
 
     @abstractmethod
-    def postprocess_sample(self, sample: TPredictionSample) -> TPostprocessedSample:
+    def postprocess_sample(self, sample: TProcessedSample) -> TProcessedSample:
         raise NotImplementedError()
 
     @abstractmethod
-    def score_sample(
-        self, sample: TPostprocessedSample, ground_truth: TPreprocessedSample
-    ):
+    def score_sample(self, sample: TProcessedSample) -> SingleEvalResult:
         raise NotImplementedError()
 
     @abstractmethod
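To see the slimmed-down interface in use, here is a hypothetical subclass wiring the three per-sample hooks together. ExampleQATask, the dict keys, and the SingleEvalResult fields are illustrative assumptions, not part of this commit; any remaining abstract methods on BaseTask are omitted:

class ExampleQATask(BaseTask[DictSample, ProcessedDictSample]):
    """Hypothetical exact-match QA task over DictSample rows."""

    def preprocess_sample(self, sample: DictSample) -> ProcessedDictSample:
        # Wrap the raw row and render the prompt into `preprocessed`.
        return ProcessedDictSample(
            data=sample.data,
            preprocessed={"prompt": sample.data["question"]},  # key name assumed
        )

    def postprocess_sample(self, sample: ProcessedDictSample) -> ProcessedDictSample:
        # Normalize the model completion before scoring.
        sample.postprocessed = {"answer": sample.prediction.completion_message.strip()}
        return sample

    def score_sample(self, sample: ProcessedDictSample) -> SingleEvalResult:
        # Ground truth now rides along in sample.data, which is why the
        # separate ground_truth argument could be dropped.
        correct = sample.postprocessed["answer"] == sample.data["expected_answer"]
        return SingleEvalResult(score_data={"accuracy": float(correct)})  # fields assumed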
@@ -112,24 +101,15 @@ class BaseTask(
         raise NotImplementedError()
 
     def preprocess(
-        self, dataset: BaseDataset[TDatasetSample]
-    ) -> List[TPreprocessedSample]:
-        return [self.preprocess_sample(sample) for sample in self.dataset]
+        self, dataset: BaseDataset[TProcessedSample]
+    ) -> List[TProcessedSample]:
+        return [self.preprocess_sample(sample) for sample in dataset]
 
-    def postprocess(
-        self, generation: List[TPredictionSample]
-    ) -> List[TPostprocessedSample]:
+    def postprocess(self, generation: List[TProcessedSample]) -> List[TProcessedSample]:
         return [self.postprocess_sample(sample) for sample in generation]
 
-    def score(
-        self,
-        postprocessed: List[TPostprocessedSample],
-        preprocessed_dataset: List[TPreprocessedSample],
-    ) -> List[TSingleEvalResult]:
-        return [
-            self.score_sample(sample, ground_truth)
-            for sample, ground_truth in zip(postprocessed, self.preprocessed_dataset)
-        ]
+    def score(self, postprocessed: List[TProcessedSample]) -> List[SingleEvalResult]:
+        return [self.score_sample(sample) for sample in postprocessed]
 
 
 class Evals(Protocol):
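Since ground truth now travels inside each sample, the driver loop collapses into a straight pipeline. A hypothetical end-to-end run, where run_inference stands in for whatever service fills in the prediction field:

task = ExampleQATask()

processed = task.preprocess(dataset)  # List[ProcessedDictSample]
for sample in processed:
    sample.prediction = run_inference(sample.preprocessed)  # hypothetical inference call
postprocessed = task.postprocess(processed)
results = task.score(postprocessed)  # List[SingleEvalResult]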