address comments
commit afa0c2b146 (parent 59c93548bc)
2 changed files with 15 additions and 11 deletions
@@ -61,13 +61,13 @@ class PandasDataframeDataset(BaseDataset):
         else:
             return self.df.iloc[idx].to_dict()
 
-    def _validate_dataset_schema(self) -> None:
-        assert self.df is not None, "Dataset not loaded. Please call .load() first"
+    def _validate_dataset_schema(self, df) -> pandas.DataFrame:
         # note that we will drop any columns in dataset that are not in the schema
-        self.df = self.df[self.dataset_def.dataset_schema.keys()]
+        df = df[self.dataset_def.dataset_schema.keys()]
         # check all columns in dataset schema are present
-        assert len(self.df.columns) == len(self.dataset_def.dataset_schema)
+        assert len(df.columns) == len(self.dataset_def.dataset_schema)
         # TODO: type checking against column types in dataset schema
+        return df
 
     def load(self) -> None:
         if self.df is not None:
@@ -99,8 +99,7 @@ class PandasDataframeDataset(BaseDataset):
         else:
             raise ValueError(f"Unsupported file type: {self.dataset_def.url}")
 
-        self.df = df
-        self._validate_dataset_schema()
+        self.df = self._validate_dataset_schema(df)
 
 
 class MetaReferenceDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
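Taken together, the two hunks above turn schema validation into a pure transformation that runs before self.df is ever assigned, which is why the "assert self.df is not None" guard could be dropped. A minimal sketch of the resulting flow, with the file-type dispatch simplified to a single pandas.read_csv call for illustration:

    import pandas

    class PandasDataframeDataset:
        def __init__(self, dataset_def):
            self.dataset_def = dataset_def
            self.df = None

        def _validate_dataset_schema(self, df) -> pandas.DataFrame:
            # drop any columns in the dataset that are not in the schema
            df = df[self.dataset_def.dataset_schema.keys()]
            # check that all schema columns are present
            assert len(df.columns) == len(self.dataset_def.dataset_schema)
            return df

        def load(self) -> None:
            if self.df is not None:
                return
            # assumption: a CSV source; the real loader dispatches on file type
            df = pandas.read_csv(self.dataset_def.url)
            self.df = self._validate_dataset_schema(df)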
@@ -136,7 +135,10 @@ class MetaReferenceDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
         dataset_info = self.dataset_infos.get(dataset_id)
         dataset_info.dataset_impl.load()
 
-        if page_token is None or not page_token.isnumeric():
+        if page_token and not page_token.isnumeric():
+            raise ValueError("Invalid page_token")
+
+        if page_token is None:
             next_page_token = 0
         else:
             next_page_token = int(page_token)
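The reworked guard rejects malformed tokens explicitly instead of silently treating them as page zero: previously a non-numeric token such as "abc" fell into the next_page_token = 0 branch, whereas now it raises. The same logic as a standalone sketch (hypothetical helper name):

    from typing import Optional

    def resolve_page_token(page_token: Optional[str]) -> int:
        # a supplied token must be numeric; anything else is a client error
        if page_token and not page_token.isnumeric():
            raise ValueError("Invalid page_token")
        # a missing token means start from the first row
        return 0 if page_token is None else int(page_token)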
@@ -59,11 +59,11 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
         for required_column in ["generated_answer", "expected_answer", "input_query"]:
             if required_column not in dataset_def.dataset_schema:
                 raise ValueError(
-                    f"Dataset {dataset_id} does not have a '{required_column}' column. Please make sure '{required_column}' column is in the dataset."
+                    f"Dataset {dataset_id} does not have a '{required_column}' column."
                 )
             if dataset_def.dataset_schema[required_column].type != "string":
                 raise ValueError(
-                    f"Dataset {dataset_id} does not have a '{required_column}' column of type 'string'. Please make sure '{required_column}' column is of type 'string'."
+                    f"Dataset {dataset_id} does not have a '{required_column}' column of type 'string'."
                 )
 
     async def score_batch(
@@ -73,12 +73,12 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
         save_results_dataset: bool = False,
     ) -> ScoreBatchResponse:
         await self.validate_scoring_input_dataset_schema(dataset_id=dataset_id)
-        rows_paginated = await self.datasetio_api.get_rows_paginated(
+        all_rows = await self.datasetio_api.get_rows_paginated(
             dataset_id=dataset_id,
             rows_in_page=-1,
         )
         res = await self.score(
-            input_rows=rows_paginated.rows, scoring_functions=scoring_functions
+            input_rows=all_rows.rows, scoring_functions=scoring_functions
         )
         if save_results_dataset:
             # TODO: persist and register dataset on to server for reading
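The rename from rows_paginated to all_rows reflects what this call returns here: rows_in_page=-1 requests the entire dataset in a single page, so the result is not really paginated. A hypothetical call site under that assumption:

    # fetch every row at once; -1 is treated as "no page limit"
    all_rows = await datasetio_api.get_rows_paginated(
        dataset_id="my_dataset",  # hypothetical dataset id
        rows_in_page=-1,
    )
    print(len(all_rows.rows))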
@@ -94,6 +94,8 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
     ) -> ScoreResponse:
         res = {}
         for scoring_fn_id in scoring_functions:
+            if scoring_fn_id not in SCORER_REGISTRY:
+                raise ValueError(f"Scoring function {scoring_fn_id} is not supported.")
             scorer = SCORER_REGISTRY[scoring_fn_id]()
             score_results = scorer.score(input_rows)
             agg_results = scorer.aggregate(score_results)
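The added membership check turns an opaque KeyError into a descriptive error when an unknown scoring function id is requested. A minimal sketch of the registry pattern, with a hypothetical scorer filled in (the real SCORER_REGISTRY and scorer classes live elsewhere in the codebase):

    from typing import Dict, List, Type

    class EqualityScorer:
        # hypothetical scorer: 1.0 when the generated answer matches the expected one
        def score(self, rows: List[dict]) -> List[float]:
            return [float(r["generated_answer"] == r["expected_answer"]) for r in rows]

        def aggregate(self, results: List[float]) -> dict:
            return {"accuracy": sum(results) / len(results)}

    SCORER_REGISTRY: Dict[str, Type] = {"equality": EqualityScorer}

    def get_scorer(scoring_fn_id: str):
        if scoring_fn_id not in SCORER_REGISTRY:
            raise ValueError(f"Scoring function {scoring_fn_id} is not supported.")
        return SCORER_REGISTRY[scoring_fn_id]()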