cleanup hardcoded dataset registry

Xi Yan 2024-10-14 14:19:15 -07:00
parent a9210cd416
commit 9c501d042b
3 changed files with 31 additions and 29 deletions
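For orientation, the diffs below replace a hardcoded dataset with one registered at runtime: a DatasetsClient call registers a dataset under an identifier, and run_evals then refers to that identifier. A minimal sketch of that flow, assuming a server at localhost:5000 and absolute import paths (the diffs themselves use relative imports, so the package prefix here is an assumption):

import asyncio

# import paths are assumptions; in the diffs these names come in via relative imports
from llama_stack.apis.datasets import CustomDatasetDef
from llama_stack.apis.datasets.client import DatasetsClient
from llama_stack.apis.evals.client import EvaluationClient


async def demo(host: str, port: int) -> None:
    datasets_client = DatasetsClient(f"http://{host}:{port}")
    evals_client = EvaluationClient(f"http://{host}:{port}")

    # 1. register a dataset under an identifier
    await datasets_client.create_dataset(
        dataset_def=CustomDatasetDef(
            identifier="mmlu-simple-eval-en",
            url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
        ),
    )

    # 2. run evals against the registered identifier rather than a hardcoded dataset
    response = await evals_client.run_evals(
        model="Llama3.1-8B-Instruct",
        dataset="mmlu-simple-eval-en",
        task="mmlu",
    )
    print(response.formatted_report or response)


if __name__ == "__main__":
    asyncio.run(demo("localhost", 5000))  # host/port are assumptions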

datasets/client.py

@@ -26,7 +26,7 @@ def deserialize_dataset_def(j: Optional[Dict[str, Any]]) -> Optional[DatasetDef]
         raise ValueError(f"Unknown dataset type: {j['type']}")

-class DatasetClient(Datasets):
+class DatasetsClient(Datasets):
     def __init__(self, base_url: str):
         self.base_url = base_url
@@ -104,7 +104,7 @@ class DatasetClient(Datasets):
 async def run_main(host: str, port: int):
-    client = DatasetClient(f"http://{host}:{port}")
+    client = DatasetsClient(f"http://{host}:{port}")

     # register dataset
     response = await client.create_dataset(
@@ -115,6 +115,16 @@ async def run_main(host: str, port: int):
     )
     cprint(response, "green")

+    # register HF dataset
+    response = await client.create_dataset(
+        dataset_def=HuggingfaceDatasetDef(
+            identifier="hellaswag",
+            dataset_name="hellaswag",
+            kwargs={"split": "validation", "trust_remote_code": True},
+        )
+    )
+    cprint(response, "green")
+
     # get dataset
     get_dataset = await client.get_dataset(
         dataset_identifier="test-dataset",
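Once registered, the HF-backed dataset should be retrievable by its identifier through the same get_dataset call the script already uses for "test-dataset". A small sketch, with the same assumed import path and host/port as above:

import asyncio

from llama_stack.apis.datasets.client import DatasetsClient  # assumed path


async def fetch_hellaswag(host: str, port: int) -> None:
    client = DatasetsClient(f"http://{host}:{port}")
    # mirrors the get_dataset(dataset_identifier=...) call shown in the diff
    dataset = await client.get_dataset(dataset_identifier="hellaswag")
    print(dataset)


if __name__ == "__main__":
    asyncio.run(fetch_hellaswag("localhost", 5000))  # host/port are assumptions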

evals/client.py

@@ -12,6 +12,7 @@ import httpx
 from termcolor import cprint

 from .evals import *  # noqa: F403
+from ..datasets.client import DatasetsClient


 class EvaluationClient(Evals):
@@ -54,13 +55,31 @@ class EvaluationClient(Evals):
 async def run_main(host: str, port: int):
     client = EvaluationClient(f"http://{host}:{port}")
+    dataset_client = DatasetsClient(f"http://{host}:{port}")

     # Custom Eval Task
+    # 1. register custom dataset
+    response = await dataset_client.create_dataset(
+        dataset_def=CustomDatasetDef(
+            identifier="mmlu-simple-eval-en",
+            url="https://openaipublic.blob.core.windows.net/simple-evals/mmlu.csv",
+        ),
+    )
+    cprint(f"datasets/create: {response}", "cyan")
+
+    # 2. run evals on the registered dataset
     response = await client.run_evals(
         model="Llama3.1-8B-Instruct",
         dataset="mmlu-simple-eval-en",
         task="mmlu",
     )
+    if response.formatted_report:
+        cprint(response.formatted_report, "green")
+    else:
+        cprint(f"Response: {response}", "green")

     # Eleuther Eval Task
     # response = await client.run_evals(
     #     model="Llama3.1-8B-Instruct",
@@ -70,10 +89,6 @@ async def run_main(host: str, port: int):
     #         n_samples=2,
     #     ),
     # )
-    if response.formatted_report:
-        cprint(response.formatted_report, "green")
-    else:
-        cprint(f"Response: {response}", "green")


 def main(host: str, port: int):
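The diff cuts off at def main; in these client scripts it typically just hands run_main to an event loop, roughly as below (a sketch, assuming asyncio and the fire CLI wrapper, neither of which appears in the hunks above; run_main is the coroutine defined earlier in the file):

def main(host: str, port: int):
    # drive the async client flow shown above
    asyncio.run(run_main(host, port))


if __name__ == "__main__":
    fire.Fire(main)  # fire is an assumption; any argument parser would do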