evals new rebase

Xi Yan 2024-10-10 11:35:26 -07:00
parent 89d24a07f0
commit 31c046dcdf
28 changed files with 1141 additions and 87 deletions


@@ -0,0 +1,85 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import asyncio
import json

import fire
import httpx
from termcolor import cprint

# Evals, EvaluateTaskConfig, and EvaluateResponse come in via the star import.
from .evals import *  # noqa: F403


class EvaluationClient(Evals):
    def __init__(self, base_url: str):
        self.base_url = base_url

    async def initialize(self) -> None:
        pass

    async def shutdown(self) -> None:
        pass

    async def run_evals(
        self,
        model: str,
        task: str,
        dataset: Optional[str] = None,
        eval_task_config: Optional[EvaluateTaskConfig] = None,
    ) -> EvaluateResponse:
        # POST the run request to the server and block until the evaluation finishes.
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{self.base_url}/evals/run",
                json={
                    "model": model,
                    "task": task,
                    "dataset": dataset,
                    "eval_task_config": (
                        json.loads(eval_task_config.json())
                        if eval_task_config
                        else None
                    ),
                },
                headers={"Content-Type": "application/json"},
                timeout=3600,  # evaluations can run for a long time; allow up to an hour
            )
            response.raise_for_status()
            return EvaluateResponse(**response.json())


async def run_main(host: str, port: int):
    client = EvaluationClient(f"http://{host}:{port}")

    # CustomDataset
    response = await client.run_evals(
        model="Llama3.1-8B-Instruct",
        dataset="mmlu-simple-eval-en",
        task="mmlu",
        eval_task_config=EvaluateTaskConfig(
            n_samples=2,
        ),
    )
    cprint(f"evaluate response={response}", "green")

    # Eleuther Eval Task
    # response = await client.run_evals(
    #     model="Llama3.1-8B-Instruct",
    #     task="meta_mmlu_pro_instruct",
    #     # task="meta_ifeval",
    #     eval_task_config=EvaluateTaskConfig(
    #         n_samples=2,
    #     )
    # )
    # cprint(response.metrics["metrics_table"], "red")


def main(host: str, port: int):
    asyncio.run(run_main(host, port))


if __name__ == "__main__":
    fire.Fire(main)
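
For reference, the client above is a thin wrapper around a single POST to the /evals/run route, and fire.Fire(main) lets it be run from the command line as python <path to this client module> <host> <port>. The sketch below shows the equivalent raw request; it is illustrative only and not part of this commit, and the host, port, and payload values are assumptions.

import asyncio

import httpx


async def raw_run_evals() -> None:
    # Mirrors the JSON body that EvaluationClient.run_evals() builds above;
    # every value here is an example, not something prescribed by the commit.
    payload = {
        "model": "Llama3.1-8B-Instruct",
        "task": "mmlu",
        "dataset": "mmlu-simple-eval-en",
        # A hand-written stand-in for a serialized EvaluateTaskConfig; the real
        # client sends the full pydantic dump, including sampling_params defaults.
        "eval_task_config": {"n_samples": 2},
    }
    async with httpx.AsyncClient() as client:
        response = await client.post(
            "http://localhost:5000/evals/run",  # assumed local server address
            json=payload,
            headers={"Content-Type": "application/json"},
            timeout=3600,  # evaluations can take a while
        )
        response.raise_for_status()
        print(response.json())


if __name__ == "__main__":
    asyncio.run(raw_run_evals())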


@@ -4,8 +4,7 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from enum import Enum
-from typing import List, Protocol
+from typing import Protocol

 from llama_models.schema_utils import webmethod
@@ -13,23 +12,6 @@ from pydantic import BaseModel
 from llama_models.llama3.api.datatypes import *  # noqa: F403
 from llama_stack.apis.dataset import *  # noqa: F403
 from llama_stack.apis.common.training_types import *  # noqa: F403
-
-
-class TextGenerationMetric(Enum):
-    perplexity = "perplexity"
-    rouge = "rouge"
-    bleu = "bleu"
-
-
-class QuestionAnsweringMetric(Enum):
-    em = "em"
-    f1 = "f1"
-
-
-class SummarizationMetric(Enum):
-    rouge = "rouge"
-    bleu = "bleu"


 class EvaluationJob(BaseModel):
@@ -40,37 +22,21 @@ class EvaluationJobLogStream(BaseModel):
     job_uuid: str


-class EvaluateTaskRequestCommon(BaseModel):
-    job_uuid: str
-    dataset: TrainEvalDataset
-    checkpoint: Checkpoint
-
-    # generation params
+class EvaluateTaskConfig(BaseModel):
+    # num examples to evaluate, evaluate all if None
+    n_samples: Optional[int] = None
+    # model evaluation params
     sampling_params: SamplingParams = SamplingParams()


 @json_schema_type
-class EvaluateTextGenerationRequest(EvaluateTaskRequestCommon):
-    """Request to evaluate text generation."""
+class EvaluateResponse(BaseModel):
+    """Scores for evaluation."""

-    metrics: List[TextGenerationMetric]
-
-
-@json_schema_type
-class EvaluateQuestionAnsweringRequest(EvaluateTaskRequestCommon):
-    """Request to evaluate question answering."""
-
-    metrics: List[QuestionAnsweringMetric]
-
-
-@json_schema_type
-class EvaluateSummarizationRequest(EvaluateTaskRequestCommon):
-    """Request to evaluate summarization."""
-
-    metrics: List[SummarizationMetric]
+    metrics: Dict[str, str]


 class EvaluationJobStatusResponse(BaseModel):
     job_uuid: str
@@ -82,41 +48,44 @@ class EvaluationJobArtifactsResponse(BaseModel):
     job_uuid: str


-class Evaluations(Protocol):
-    @webmethod(route="/evaluate/text_generation/")
-    def evaluate_text_generation(
+@json_schema_type
+class EvaluationJobCreateResponse(BaseModel):
+    """Response to create a evaluation job."""
+
+    job_uuid: str
+
+
+class Evals(Protocol):
+    @webmethod(route="/evals/run")
+    async def run_evals(
         self,
-        metrics: List[TextGenerationMetric],
-    ) -> EvaluationJob: ...
+        model: str,
+        task: str,
+        dataset: Optional[str] = None,
+        eval_task_config: Optional[EvaluateTaskConfig] = None,
+    ) -> EvaluateResponse: ...

-    @webmethod(route="/evaluate/question_answering/")
-    def evaluate_question_answering(
-        self,
-        metrics: List[QuestionAnsweringMetric],
-    ) -> EvaluationJob: ...
+    # @webmethod(route="/evals/jobs")
+    # def get_evaluation_jobs(self) -> List[EvaluationJob]: ...

-    @webmethod(route="/evaluate/summarization/")
-    def evaluate_summarization(
-        self,
-        metrics: List[SummarizationMetric],
-    ) -> EvaluationJob: ...
+    # @webmethod(route="/evals/job/create")
+    # async def create_evaluation_job(
+    #     self, model: str, dataset: str, task: str
+    # ) -> EvaluationJob: ...

-    @webmethod(route="/evaluate/jobs")
-    def get_evaluation_jobs(self) -> List[EvaluationJob]: ...
+    # @webmethod(route="/evals/job/status")
+    # def get_evaluation_job_status(
+    #     self, job_uuid: str
+    # ) -> EvaluationJobStatusResponse: ...

-    @webmethod(route="/evaluate/job/status")
-    def get_evaluation_job_status(
-        self, job_uuid: str
-    ) -> EvaluationJobStatusResponse: ...
+    # # sends SSE stream of logs
+    # @webmethod(route="/evals/job/logs")
+    # def get_evaluation_job_logstream(self, job_uuid: str) -> EvaluationJobLogStream: ...

-    # sends SSE stream of logs
-    @webmethod(route="/evaluate/job/logs")
-    def get_evaluation_job_logstream(self, job_uuid: str) -> EvaluationJobLogStream: ...
+    # @webmethod(route="/evals/job/cancel")
+    # def cancel_evaluation_job(self, job_uuid: str) -> None: ...

-    @webmethod(route="/evaluate/job/cancel")
-    def cancel_evaluation_job(self, job_uuid: str) -> None: ...
-
-    @webmethod(route="/evaluate/job/artifacts")
-    def get_evaluation_job_artifacts(
-        self, job_uuid: str
-    ) -> EvaluationJobArtifactsResponse: ...
+    # @webmethod(route="/evals/job/artifacts")
+    # def get_evaluation_job_artifacts(
+    #     self, job_uuid: str
+    # ) -> EvaluationJobArtifactsResponse: ...
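
The new Evals protocol above narrows the API surface to a single run_evals webmethod that returns an EvaluateResponse whose metrics field is a plain Dict[str, str]; the job-oriented routes are left commented out for now. As a rough illustration of the provider side, here is a minimal sketch of a class satisfying that protocol. It is not part of this commit, and the import path and class name are assumptions.

from typing import Optional

# Assumed import path for the definitions in the diff above.
from llama_stack.apis.evals import Evals, EvaluateResponse, EvaluateTaskConfig


class ReferenceEvalsImpl(Evals):  # hypothetical provider class
    async def initialize(self) -> None:
        pass

    async def shutdown(self) -> None:
        pass

    async def run_evals(
        self,
        model: str,
        task: str,
        dataset: Optional[str] = None,
        eval_task_config: Optional[EvaluateTaskConfig] = None,
    ) -> EvaluateResponse:
        # Run `task` for `model` on `dataset` here, honoring
        # eval_task_config.n_samples when it is set; the metric name and value
        # below are placeholders (metrics is typed as Dict[str, str]).
        return EvaluateResponse(metrics={"accuracy": "0.66"})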