Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-16 06:53:47 +00:00)

commit fd581c3d88 (parent 6b889651d6)

    only keep 1 run_eval

4 changed files with 45 additions and 76 deletions
@@ -7,14 +7,7 @@ from enum import Enum
 from llama_models.llama3.api.datatypes import *  # noqa: F403
 
 from .....apis.common.job_types import Job
-from .....apis.eval.eval import (
-    AppEvalTaskConfig,
-    BenchmarkEvalTaskConfig,
-    Eval,
-    EvalTaskConfig,
-    EvaluateResponse,
-    JobStatus,
-)
+from .....apis.eval.eval import Eval, EvalTaskConfig, EvaluateResponse, JobStatus
 from llama_stack.apis.common.type_system import *  # noqa: F403
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
@@ -98,21 +91,15 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
                 f"Dataset {dataset_id} does not have a correct input schema in {expected_schemas}"
             )
 
-    async def run_benchmark(
-        self,
-        benchmark_id: str,
-        benchmark_config: BenchmarkEvalTaskConfig,
-    ) -> Job:
-        raise NotImplementedError("Benchmark eval is not implemented yet")
-
     async def run_eval(
         self,
-        task: EvalTaskDef,
-        task_config: AppEvalTaskConfig,
+        task_id: str,
+        task_def: EvalTaskDef,
+        task_config: EvalTaskConfig,
     ) -> Job:
-        dataset_id = task.dataset_id
+        dataset_id = task_def.dataset_id
         candidate = task_config.eval_candidate
-        scoring_functions = task.scoring_functions
+        scoring_functions = task_def.scoring_functions
 
         await self.validate_eval_input_dataset_schema(dataset_id=dataset_id)
         all_rows = await self.datasetio_api.get_rows_paginated(
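For orientation, a minimal sketch (not part of this commit) of the new call shape: run_benchmark is gone and run_eval is the single entry point, taking an explicit task_id alongside the task definition. Here eval_impl is assumed to be a MetaReferenceEvalImpl instance, and task_def / task_config are assumed to be built elsewhere from the eval API types imported above.

    # Sketch only: drive the consolidated entry point after this commit.
    job = await eval_impl.run_eval(
        task_id="my-eval-task",   # hypothetical task identifier
        task_def=task_def,        # EvalTaskDef carrying dataset_id and scoring_functions
        task_config=task_config,  # EvalTaskConfig carrying eval_candidate
    )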
@@ -120,6 +107,7 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
             rows_in_page=-1,
         )
         res = await self.evaluate_rows(
+            task_id=task_id,
             input_rows=all_rows.rows,
             scoring_functions=scoring_functions,
             task_config=task_config,
@@ -133,10 +121,10 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
 
     async def evaluate_rows(
         self,
+        task_id: str,
         input_rows: List[Dict[str, Any]],
         scoring_functions: List[str],
         task_config: EvalTaskConfig,
-        eval_task_id: Optional[str] = None,
     ) -> EvaluateResponse:
         candidate = task_config.eval_candidate
         if candidate.type == "agent":
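evaluate_rows can still be called directly; a hedged sketch of the reordered parameters follows. The row contents and the scoring function identifier are made up for illustration.

    # Sketch only: task_id now leads, and the old eval_task_id keyword is gone.
    response = await eval_impl.evaluate_rows(
        task_id="my-eval-task",  # hypothetical
        input_rows=[{"input_query": "What is 2+2?", "expected_answer": "4"}],  # made-up row
        scoring_functions=["basic::equality"],  # assumed scoring function id
        task_config=task_config,
    )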
@@ -206,17 +194,17 @@ class MetaReferenceEvalImpl(Eval, EvalTasksProtocolPrivate):
 
         return EvaluateResponse(generations=generations, scores=score_response.results)
 
-    async def job_status(self, job_id: str, eval_task_id: str) -> Optional[JobStatus]:
+    async def job_status(self, task_id: str, job_id: str) -> Optional[JobStatus]:
         if job_id in self.jobs:
             return JobStatus.completed
 
         return None
 
-    async def job_cancel(self, job_id: str, eval_task_id: str) -> None:
+    async def job_cancel(self, task_id: str, job_id: str) -> None:
         raise NotImplementedError("Job cancel is not implemented yet")
 
-    async def job_result(self, job_id: str, eval_task_id: str) -> EvaluateResponse:
-        status = await self.job_status(job_id, eval_task_id)
+    async def job_result(self, task_id: str, job_id: str) -> EvaluateResponse:
+        status = await self.job_status(task_id, job_id)
         if not status or status != JobStatus.completed:
             raise ValueError(f"Job is not completed, Status: {status.value}")
 
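The job methods now take (task_id, job_id), in that order. A minimal sketch of fetching results under the new convention; the job_id attribute name on the returned Job is an assumption, since this commit does not show it.

    # Sketch only: job_result raises unless job_status reports completed.
    status = await eval_impl.job_status(task_id, job.job_id)  # job_id field name assumed
    if status == JobStatus.completed:
        result = await eval_impl.job_result(task_id, job.job_id)
        print(result.generations, result.scores)  # fields shown in EvaluateResponse above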