Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 10:54:19 +00:00)
Latest commit: [Evals API][5/n] fixes to generate openapi spec (#323)
70 lines · 2 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict, List, Literal, Optional, Protocol, Union

from pydantic import BaseModel, Field
from typing_extensions import Annotated

from llama_models.llama3.api.datatypes import *  # noqa: F403
from llama_models.schema_utils import json_schema_type, webmethod

from llama_stack.apis.agents import AgentConfig
from llama_stack.apis.common.job_types import Job, JobStatus
from llama_stack.apis.scoring import *  # noqa: F403
from llama_stack.apis.scoring_functions import *  # noqa: F403

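
# An EvalCandidate is the thing being evaluated: either a bare model plus
# sampling parameters, or a full agent configuration.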
@json_schema_type
class ModelCandidate(BaseModel):
    type: Literal["model"] = "model"
    model: str
    sampling_params: SamplingParams
    system_message: Optional[SystemMessage] = None

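
# AgentCandidate wraps an AgentConfig so that an entire agent, rather than a
# bare model, can be evaluated.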
@json_schema_type
class AgentCandidate(BaseModel):
    type: Literal["agent"] = "agent"
    config: AgentConfig

EvalCandidate = Annotated[
    Union[ModelCandidate, AgentCandidate], Field(discriminator="type")
]
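
# Pydantic dispatches on the `type` field when parsing an EvalCandidate:
# {"type": "model", ...} deserializes to ModelCandidate and
# {"type": "agent", ...} to AgentCandidate.
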
@json_schema_type
class EvaluateResponse(BaseModel):
    generations: List[Dict[str, Any]]
    # each key in the dict is a scoring function name
    scores: Dict[str, ScoringResult]

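
# The Eval API surface: an asynchronous batch entry point over a registered
# dataset, a synchronous entry point over caller-supplied rows, and job
# management for in-flight batch runs.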
class Eval(Protocol):
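    # Kick off evaluation of a pre-registered dataset against a candidate.
    # Runs asynchronously; the returned Job handle is tracked via the
    # /eval/job/* methods below.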
@webmethod(route="/eval/evaluate_batch", method="POST")
|
|
async def evaluate_batch(
|
|
self,
|
|
dataset_id: str,
|
|
candidate: EvalCandidate,
|
|
scoring_functions: List[str],
|
|
) -> Job: ...
|
|
|
|
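
    # Evaluate caller-supplied rows inline and return generations plus scores
    # synchronously, without creating a job.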
@webmethod(route="/eval/evaluate", method="POST")
|
|
async def evaluate(
|
|
self,
|
|
input_rows: List[Dict[str, Any]],
|
|
candidate: EvalCandidate,
|
|
scoring_functions: List[str],
|
|
) -> EvaluateResponse: ...
|
|
|
|
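
    # Job management for evaluate_batch runs; the Optional return type of
    # job_status suggests None for an unknown job_id.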
@webmethod(route="/eval/job/status", method="GET")
|
|
async def job_status(self, job_id: str) -> Optional[JobStatus]: ...
|
|
|
|
@webmethod(route="/eval/job/cancel", method="POST")
|
|
async def job_cancel(self, job_id: str) -> None: ...
|
|
|
|
@webmethod(route="/eval/job/result", method="GET")
|
|
async def job_result(self, job_id: str) -> EvaluateResponse: ...
|
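
# A minimal end-to-end sketch of driving an Eval implementation. Everything
# here is illustrative: `client` is a hypothetical object satisfying this
# protocol; the dataset id, model id, and scoring function name are
# placeholders; and Job.job_id / JobStatus.completed are assumed shapes of
# the imported job types.
#
#   import asyncio
#
#   async def run_eval(client: Eval) -> EvaluateResponse:
#       job = await client.evaluate_batch(
#           dataset_id="my_eval_dataset",        # placeholder dataset id
#           candidate=ModelCandidate(
#               model="Llama3.1-8B-Instruct",    # placeholder model id
#               sampling_params=SamplingParams(),
#           ),
#           scoring_functions=["equality"],      # placeholder scoring fn
#       )
#       while await client.job_status(job.job_id) != JobStatus.completed:
#           await asyncio.sleep(1)               # poll until the batch finishes
#       return await client.job_result(job.job_id)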