Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-31 16:44:01 +00:00)
Merge branch 'api_2' into api_3

commit ea0b29ae50
4 changed files with 72 additions and 39 deletions
@@ -8,15 +8,22 @@ from typing import Any, Dict, List, Literal, Optional, Protocol, runtime_checkable
 
 from pydantic import BaseModel, Field
 
 from llama_stack.apis.resource import Resource, ResourceType
+from llama_stack.apis.scoring_functions import ScoringFnParams
 from llama_stack.schema_utils import json_schema_type, webmethod
 
 
 class CommonBenchmarkFields(BaseModel):
+    """
+    :param dataset_id: The ID of the dataset used to run the benchmark.
+    :param scoring_functions: The scoring functions with parameters to use for this benchmark.
+    :param metadata: Metadata for this benchmark for additional descriptions.
+    """
+
     dataset_id: str
-    scoring_functions: List[str]
+    scoring_functions: List[ScoringFnParams]
     metadata: Dict[str, Any] = Field(
         default_factory=dict,
-        description="Metadata for this evaluation task",
+        description="Metadata for this benchmark",
     )
 
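To make the new field shape concrete, here is a minimal runnable sketch of `CommonBenchmarkFields` after this hunk. `ScoringFnParamsStub` is a hypothetical stand-in for the real `ScoringFnParams` union imported above, kept simple so the snippet is self-contained; only `dataset_id`, `scoring_functions`, and `metadata` come from the diff.

```python
# Minimal sketch of CommonBenchmarkFields after this hunk (pydantic v2).
# ScoringFnParamsStub is a hypothetical stand-in for ScoringFnParams.
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field


class ScoringFnParamsStub(BaseModel):
    # Illustrative fields only; the real union carries per-function params.
    scoring_fn_id: str
    judge_model: Optional[str] = None


class CommonBenchmarkFields(BaseModel):
    dataset_id: str
    # Previously List[str]; each entry now carries its own parameters.
    scoring_functions: List[ScoringFnParamsStub]
    metadata: Dict[str, Any] = Field(
        default_factory=dict,
        description="Metadata for this benchmark",
    )


fields = CommonBenchmarkFields(
    dataset_id="my-eval-dataset",
    scoring_functions=[ScoringFnParamsStub(scoring_fn_id="basic::equality")],
)
print(fields.model_dump())
```

The upshot is that a benchmark definition no longer names scoring functions by bare ID; each entry travels with its own configuration.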
@@ -57,10 +64,17 @@ class Benchmarks(Protocol):
     @webmethod(route="/eval/benchmarks", method="POST")
     async def register_benchmark(
         self,
-        benchmark_id: str,
         dataset_id: str,
-        scoring_functions: List[str],
-        provider_benchmark_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
+        scoring_functions: List[ScoringFnParams],
+        benchmark_id: Optional[str] = None,
         metadata: Optional[Dict[str, Any]] = None,
-    ) -> None: ...
+    ) -> Benchmark:
+        """
+        Register a new benchmark.
+
+        :param dataset_id: The ID of the dataset used to run the benchmark.
+        :param scoring_functions: The scoring functions with parameters to use for this benchmark.
+        :param benchmark_id: (Optional) The ID of the benchmark to register. If not provided, a random ID will be generated.
+        :param metadata: (Optional) Metadata for this benchmark for additional descriptions.
+        """
+        ...
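A hedged sketch of how a caller might use the revised signature; `client` stands for any concrete implementation of the `Benchmarks` protocol, and the argument values are illustrative rather than taken from the source.

```python
# Illustrative caller for the revised register_benchmark signature.
# `client` is assumed to be any object implementing the Benchmarks protocol.
from typing import Any, List


async def register_example(client, scoring_params: List[Any]) -> None:
    # benchmark_id is omitted, so per the docstring a random ID is
    # generated; the method now returns the registered Benchmark.
    benchmark = await client.register_benchmark(
        dataset_id="my-eval-dataset",
        scoring_functions=scoring_params,  # List[ScoringFnParams]
        metadata={"description": "smoke-test benchmark"},
    )
    print(benchmark)
```

Returning the `Benchmark` instead of `None` lets callers capture the generated `benchmark_id` without a follow-up lookup.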
@@ -12,8 +12,8 @@ from typing import (
     Literal,
     Optional,
     Protocol,
-    runtime_checkable,
     Union,
+    runtime_checkable,
 )
 
 from pydantic import BaseModel, Field
@@ -218,9 +218,7 @@ class CommonScoringFnFields(BaseModel):
 
 @json_schema_type
 class ScoringFn(CommonScoringFnFields, Resource):
-    type: Literal[ResourceType.scoring_function.value] = (
-        ResourceType.scoring_function.value
-    )
+    type: Literal[ResourceType.scoring_function.value] = ResourceType.scoring_function.value
 
     @property
     def scoring_fn_id(self) -> str:
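The collapsed `type` annotation above uses pydantic's pinned-`Literal` idiom: the field can only ever hold the enum member's value, which also lets it serve as a discriminator when deserializing mixed lists of resources. A self-contained sketch, with a toy `ResourceType` standing in for `llama_stack.apis.resource.ResourceType` (an assumption for runnability):

```python
# Toy reproduction of the pinned-Literal pattern (pydantic v2).
# This ResourceType is a stand-in for llama_stack.apis.resource.ResourceType.
from enum import Enum
from typing import Literal

from pydantic import BaseModel


class ResourceType(Enum):
    scoring_function = "scoring_function"


class ScoringFn(BaseModel):
    # Defaults to the enum member's value and rejects any other value
    # at validation time.
    type: Literal[ResourceType.scoring_function.value] = ResourceType.scoring_function.value


print(ScoringFn().type)                          # scoring_function
print(ScoringFn(type="scoring_function").type)   # also valid
```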