Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-29 15:23:51 +00:00
scoring_function -> scoring_fn rename, scorer -> scoring_fn rename

Commit 6b0baa6d53 (parent cdfd584a8f)
11 changed files with 46 additions and 48 deletions

@@ -37,7 +37,7 @@ class ScoreResponse(BaseModel):


 class ScoringFunctionStore(Protocol):
-    def get_scoring_function(self, name: str) -> ScoringFunctionDefWithProvider: ...
+    def get_scoring_function(self, name: str) -> ScoringFnDefWithProvider: ...


 @runtime_checkable

@@ -29,7 +29,7 @@ class LLMAsJudgeContext(BaseModel):


 @json_schema_type
-class ScoringFunctionDef(BaseModel):
+class ScoringFnDef(BaseModel):
     identifier: str
     description: Optional[str] = None
     metadata: Dict[str, Any] = Field(

@@ -48,7 +48,7 @@ class ScoringFunctionDef(BaseModel):


 @json_schema_type
-class ScoringFunctionDefWithProvider(ScoringFunctionDef):
+class ScoringFnDefWithProvider(ScoringFnDef):
     provider_id: str = Field(
         description="ID of the provider which serves this dataset",
     )

@@ -57,14 +57,14 @@ class ScoringFunctionDefWithProvider(ScoringFunctionDef):
 @runtime_checkable
 class ScoringFunctions(Protocol):
     @webmethod(route="/scoring_functions/list", method="GET")
-    async def list_scoring_functions(self) -> List[ScoringFunctionDefWithProvider]: ...
+    async def list_scoring_functions(self) -> List[ScoringFnDefWithProvider]: ...

     @webmethod(route="/scoring_functions/get", method="GET")
     async def get_scoring_function(
         self, name: str
-    ) -> Optional[ScoringFunctionDefWithProvider]: ...
+    ) -> Optional[ScoringFnDefWithProvider]: ...

     @webmethod(route="/scoring_functions/register", method="POST")
     async def register_scoring_function(
-        self, function_def: ScoringFunctionDefWithProvider
+        self, function_def: ScoringFnDefWithProvider
     ) -> None: ...

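For orientation, the sketch below mirrors the renamed definitions from the hunks above as a standalone pydantic snippet. Only the field names (identifier, description, metadata, provider_id) come from this diff; the metadata default, the example values, and the "meta-reference" provider id are illustrative assumptions, not llama_stack code.

from typing import Any, Dict, Optional

from pydantic import BaseModel, Field


class ScoringFnDef(BaseModel):
    # Fields as shown in the hunks above; the metadata default is an assumption.
    identifier: str
    description: Optional[str] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)


class ScoringFnDefWithProvider(ScoringFnDef):
    provider_id: str = Field(
        description="ID of the provider which serves this dataset",
    )


fn_def = ScoringFnDefWithProvider(
    identifier="equality",
    description="Returns 1.0 if the input is equal to the target, 0.0 otherwise.",
    provider_id="meta-reference",  # hypothetical provider id
)
print(fn_def.dict())  # pydantic v1 style, matching the **s.dict() call in this diff
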
@@ -34,7 +34,7 @@ RoutableObject = Union[
     ShieldDef,
     MemoryBankDef,
     DatasetDef,
-    ScoringFunctionDef,
+    ScoringFnDef,
 ]

 RoutableObjectWithProvider = Union[

@@ -42,7 +42,7 @@ RoutableObjectWithProvider = Union[
     ShieldDefWithProvider,
     MemoryBankDefWithProvider,
     DatasetDefWithProvider,
-    ScoringFunctionDefWithProvider,
+    ScoringFnDefWithProvider,
 ]

 RoutedProtocol = Union[

@@ -100,7 +100,7 @@ class CommonRoutingTableImpl(RoutingTable):
                 scoring_functions = await p.list_scoring_functions()
                 add_objects(
                     [
-                        ScoringFunctionDefWithProvider(**s.dict(), provider_id=pid)
+                        ScoringFnDefWithProvider(**s.dict(), provider_id=pid)
                         for s in scoring_functions
                     ]
                 )

@@ -239,7 +239,7 @@ class DatasetsRoutingTable(CommonRoutingTableImpl, Datasets):


 class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, Scoring):
-    async def list_scoring_functions(self) -> List[ScoringFunctionDefWithProvider]:
+    async def list_scoring_functions(self) -> List[ScoringFnDefWithProvider]:
         objects = []
         for objs in self.registry.values():
             objects.extend(objs)

@@ -247,10 +247,10 @@ class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, Scoring):

     async def get_scoring_function(
         self, name: str
-    ) -> Optional[ScoringFunctionDefWithProvider]:
+    ) -> Optional[ScoringFnDefWithProvider]:
         return self.get_object_by_identifier(name)

     async def register_scoring_function(
-        self, function_def: ScoringFunctionDefWithProvider
+        self, function_def: ScoringFnDefWithProvider
     ) -> None:
         await self.register_object(function_def)

@@ -13,7 +13,7 @@ from pydantic import BaseModel, Field
 from llama_stack.apis.datasets import DatasetDef
 from llama_stack.apis.memory_banks import MemoryBankDef
 from llama_stack.apis.models import ModelDef
-from llama_stack.apis.scoring_functions import ScoringFunctionDef
+from llama_stack.apis.scoring_functions import ScoringFnDef
 from llama_stack.apis.shields import ShieldDef


@@ -64,11 +64,9 @@ class DatasetsProtocolPrivate(Protocol):


 class ScoringFunctionsProtocolPrivate(Protocol):
-    async def list_scoring_functions(self) -> List[ScoringFunctionDef]: ...
+    async def list_scoring_functions(self) -> List[ScoringFnDef]: ...

-    async def register_scoring_function(
-        self, function_def: ScoringFunctionDef
-    ) -> None: ...
+    async def register_scoring_function(self, function_def: ScoringFnDef) -> None: ...


 @json_schema_type

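The renamed private protocol only changes type names; as a rough illustration, the standalone sketch below shows a provider shape that satisfies it. The ScoringFnDef dataclass, the InMemoryScoringProvider class, and the "my_custom_fn" identifier are made up for the example and are not llama_stack code.

import asyncio
from dataclasses import dataclass
from typing import Dict, List, Protocol


@dataclass
class ScoringFnDef:
    # Stand-in for llama_stack's ScoringFnDef; only the identifier is used here.
    identifier: str
    description: str = ""


class ScoringFunctionsProtocolPrivate(Protocol):
    async def list_scoring_functions(self) -> List[ScoringFnDef]: ...

    async def register_scoring_function(self, function_def: ScoringFnDef) -> None: ...


class InMemoryScoringProvider:
    """Hypothetical provider keeping registered definitions in a dict."""

    def __init__(self) -> None:
        self._fns: Dict[str, ScoringFnDef] = {}

    async def list_scoring_functions(self) -> List[ScoringFnDef]:
        return list(self._fns.values())

    async def register_scoring_function(self, function_def: ScoringFnDef) -> None:
        self._fns[function_def.identifier] = function_def


async def main() -> None:
    provider: ScoringFunctionsProtocolPrivate = InMemoryScoringProvider()
    await provider.register_scoring_function(ScoringFnDef(identifier="my_custom_fn"))
    print([fn.identifier for fn in await provider.list_scoring_functions()])


asyncio.run(main())
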
@@ -13,22 +13,22 @@ from llama_stack.apis.datasetio import * # noqa: F403
 from llama_stack.apis.datasets import * # noqa: F403

 from llama_stack.providers.datatypes import ScoringFunctionsProtocolPrivate
-from llama_stack.providers.impls.meta_reference.scoring.scorer.equality_scorer import (
-    EqualityScorer,
+from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.equality_scoring_fn import (
+    EqualityScoringFn,
 )

-from llama_stack.providers.impls.meta_reference.scoring.scorer.subset_of_scorer import (
-    SubsetOfScorer,
+from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.subset_of_scoring_fn import (
+    SubsetOfScoringFn,
 )

 from .config import MetaReferenceScoringConfig

-SUPPORTED_SCORERS = [
-    EqualityScorer,
-    SubsetOfScorer,
+SUPPORTED_SCORING_FNS = [
+    EqualityScoringFn,
+    SubsetOfScoringFn,
 ]

-SCORER_REGISTRY = {x.scoring_function_def.identifier: x for x in SUPPORTED_SCORERS}
+SCORER_REGISTRY = {x.scoring_function_def.identifier: x for x in SUPPORTED_SCORING_FNS}


 class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):

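SUPPORTED_SCORING_FNS and SCORER_REGISTRY above implement a simple identifier-to-class registry. A toy version of the same pattern, with SimpleNamespace standing in for the ScoringFnDef instances, might look like this:

from types import SimpleNamespace


class EqualityScoringFn:
    # SimpleNamespace stands in for a ScoringFnDef instance in this toy example.
    scoring_function_def = SimpleNamespace(identifier="equality")


class SubsetOfScoringFn:
    scoring_function_def = SimpleNamespace(identifier="subset_of")


SUPPORTED_SCORING_FNS = [EqualityScoringFn, SubsetOfScoringFn]

# Same shape as SCORER_REGISTRY above: scoring function identifier -> class.
SCORER_REGISTRY = {x.scoring_function_def.identifier: x for x in SUPPORTED_SCORING_FNS}

print(sorted(SCORER_REGISTRY))  # ['equality', 'subset_of']
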
@@ -46,10 +46,10 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):

     async def shutdown(self) -> None: ...

-    async def list_scoring_functions(self) -> List[ScoringFunctionDef]:
-        return [x.scoring_function_def for x in SUPPORTED_SCORERS]
+    async def list_scoring_functions(self) -> List[ScoringFnDef]:
+        return [x.scoring_function_def for x in SUPPORTED_SCORING_FNS]

-    async def register_scoring_function(self, function_def: ScoringFunctionDef) -> None:
+    async def register_scoring_function(self, function_def: ScoringFnDef) -> None:
         raise NotImplementedError(
             "Dynamically registering scoring functions is not supported"
         )

@@ -101,9 +101,9 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
         for scoring_fn_id in scoring_functions:
             if scoring_fn_id not in SCORER_REGISTRY:
                 raise ValueError(f"Scoring function {scoring_fn_id} is not supported.")
-            scorer = SCORER_REGISTRY[scoring_fn_id]()
-            score_results = scorer.score(input_rows)
-            agg_results = scorer.aggregate(score_results)
+            scoring_fn = SCORER_REGISTRY[scoring_fn_id]()
+            score_results = scoring_fn.score(input_rows)
+            agg_results = scoring_fn.aggregate(score_results)
             res[scoring_fn_id] = ScoringResult(
                 score_rows=score_results,
                 aggregated_results=agg_results,

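The loop above resolves a scoring_fn from the registry, scores the input rows, and aggregates the per-row results. A self-contained toy version of that flow is sketched below; the row keys ("expected", "generated") and the accuracy aggregation are assumptions for illustration, not the meta-reference schema.

from statistics import mean
from typing import Any, Dict, List


class EqualityScoringFn:
    # Toy stand-in: exact-match scoring over assumed "expected"/"generated" keys.
    def score(self, rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        return [
            {"score": 1.0 if row["expected"] == row["generated"] else 0.0}
            for row in rows
        ]

    def aggregate(self, score_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        return {"accuracy": mean(r["score"] for r in score_results)}


SCORER_REGISTRY = {"equality": EqualityScoringFn}

input_rows = [
    {"expected": "4", "generated": "4"},
    {"expected": "blue", "generated": "red"},
]

scoring_fn = SCORER_REGISTRY["equality"]()
score_results = scoring_fn.score(input_rows)
agg_results = scoring_fn.aggregate(score_results)
print(score_results)  # [{'score': 1.0}, {'score': 0.0}]
print(agg_results)    # {'accuracy': 0.5}
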
@@ -9,15 +9,15 @@ from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403


-class BaseScorer(ABC):
+class BaseScoringFn(ABC):
     """
-    Base interface class for all meta-reference scorers.
-    Each scorer needs to implement the following methods:
+    Base interface class for all meta-reference scoring_fns.
+    Each scoring_fn needs to implement the following methods:
     - score_row(self, row)
-    - aggregate(self, scorer_results)
+    - aggregate(self, scoring_fn_results)
     """

-    scoring_function_def: ScoringFunctionDef
+    scoring_function_def: ScoringFnDef

     def __init__(self, *args, **kwargs) -> None:
         super().__init__(*args, **kwargs)

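Per the docstring, every scoring_fn derived from BaseScoringFn supplies score_row and aggregate. A minimal shape-only sketch, with a made-up LengthScoringFn subclass and guessed signatures, could look like this:

from abc import ABC, abstractmethod
from typing import Any, Dict, List


class BaseScoringFn(ABC):
    @abstractmethod
    def score_row(self, row: Dict[str, Any]) -> Dict[str, Any]: ...

    @abstractmethod
    def aggregate(self, scoring_fn_results: List[Dict[str, Any]]) -> Dict[str, Any]: ...

    def score(self, rows: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        # Convenience helper: apply score_row to every input row.
        return [self.score_row(row) for row in rows]


class LengthScoringFn(BaseScoringFn):
    """Made-up example: 1.0 when the (assumed) "generated" field is non-empty."""

    def score_row(self, row: Dict[str, Any]) -> Dict[str, Any]:
        return {"score": 1.0 if row.get("generated") else 0.0}

    def aggregate(self, scoring_fn_results: List[Dict[str, Any]]) -> Dict[str, Any]:
        scores = [r["score"] for r in scoring_fn_results]
        return {"accuracy": sum(scores) / len(scores) if scores else 0.0}


print(LengthScoringFn().score([{"generated": "hi"}, {}]))  # [{'score': 1.0}, {'score': 0.0}]
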
@@ -4,23 +4,23 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from llama_stack.providers.impls.meta_reference.scoring.scorer.base_scorer import (
-    BaseScorer,
+from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.base_scoring_fn import (
+    BaseScoringFn,
 )
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
-from llama_stack.providers.impls.meta_reference.scoring.scorer.common import (
+from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
     aggregate_accuracy,
 )


-class EqualityScorer(BaseScorer):
+class EqualityScoringFn(BaseScoringFn):
     """
-    A scorer that assigns a score of 1.0 if the input string matches the target string, and 0.0 otherwise.
+    A scoring_fn that assigns a score of 1.0 if the input string matches the target string, and 0.0 otherwise.
     """

-    scoring_function_def = ScoringFunctionDef(
+    scoring_function_def = ScoringFnDef(
         identifier="equality",
         description="Returns 1.0 if the input is equal to the target, 0.0 otherwise.",
         parameters=[],

@@ -4,23 +4,23 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.

-from llama_stack.providers.impls.meta_reference.scoring.scorer.base_scorer import (
-    BaseScorer,
+from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.base_scoring_fn import (
+    BaseScoringFn,
 )
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
-from llama_stack.providers.impls.meta_reference.scoring.scorer.common import (
+from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
     aggregate_accuracy,
 )


-class SubsetOfScorer(BaseScorer):
+class SubsetOfScoringFn(BaseScoringFn):
     """
-    A scorer that assigns a score of 1.0 if the expected string is included in the generated string, and 0.0 otherwise.
+    A scoring_fn that assigns a score of 1.0 if the expected string is included in the generated string, and 0.0 otherwise.
     """

-    scoring_function_def = ScoringFunctionDef(
+    scoring_function_def = ScoringFnDef(
         identifier="subset_of",
         description="Returns 1.0 if the expected is included in generated, 0.0 otherwise.",
         parameters=[],

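As a plain-Python illustration of the difference described in the two renamed scoring functions' docstrings: "equality" demands an exact match, while "subset_of" only requires the expected string to be contained in the generated one.

def equality_score(expected: str, generated: str) -> float:
    # Exact match only.
    return 1.0 if expected == generated else 0.0


def subset_of_score(expected: str, generated: str) -> float:
    # Substring containment is enough.
    return 1.0 if expected in generated else 0.0


print(equality_score("Paris", "Paris"))           # 1.0
print(equality_score("Paris", "Paris, France"))   # 0.0
print(subset_of_score("Paris", "Paris, France"))  # 1.0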