Rename scoring_function -> scoring_fn and scorer -> scoring_fn

This commit is contained in:
Xi Yan 2024-10-24 21:06:38 -07:00
parent cdfd584a8f
commit 6b0baa6d53
11 changed files with 46 additions and 48 deletions

View file

@@ -37,7 +37,7 @@ class ScoreResponse(BaseModel):
class ScoringFunctionStore(Protocol): class ScoringFunctionStore(Protocol):
def get_scoring_function(self, name: str) -> ScoringFunctionDefWithProvider: ... def get_scoring_function(self, name: str) -> ScoringFnDefWithProvider: ...
@runtime_checkable @runtime_checkable

View file

@@ -29,7 +29,7 @@ class LLMAsJudgeContext(BaseModel):
@json_schema_type @json_schema_type
class ScoringFunctionDef(BaseModel): class ScoringFnDef(BaseModel):
identifier: str identifier: str
description: Optional[str] = None description: Optional[str] = None
metadata: Dict[str, Any] = Field( metadata: Dict[str, Any] = Field(
@@ -48,7 +48,7 @@ class ScoringFunctionDef(BaseModel):
@json_schema_type @json_schema_type
class ScoringFunctionDefWithProvider(ScoringFunctionDef): class ScoringFnDefWithProvider(ScoringFnDef):
provider_id: str = Field( provider_id: str = Field(
description="ID of the provider which serves this dataset", description="ID of the provider which serves this dataset",
) )
@@ -57,14 +57,14 @@ class ScoringFunctionDefWithProvider(ScoringFunctionDef):
@runtime_checkable @runtime_checkable
class ScoringFunctions(Protocol): class ScoringFunctions(Protocol):
@webmethod(route="/scoring_functions/list", method="GET") @webmethod(route="/scoring_functions/list", method="GET")
async def list_scoring_functions(self) -> List[ScoringFunctionDefWithProvider]: ... async def list_scoring_functions(self) -> List[ScoringFnDefWithProvider]: ...
@webmethod(route="/scoring_functions/get", method="GET") @webmethod(route="/scoring_functions/get", method="GET")
async def get_scoring_function( async def get_scoring_function(
self, name: str self, name: str
) -> Optional[ScoringFunctionDefWithProvider]: ... ) -> Optional[ScoringFnDefWithProvider]: ...
@webmethod(route="/scoring_functions/register", method="POST") @webmethod(route="/scoring_functions/register", method="POST")
async def register_scoring_function( async def register_scoring_function(
self, function_def: ScoringFunctionDefWithProvider self, function_def: ScoringFnDefWithProvider
) -> None: ... ) -> None: ...

View file

@@ -34,7 +34,7 @@ RoutableObject = Union[
ShieldDef, ShieldDef,
MemoryBankDef, MemoryBankDef,
DatasetDef, DatasetDef,
ScoringFunctionDef, ScoringFnDef,
] ]
RoutableObjectWithProvider = Union[ RoutableObjectWithProvider = Union[
@@ -42,7 +42,7 @@ RoutableObjectWithProvider = Union[
ShieldDefWithProvider, ShieldDefWithProvider,
MemoryBankDefWithProvider, MemoryBankDefWithProvider,
DatasetDefWithProvider, DatasetDefWithProvider,
ScoringFunctionDefWithProvider, ScoringFnDefWithProvider,
] ]
RoutedProtocol = Union[ RoutedProtocol = Union[

View file

@@ -100,7 +100,7 @@ class CommonRoutingTableImpl(RoutingTable):
scoring_functions = await p.list_scoring_functions() scoring_functions = await p.list_scoring_functions()
add_objects( add_objects(
[ [
ScoringFunctionDefWithProvider(**s.dict(), provider_id=pid) ScoringFnDefWithProvider(**s.dict(), provider_id=pid)
for s in scoring_functions for s in scoring_functions
] ]
) )
@@ -239,7 +239,7 @@ class DatasetsRoutingTable(CommonRoutingTableImpl, Datasets):
class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, Scoring): class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, Scoring):
async def list_scoring_functions(self) -> List[ScoringFunctionDefWithProvider]: async def list_scoring_functions(self) -> List[ScoringFnDefWithProvider]:
objects = [] objects = []
for objs in self.registry.values(): for objs in self.registry.values():
objects.extend(objs) objects.extend(objs)
@@ -247,10 +247,10 @@ class ScoringFunctionsRoutingTable(CommonRoutingTableImpl, Scoring):
async def get_scoring_function( async def get_scoring_function(
self, name: str self, name: str
) -> Optional[ScoringFunctionDefWithProvider]: ) -> Optional[ScoringFnDefWithProvider]:
return self.get_object_by_identifier(name) return self.get_object_by_identifier(name)
async def register_scoring_function( async def register_scoring_function(
self, function_def: ScoringFunctionDefWithProvider self, function_def: ScoringFnDefWithProvider
) -> None: ) -> None:
await self.register_object(function_def) await self.register_object(function_def)

View file

@@ -13,7 +13,7 @@ from pydantic import BaseModel, Field
from llama_stack.apis.datasets import DatasetDef from llama_stack.apis.datasets import DatasetDef
from llama_stack.apis.memory_banks import MemoryBankDef from llama_stack.apis.memory_banks import MemoryBankDef
from llama_stack.apis.models import ModelDef from llama_stack.apis.models import ModelDef
from llama_stack.apis.scoring_functions import ScoringFunctionDef from llama_stack.apis.scoring_functions import ScoringFnDef
from llama_stack.apis.shields import ShieldDef from llama_stack.apis.shields import ShieldDef
@@ -64,11 +64,9 @@ class DatasetsProtocolPrivate(Protocol):
class ScoringFunctionsProtocolPrivate(Protocol): class ScoringFunctionsProtocolPrivate(Protocol):
async def list_scoring_functions(self) -> List[ScoringFunctionDef]: ... async def list_scoring_functions(self) -> List[ScoringFnDef]: ...
async def register_scoring_function( async def register_scoring_function(self, function_def: ScoringFnDef) -> None: ...
self, function_def: ScoringFunctionDef
) -> None: ...
@json_schema_type @json_schema_type

View file

@@ -13,22 +13,22 @@ from llama_stack.apis.datasetio import *  # noqa: F403
from llama_stack.apis.datasets import * # noqa: F403 from llama_stack.apis.datasets import * # noqa: F403
from llama_stack.providers.datatypes import ScoringFunctionsProtocolPrivate from llama_stack.providers.datatypes import ScoringFunctionsProtocolPrivate
from llama_stack.providers.impls.meta_reference.scoring.scorer.equality_scorer import ( from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.equality_scoring_fn import (
EqualityScorer, EqualityScoringFn,
) )
from llama_stack.providers.impls.meta_reference.scoring.scorer.subset_of_scorer import ( from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.subset_of_scoring_fn import (
SubsetOfScorer, SubsetOfScoringFn,
) )
from .config import MetaReferenceScoringConfig from .config import MetaReferenceScoringConfig
SUPPORTED_SCORERS = [ SUPPORTED_SCORING_FNS = [
EqualityScorer, EqualityScoringFn,
SubsetOfScorer, SubsetOfScoringFn,
] ]
SCORER_REGISTRY = {x.scoring_function_def.identifier: x for x in SUPPORTED_SCORERS} SCORER_REGISTRY = {x.scoring_function_def.identifier: x for x in SUPPORTED_SCORING_FNS}
class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate): class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
@@ -46,10 +46,10 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
async def shutdown(self) -> None: ... async def shutdown(self) -> None: ...
async def list_scoring_functions(self) -> List[ScoringFunctionDef]: async def list_scoring_functions(self) -> List[ScoringFnDef]:
return [x.scoring_function_def for x in SUPPORTED_SCORERS] return [x.scoring_function_def for x in SUPPORTED_SCORING_FNS]
async def register_scoring_function(self, function_def: ScoringFunctionDef) -> None: async def register_scoring_function(self, function_def: ScoringFnDef) -> None:
raise NotImplementedError( raise NotImplementedError(
"Dynamically registering scoring functions is not supported" "Dynamically registering scoring functions is not supported"
) )
@@ -101,9 +101,9 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
for scoring_fn_id in scoring_functions: for scoring_fn_id in scoring_functions:
if scoring_fn_id not in SCORER_REGISTRY: if scoring_fn_id not in SCORER_REGISTRY:
raise ValueError(f"Scoring function {scoring_fn_id} is not supported.") raise ValueError(f"Scoring function {scoring_fn_id} is not supported.")
scorer = SCORER_REGISTRY[scoring_fn_id]() scoring_fn = SCORER_REGISTRY[scoring_fn_id]()
score_results = scorer.score(input_rows) score_results = scoring_fn.score(input_rows)
agg_results = scorer.aggregate(score_results) agg_results = scoring_fn.aggregate(score_results)
res[scoring_fn_id] = ScoringResult( res[scoring_fn_id] = ScoringResult(
score_rows=score_results, score_rows=score_results,
aggregated_results=agg_results, aggregated_results=agg_results,

View file

@@ -9,15 +9,15 @@ from llama_stack.apis.scoring_functions import *  # noqa: F401, F403
from llama_stack.apis.scoring import * # noqa: F401, F403 from llama_stack.apis.scoring import * # noqa: F401, F403
class BaseScorer(ABC): class BaseScoringFn(ABC):
""" """
Base interface class for all meta-reference scorers. Base interface class for all meta-reference scoring_fns.
Each scorer needs to implement the following methods: Each scoring_fn needs to implement the following methods:
- score_row(self, row) - score_row(self, row)
- aggregate(self, scorer_results) - aggregate(self, scoring_fn_results)
""" """
scoring_function_def: ScoringFunctionDef scoring_function_def: ScoringFnDef
def __init__(self, *args, **kwargs) -> None: def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)

View file

@@ -4,23 +4,23 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack.providers.impls.meta_reference.scoring.scorer.base_scorer import ( from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.base_scoring_fn import (
BaseScorer, BaseScoringFn,
) )
from llama_stack.apis.scoring_functions import * # noqa: F401, F403 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
from llama_stack.apis.scoring import * # noqa: F401, F403 from llama_stack.apis.scoring import * # noqa: F401, F403
from llama_stack.apis.common.type_system import * # noqa: F403 from llama_stack.apis.common.type_system import * # noqa: F403
from llama_stack.providers.impls.meta_reference.scoring.scorer.common import ( from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
aggregate_accuracy, aggregate_accuracy,
) )
class EqualityScorer(BaseScorer): class EqualityScoringFn(BaseScoringFn):
""" """
A scorer that assigns a score of 1.0 if the input string matches the target string, and 0.0 otherwise. A scoring_fn that assigns a score of 1.0 if the input string matches the target string, and 0.0 otherwise.
""" """
scoring_function_def = ScoringFunctionDef( scoring_function_def = ScoringFnDef(
identifier="equality", identifier="equality",
description="Returns 1.0 if the input is equal to the target, 0.0 otherwise.", description="Returns 1.0 if the input is equal to the target, 0.0 otherwise.",
parameters=[], parameters=[],

View file

@@ -4,23 +4,23 @@
# This source code is licensed under the terms described in the LICENSE file in # This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree. # the root directory of this source tree.
from llama_stack.providers.impls.meta_reference.scoring.scorer.base_scorer import ( from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.base_scoring_fn import (
BaseScorer, BaseScoringFn,
) )
from llama_stack.apis.scoring_functions import * # noqa: F401, F403 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
from llama_stack.apis.scoring import * # noqa: F401, F403 from llama_stack.apis.scoring import * # noqa: F401, F403
from llama_stack.apis.common.type_system import * # noqa: F403 from llama_stack.apis.common.type_system import * # noqa: F403
from llama_stack.providers.impls.meta_reference.scoring.scorer.common import ( from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import (
aggregate_accuracy, aggregate_accuracy,
) )
class SubsetOfScorer(BaseScorer): class SubsetOfScoringFn(BaseScoringFn):
""" """
A scorer that assigns a score of 1.0 if the expected string is included in the generated string, and 0.0 otherwise. A scoring_fn that assigns a score of 1.0 if the expected string is included in the generated string, and 0.0 otherwise.
""" """
scoring_function_def = ScoringFunctionDef( scoring_function_def = ScoringFnDef(
identifier="subset_of", identifier="subset_of",
description="Returns 1.0 if the expected is included in generated, 0.0 otherwise.", description="Returns 1.0 if the expected is included in generated, 0.0 otherwise.",
parameters=[], parameters=[],