address nits

This commit is contained in:
Xi Yan 2024-10-28 11:38:14 -07:00
parent 8627e27b17
commit e3f80fa4aa
7 changed files with 12 additions and 11 deletions

View file

@@ -61,9 +61,9 @@ class MetaReferenceScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
async def list_scoring_functions(self) -> List[ScoringFnDef]:
return [
- fn_defs
+ fn_def
for impl in self.scoring_fn_id_impls.values()
- for fn_defs in impl.get_supported_scoring_fn_defs()
+ for fn_def in impl.get_supported_scoring_fn_defs()
]
async def register_scoring_function(self, function_def: ScoringFnDef) -> None:

View file

@@ -16,7 +16,7 @@ from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import
)
from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.fn_defs.equality import (
- equality_fn_def,
+ equality,
)
@@ -28,7 +28,7 @@ class EqualityScoringFn(BaseScoringFn):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.supported_fn_defs_registry = {
- equality_fn_def.identifier: equality_fn_def,
+ equality.identifier: equality,
}
async def score_row(

View file

@@ -8,7 +8,7 @@ from llama_stack.apis.common.type_system import NumberType
from llama_stack.apis.scoring_functions import ScoringFnDef
- equality_fn_def = ScoringFnDef(
+ equality = ScoringFnDef(
identifier="meta-reference::equality",
description="Returns 1.0 if the input is equal to the target, 0.0 otherwise.",
parameters=[],

View file

@@ -22,7 +22,8 @@ System Answer: {generated_answer}
Feedback:::
Total rating:
"""
- llm_as_judge_8b_correctness_fn_def = ScoringFnDef(
+ llm_as_judge_8b_correctness = ScoringFnDef(
identifier="meta-reference::llm_as_judge_8b_correctness",
description="Llm As Judge Scoring Function",
parameters=[],

View file

@@ -8,7 +8,7 @@ from llama_stack.apis.common.type_system import NumberType
from llama_stack.apis.scoring_functions import ScoringFnDef
- subset_of_fn_def = ScoringFnDef(
+ subset_of = ScoringFnDef(
identifier="meta-reference::subset_of",
description="Returns 1.0 if the expected is included in generated, 0.0 otherwise.",
parameters=[],

View file

@@ -16,7 +16,7 @@ from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import
aggregate_average,
)
from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.fn_defs.llm_as_judge_8b_correctness import (
- llm_as_judge_8b_correctness_fn_def,
+ llm_as_judge_8b_correctness,
)
@@ -29,7 +29,7 @@ class LlmAsJudgeScoringFn(BaseScoringFn):
super().__init__(*arg, **kwargs)
self.inference_api = inference_api
self.supported_fn_defs_registry = {
- llm_as_judge_8b_correctness_fn_def.identifier: llm_as_judge_8b_correctness_fn_def,
+ llm_as_judge_8b_correctness.identifier: llm_as_judge_8b_correctness,
}
async def score_row(

View file

@@ -15,7 +15,7 @@ from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.common import
)
from llama_stack.providers.impls.meta_reference.scoring.scoring_fn.fn_defs.subset_of import (
- subset_of_fn_def,
+ subset_of,
)
@@ -27,7 +27,7 @@ class SubsetOfScoringFn(BaseScoringFn):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.supported_fn_defs_registry = {
- subset_of_fn_def.identifier: subset_of_fn_def,
+ subset_of.identifier: subset_of,
}
async def score_row(