From 979cd4cd44328320dea99f56a83f8d4c990ea6aa Mon Sep 17 00:00:00 2001
From: Xi Yan
Date: Tue, 5 Nov 2024 16:20:16 -0800
Subject: [PATCH] naming fix

---
 llama_stack/apis/eval/eval.py                      |  2 +-
 .../apis/scoring_functions/scoring_functions.py    | 22 +++++++++----------
 2 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/llama_stack/apis/eval/eval.py b/llama_stack/apis/eval/eval.py
index 5dfa0d019..df56f5325 100644
--- a/llama_stack/apis/eval/eval.py
+++ b/llama_stack/apis/eval/eval.py
@@ -46,7 +46,7 @@ class BenchmarkEvalTaskConfig(BaseModel):
 class AppEvalTaskConfig(BaseModel):
     type: Literal["app"] = "app"
     eval_candidate: EvalCandidate  # type: ignore
-    scoring_functions_config: Dict[str, ScoringFnConfig] = Field(
+    scoring_functions_params: Dict[str, ScoringFnParams] = Field(  # type: ignore
         description="Map between scoring function id and parameters",
         default_factory=dict,
     )
diff --git a/llama_stack/apis/scoring_functions/scoring_functions.py b/llama_stack/apis/scoring_functions/scoring_functions.py
index c5b48617e..b2f28e374 100644
--- a/llama_stack/apis/scoring_functions/scoring_functions.py
+++ b/llama_stack/apis/scoring_functions/scoring_functions.py
@@ -28,11 +28,11 @@ from llama_stack.apis.common.type_system import ParamType
 @json_schema_type
 class ScoringConfigType(Enum):
     llm_as_judge = "llm_as_judge"
-    answer_parsing = "answer_parsing"
+    regex_parser = "regex_parser"
 
 
 @json_schema_type
-class LLMAsJudgeScoringFnConfig(BaseModel):
+class LLMAsJudgeScoringFnParams(BaseModel):
     type: Literal[ScoringConfigType.llm_as_judge.value] = (  # type: ignore
         ScoringConfigType.llm_as_judge.value
     )
@@ -42,20 +42,20 @@
 
 
 @json_schema_type
-class AnswerParsingScoringFnConfig(BaseModel):
-    type: Literal[ScoringConfigType.answer_parsing.value] = (  # type: ignore
-        ScoringConfigType.answer_parsing.value
+class RegexParserScoringFnParams(BaseModel):
+    type: Literal[ScoringConfigType.regex_parser.value] = (  # type: ignore
+        ScoringConfigType.regex_parser.value
     )
-    parsing_regex: Optional[List[str]] = Field(
+    parsing_regexes: Optional[List[str]] = Field(
         description="Regex to extract the answer from generated response",
         default_factory=list,
     )
 
 
-ScoringFnConfig = Annotated[
+ScoringFnParams = Annotated[
     Union[
-        LLMAsJudgeScoringFnConfig,
-        AnswerParsingScoringFnConfig,
+        LLMAsJudgeScoringFnParams,
+        RegexParserScoringFnParams,
     ],
     Field(discriminator="type"),
 ]
@@ -72,8 +72,8 @@ class ScoringFnDef(BaseModel):
     return_type: ParamType = Field(
         description="The return type of the deterministic function",
     )
-    config: Optional[ScoringFnConfig] = Field(  # type: ignore
-        description="The configuration for the scoring function for benchmark eval, we could override this for app eval",
+    params: Optional[ScoringFnParams] = Field(  # type: ignore
+        description="The parameters for the scoring function for benchmark eval, we could override this for app eval",
         default=None,
     )
     # We can optionally add information here to support packaging of code, etc.
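
Reviewer note (not part of the patch): a minimal sketch of how the renamed
types compose after this change, assuming only the definitions shown in the
diff above. The scoring function id and the sample regex are hypothetical.

    from typing import Dict

    from llama_stack.apis.scoring_functions.scoring_functions import (
        RegexParserScoringFnParams,
        ScoringFnParams,
    )

    # "type" defaults to the Literal "regex_parser", so only the regexes need
    # to be supplied; pydantic uses "type" as the union discriminator.
    params: ScoringFnParams = RegexParserScoringFnParams(
        parsing_regexes=[r"Answer:\s*(\w+)"],  # hypothetical extraction regex
    )

    # This is the shape AppEvalTaskConfig.scoring_functions_params now expects:
    # a map from scoring function id to its (optionally overridden) params.
    overrides: Dict[str, ScoringFnParams] = {
        "regex-answer-scorer": params,  # hypothetical scoring fn id
    }

    assert overrides["regex-answer-scorer"].type == "regex_parser"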