Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-02 08:44:44 +00:00)

commit 1077c521b1 (parent fbc3888fd7)
aggregation function config

8 changed files with 79 additions and 28 deletions
@@ -113,7 +113,9 @@ class BasicScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
             score_results = await scoring_fn.score(
                 input_rows, scoring_fn_id, scoring_fn_params
             )
-            agg_results = await scoring_fn.aggregate(score_results, scoring_fn_params)
+            agg_results = await scoring_fn.aggregate(
+                score_results, scoring_fn_id, scoring_fn_params
+            )
             res[scoring_fn_id] = ScoringResult(
                 score_rows=score_results,
                 aggregated_results=agg_results,
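The call-site change above threads `scoring_fn_id` into `aggregate()`, so the scoring function can look up its registered defaults. A self-contained sketch of the updated provider flow, using a stand-in scoring function (none of these names are the real provider classes):

import asyncio
from typing import Any, Dict, List, Optional


class EchoScoringFn:
    # Stand-in for a provider scoring function; always scores 1.0.
    async def score(
        self, rows: List[Dict[str, Any]], fn_id: str, params: Optional[Any]
    ) -> List[Dict[str, Any]]:
        return [{"score": 1.0} for _ in rows]

    async def aggregate(
        self,
        score_results: List[Dict[str, Any]],
        fn_id: Optional[str] = None,  # newly threaded through by this commit
        params: Optional[Any] = None,
    ) -> Dict[str, Any]:
        return {"accuracy": sum(r["score"] for r in score_results) / len(score_results)}


async def main() -> None:
    fn = EchoScoringFn()
    rows = [{"input": "a"}, {"input": "b"}]
    score_results = await fn.score(rows, "basic::equality", None)
    agg_results = await fn.aggregate(score_results, "basic::equality", None)
    print(agg_results)  # {'accuracy': 1.0}


asyncio.run(main())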
@@ -47,13 +47,18 @@ class EqualityScoringFn(BaseScoringFn):
     async def aggregate(
         self,
         scoring_results: List[ScoringResultRow],
+        scoring_fn_identifier: Optional[str] = None,
         scoring_params: Optional[ScoringFnParams] = None,
     ) -> Dict[str, Any]:
+        params = self.supported_fn_defs_registry[scoring_fn_identifier].params
+        if scoring_params is not None:
+            params = scoring_params
+
         aggregation_functions = [AggregationFunctionType.accuracy]
         if (
-            scoring_params
-            and hasattr(scoring_params, "aggregation_functions")
-            and scoring_params.aggregation_functions
+            params
+            and hasattr(params, "aggregation_functions")
+            and params.aggregation_functions
         ):
-            aggregation_functions.extend(scoring_params.aggregation_functions)
+            aggregation_functions.extend(params.aggregation_functions)
         return aggregate_metrics(scoring_results, aggregation_functions)
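`aggregate_metrics` itself is not part of this diff. What follows is a minimal sketch of such a dispatcher, assuming each result row carries a "score" entry and that `AggregationFunctionType` has the `accuracy` and `categorical_count` members seen elsewhere in this commit; the real helper lives in `llama_stack.providers.utils.scoring.aggregation_utils`:

from enum import Enum
from typing import Any, Dict, List


class AggregationFunctionType(Enum):
    accuracy = "accuracy"
    categorical_count = "categorical_count"


def aggregate_metrics_sketch(
    scoring_results: List[Dict[str, Any]],
    aggregation_functions: List[AggregationFunctionType],
) -> Dict[str, Any]:
    agg: Dict[str, Any] = {}
    for fn in aggregation_functions:
        if fn == AggregationFunctionType.accuracy:
            # Mean of per-row scores; 0.0 when there are no rows.
            scores = [row["score"] for row in scoring_results]
            agg["accuracy"] = sum(scores) / len(scores) if scores else 0.0
        elif fn == AggregationFunctionType.categorical_count:
            # Count how often each distinct score value occurs.
            counts: Dict[str, int] = {}
            for row in scoring_results:
                key = str(row["score"])
                counts[key] = counts.get(key, 0) + 1
            agg["categorical_count"] = counts
    return agg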
@@ -69,13 +69,18 @@ class RegexParserScoringFn(BaseScoringFn):
     async def aggregate(
         self,
         scoring_results: List[ScoringResultRow],
+        scoring_fn_identifier: Optional[str] = None,
         scoring_params: Optional[ScoringFnParams] = None,
     ) -> Dict[str, Any]:
+        params = self.supported_fn_defs_registry[scoring_fn_identifier].params
+        if scoring_params is not None:
+            params = scoring_params
+
         aggregation_functions = [AggregationFunctionType.accuracy]
         if (
-            scoring_params
-            and hasattr(scoring_params, "aggregation_functions")
-            and scoring_params.aggregation_functions
+            params
+            and hasattr(params, "aggregation_functions")
+            and params.aggregation_functions
         ):
-            aggregation_functions.extend(scoring_params.aggregation_functions)
+            aggregation_functions.extend(params.aggregation_functions)
         return aggregate_metrics(scoring_results, aggregation_functions)
@@ -41,13 +41,18 @@ class SubsetOfScoringFn(BaseScoringFn):
     async def aggregate(
         self,
         scoring_results: List[ScoringResultRow],
+        scoring_fn_identifier: Optional[str] = None,
         scoring_params: Optional[ScoringFnParams] = None,
     ) -> Dict[str, Any]:
+        params = self.supported_fn_defs_registry[scoring_fn_identifier].params
+        if scoring_params is not None:
+            params = scoring_params
+
         aggregation_functions = [AggregationFunctionType.accuracy]
         if (
-            scoring_params
-            and hasattr(scoring_params, "aggregation_functions")
-            and scoring_params.aggregation_functions
+            params
+            and hasattr(params, "aggregation_functions")
+            and params.aggregation_functions
         ):
-            aggregation_functions.extend(scoring_params.aggregation_functions)
+            aggregation_functions.extend(params.aggregation_functions)
         return aggregate_metrics(scoring_results, aggregation_functions)
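The same resolve-defaults-then-override logic is now repeated in all three basic scoring functions above. A self-contained sketch of that pattern in isolation (the registry contents and the param type here are stand-ins, not the real ScoringFnParams):

from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class FnParams:
    aggregation_functions: List[str] = field(default_factory=list)


@dataclass
class FnDef:
    params: Optional[FnParams] = None


def resolve_aggregation_functions(
    registry: Dict[str, FnDef],
    scoring_fn_identifier: str,
    scoring_params: Optional[FnParams] = None,
) -> List[str]:
    # The registered definition supplies the defaults ...
    params = registry[scoring_fn_identifier].params
    # ... unless the caller passed params with the request.
    if scoring_params is not None:
        params = scoring_params

    aggregation_functions = ["accuracy"]  # built-in default for the basic fns
    if params and getattr(params, "aggregation_functions", None):
        aggregation_functions.extend(params.aggregation_functions)
    return aggregation_functions


# Request-time params win over the registered default:
registry = {"basic::subset_of": FnDef(params=FnParams(aggregation_functions=["categorical_count"]))}
print(resolve_aggregation_functions(registry, "basic::subset_of"))             # ['accuracy', 'categorical_count']
print(resolve_aggregation_functions(registry, "basic::subset_of", FnParams())) # ['accuracy']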
@@ -120,7 +120,9 @@ class LlmAsJudgeScoringImpl(Scoring, ScoringFunctionsProtocolPrivate):
             score_results = await scoring_fn.score(
                 input_rows, scoring_fn_id, scoring_fn_params
             )
-            agg_results = await scoring_fn.aggregate(score_results, scoring_fn_params)
+            agg_results = await scoring_fn.aggregate(
+                score_results, scoring_fn_id, scoring_fn_params
+            )
             res[scoring_fn_id] = ScoringResult(
                 score_rows=score_results,
                 aggregated_results=agg_results,
@@ -3,13 +3,18 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+import re
+
+from typing import Any, Dict, List, Optional
+
 from llama_stack.apis.inference.inference import Inference
-
+from llama_stack.apis.scoring import ScoringResultRow
+from llama_stack.apis.scoring_functions import ScoringFnParams
+
+from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_metrics
 from llama_stack.providers.utils.scoring.base_scoring_fn import BaseScoringFn
 from llama_stack.apis.scoring_functions import *  # noqa: F401, F403
 from llama_stack.apis.scoring import *  # noqa: F401, F403
 from llama_stack.apis.common.type_system import *  # noqa: F403
-import re
-
 from .fn_defs.llm_as_judge_405b_simpleqa import llm_as_judge_405b_simpleqa

@@ -89,8 +94,18 @@ class LlmAsJudgeScoringFn(BaseScoringFn):
     async def aggregate(
         self,
         scoring_results: List[ScoringResultRow],
+        scoring_fn_identifier: Optional[str] = None,
         scoring_params: Optional[ScoringFnParams] = None,
     ) -> Dict[str, Any]:
-        print(f"scoring_params: {scoring_params}")
-        # TODO: this needs to be config based aggregation, and only useful w/ Jobs API
-        return {}
+        params = self.supported_fn_defs_registry[scoring_fn_identifier].params
+        if scoring_params is not None:
+            params = scoring_params
+
+        aggregation_functions = []
+        if (
+            params
+            and hasattr(params, "aggregation_functions")
+            and params.aggregation_functions
+        ):
+            aggregation_functions.extend(params.aggregation_functions)
+        return aggregate_metrics(scoring_results, aggregation_functions)
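Note the contrast with the basic functions: llm-as-judge starts from an empty `aggregation_functions` list, so nothing is aggregated unless the registered or request-time params opt in. The tests below opt into `categorical_count`, which fits judge output since the scores are a small set of discrete labels; a toy demo of why counting beats averaging here (rows are invented):

from collections import Counter

judge_rows = [{"score": "5"}, {"score": "9"}, {"score": "5"}]  # hypothetical parsed judge scores
print(Counter(row["score"] for row in judge_rows))  # Counter({'5': 2, '9': 1})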
@@ -23,6 +23,11 @@ from llama_stack.providers.tests.datasetio.test_datasetio import register_dataset
 # -v -s --tb=short --disable-warnings


+@pytest.fixture
+def sample_judge_prompt_template():
+    return "Output a number response in the following format: Score: <number>, where <number> is the number between 0 and 9."
+
+
 class TestScoring:
     @pytest.mark.asyncio
     async def test_scoring_functions_list(self, scoring_stack):
@@ -97,7 +102,9 @@ class TestScoring:
         assert len(response.results[x].score_rows) == 5

     @pytest.mark.asyncio
-    async def test_scoring_score_with_params(self, scoring_stack):
+    async def test_scoring_score_with_params_llm_as_judge(
+        self, scoring_stack, sample_judge_prompt_template
+    ):
         (
             scoring_impl,
             scoring_functions_impl,
@@ -136,8 +143,9 @@ class TestScoring:
         scoring_functions = {
             "llm-as-judge::base": LLMAsJudgeScoringFnParams(
                 judge_model="Llama3.1-405B-Instruct",
-                prompt_template="Output a number response in the following format: Score: <number>, where <number> is the number between 0 and 9.",
+                prompt_template=sample_judge_prompt_template,
                 judge_score_regexes=[r"Score: (\d+)"],
+                aggregation_functions=[AggregationFunctionType.categorical_count],
             )
         }

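The `judge_score_regexes` pattern above is what extracts the numeric score from the judge model's free-text reply. A quick runnable check of that regex against an invented reply:

import re

judge_reply = "The answer covers the key facts. Score: 7"  # hypothetical judge output
match = re.search(r"Score: (\d+)", judge_reply)
assert match is not None and match.group(1) == "7"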
@@ -161,7 +169,9 @@ class TestScoring:
         assert len(response.results[x].score_rows) == 5

     @pytest.mark.asyncio
-    async def test_scoring_score_with_aggregation_functions(self, scoring_stack):
+    async def test_scoring_score_with_aggregation_functions(
+        self, scoring_stack, sample_judge_prompt_template
+    ):
         (
             scoring_impl,
             scoring_functions_impl,
@@ -183,7 +193,6 @@ class TestScoring:
         assert len(rows.rows) == 3

         scoring_fns_list = await scoring_functions_impl.list_scoring_functions()
-        provider_id = scoring_fns_list[0].provider_id
         scoring_functions = {}
         aggr_fns = [
             AggregationFunctionType.accuracy,
@@ -193,8 +202,12 @@ class TestScoring:
         ]
         for x in scoring_fns_list:
             if x.provider_id == "llm-as-judge":
+                aggr_fns = [AggregationFunctionType.categorical_count]
                 scoring_functions[x.identifier] = LLMAsJudgeScoringFnParams(
-                    aggregation_functions=[AggregationFunctionType.categorical_count],
+                    judge_model="Llama3.1-405B-Instruct",
+                    prompt_template=sample_judge_prompt_template,
+                    judge_score_regexes=[r"Score: (\d+)"],
+                    aggregation_functions=aggr_fns,
                 )
             elif x.provider_id == "basic":
                 if "regex_parser" in x.identifier:
@@ -212,6 +225,9 @@ class TestScoring:
             input_rows=rows.rows,
             scoring_functions=scoring_functions,
         )
-        from rich.pretty import pprint
-
-        pprint(response)
+
+        assert len(response.results) == len(scoring_functions)
+        for x in scoring_functions:
+            assert x in response.results
+            assert len(response.results[x].score_rows) == len(rows.rows)
+            assert len(response.results[x].aggregated_results) == len(aggr_fns)
@@ -48,6 +48,7 @@ class BaseScoringFn(ABC):
     async def aggregate(
         self,
         scoring_results: List[ScoringResultRow],
+        scoring_fn_identifier: Optional[str] = None,
         scoring_params: Optional[ScoringFnParams] = None,
     ) -> Dict[str, Any]:
         raise NotImplementedError()
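For downstream implementers, the abstract signature above is the contract to satisfy. A simplified, runnable sketch of a custom scoring function against it (plain dicts stand in for the real ScoringResultRow and ScoringFnParams types; this is not the real BaseScoringFn):

import asyncio
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional


class ScoringFnSketch(ABC):
    # Mirrors the new abstract aggregate() signature from the base class above.
    @abstractmethod
    async def aggregate(
        self,
        scoring_results: List[Dict[str, Any]],
        scoring_fn_identifier: Optional[str] = None,
        scoring_params: Optional[Any] = None,
    ) -> Dict[str, Any]:
        raise NotImplementedError()


class MeanScoreFn(ScoringFnSketch):
    async def aggregate(
        self,
        scoring_results: List[Dict[str, Any]],
        scoring_fn_identifier: Optional[str] = None,
        scoring_params: Optional[Any] = None,
    ) -> Dict[str, Any]:
        scores = [row["score"] for row in scoring_results]
        return {"mean": sum(scores) / len(scores) if scores else 0.0}


print(asyncio.run(MeanScoreFn().aggregate([{"score": 1.0}, {"score": 0.0}])))  # {'mean': 0.5}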