Folder restructure for evals/datasets/scoring (#419)

* rename evals-related stuff

* fix datasetio

* fix scoring test

* localfs -> LocalFS

* refactor scoring

* refactor scoring

* remove 8b_correctness scoring_fn from tests

* tests w/ eval params (see the sketch below the commit metadata)

* scoring fn braintrust fixture

* import
Xi Yan, 2024-11-11 17:35:40 -05:00, committed by GitHub
parent 2b7d70ba86
commit b4416b72fd
37 changed files with 141 additions and 100 deletions
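The main behavioral change for callers: the pre-baked meta-reference::llm_as_judge_8b_correctness scoring function is gone, and its replacement meta-reference::llm_as_judge_base takes the judge model, prompt template, and score regexes as parameters at call time. A minimal sketch of what a caller now passes (not part of the diff; it assumes a scoring_impl and rows obtained the same way the updated tests below obtain them, and it runs inside an async context):

# Hedged sketch, not part of the diff: scoring with the parameterized judge fn.
# Assumes `scoring_impl` and `rows` come from the test stack used in the tests
# below, and that JUDGE_PROMPT is a judge prompt template with the usual
# {generated_answer}-style fields.
from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams

scoring_functions = {
    "meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
        judge_model="Llama3.1-8B-Instruct",
        prompt_template=JUDGE_PROMPT,
        judge_score_regexes=[r"Total rating: (\d+)"],
    ),
}
response = await scoring_impl.score(
    input_rows=rows.rows,
    scoring_functions=scoring_functions,
)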


@@ -4,15 +4,15 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from .config import MetaReferenceDatasetIOConfig
+from .config import LocalFSDatasetIOConfig
 async def get_provider_impl(
-    config: MetaReferenceDatasetIOConfig,
+    config: LocalFSDatasetIOConfig,
     _deps,
 ):
-    from .datasetio import MetaReferenceDatasetIOImpl
+    from .datasetio import LocalFSDatasetIOImpl
-    impl = MetaReferenceDatasetIOImpl(config)
+    impl = LocalFSDatasetIOImpl(config)
     await impl.initialize()
     return impl
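For orientation, a hedged sketch of constructing the renamed datasetio provider directly (not part of the diff). It assumes this hunk is the package __init__.py of llama_stack.providers.inline.datasetio.localfs, which matches the module path in the registry change further down; in a real stack the provider is resolved through the registry rather than built by hand.

# Hedged sketch: direct construction of the renamed provider, assuming
# get_provider_impl and LocalFSDatasetIOConfig are importable from the package
# root as this hunk suggests.
import asyncio

from llama_stack.providers.inline.datasetio.localfs import (
    LocalFSDatasetIOConfig,
    get_provider_impl,
)


async def main() -> None:
    # Second positional argument is the (unused) dependency dict.
    impl = await get_provider_impl(LocalFSDatasetIOConfig(), {})
    print(type(impl).__name__)  # expected: LocalFSDatasetIOImpl


asyncio.run(main())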


@@ -6,4 +6,4 @@
 from llama_stack.apis.datasetio import * # noqa: F401, F403
-class MetaReferenceDatasetIOConfig(BaseModel): ...
+class LocalFSDatasetIOConfig(BaseModel): ...


@@ -15,7 +15,7 @@ from dataclasses import dataclass
 from llama_stack.providers.datatypes import DatasetsProtocolPrivate
 from llama_stack.providers.utils.datasetio.url_utils import get_dataframe_from_url
-from .config import MetaReferenceDatasetIOConfig
+from .config import LocalFSDatasetIOConfig
 class BaseDataset(ABC):
@@ -77,8 +77,8 @@ class PandasDataframeDataset(BaseDataset):
         self.df = self._validate_dataset_schema(df)
-class MetaReferenceDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
-    def __init__(self, config: MetaReferenceDatasetIOConfig) -> None:
+class LocalFSDatasetIOImpl(DatasetIO, DatasetsProtocolPrivate):
+    def __init__(self, config: LocalFSDatasetIOConfig) -> None:
         self.config = config
         # local registry for keeping track of datasets within the provider
         self.dataset_infos = {}


@@ -9,14 +9,13 @@ from llama_models.llama3.api.datatypes import * # noqa: F403
 from .....apis.common.job_types import Job
 from .....apis.eval.eval import Eval, EvalTaskConfig, EvaluateResponse, JobStatus
 from llama_stack.apis.common.type_system import * # noqa: F403
-from tqdm import tqdm
 from llama_stack.apis.datasetio import DatasetIO
 from llama_stack.apis.datasets import Datasets
 from llama_stack.apis.eval_tasks import EvalTaskDef
 from llama_stack.apis.inference import Inference
 from llama_stack.apis.scoring import Scoring
 from llama_stack.providers.datatypes import EvalTasksProtocolPrivate
+from tqdm import tqdm
 from .config import MetaReferenceEvalConfig


@@ -16,9 +16,8 @@ from llama_stack.apis.datasets import * # noqa: F403
 from autoevals.llm import Factuality
 from autoevals.ragas import AnswerCorrectness
 from llama_stack.providers.datatypes import ScoringFunctionsProtocolPrivate
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
-    aggregate_average,
-)
+from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_average
 from .config import BraintrustScoringConfig
 from .scoring_fn.fn_defs.answer_correctness import answer_correctness_fn_def


@@ -4,20 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.base_scoring_fn import (
-    BaseScoringFn,
-)
+from .base_scoring_fn import BaseScoringFn
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
-    aggregate_accuracy,
-)
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.fn_defs.equality import (
-    equality,
-)
+from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_accuracy
+from .fn_defs.equality import equality
 class EqualityScoringFn(BaseScoringFn):


@@ -0,0 +1,15 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+from llama_stack.apis.common.type_system import NumberType
+from llama_stack.apis.scoring_functions import ScoringFnDef
+llm_as_judge_base = ScoringFnDef(
+    identifier="meta-reference::llm_as_judge_base",
+    description="Llm As Judge Scoring Function",
+    return_type=NumberType(),
+)


@@ -4,20 +4,16 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 from llama_stack.apis.inference.inference import Inference
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.base_scoring_fn import (
-    BaseScoringFn,
-)
+from .base_scoring_fn import BaseScoringFn
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
 import re
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
-    aggregate_average,
-)
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.fn_defs.llm_as_judge_8b_correctness import (
-    llm_as_judge_8b_correctness,
-)
+from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_average
+from .fn_defs.llm_as_judge_base import llm_as_judge_base
 class LlmAsJudgeScoringFn(BaseScoringFn):
@@ -29,7 +25,7 @@ class LlmAsJudgeScoringFn(BaseScoringFn):
         super().__init__(*arg, **kwargs)
         self.inference_api = inference_api
         self.supported_fn_defs_registry = {
-            llm_as_judge_8b_correctness.identifier: llm_as_judge_8b_correctness,
+            llm_as_judge_base.identifier: llm_as_judge_base,
         }
     async def score_row(


@@ -9,7 +9,7 @@ from .base_scoring_fn import BaseScoringFn
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
-from .common import aggregate_accuracy
+from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_accuracy
 from .fn_defs.regex_parser_multiple_choice_answer import (
     regex_parser_multiple_choice_answer,


@@ -4,19 +4,13 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.base_scoring_fn import (
-    BaseScoringFn,
-)
+from .base_scoring_fn import BaseScoringFn
 from llama_stack.apis.scoring_functions import * # noqa: F401, F403
 from llama_stack.apis.scoring import * # noqa: F401, F403
 from llama_stack.apis.common.type_system import * # noqa: F403
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.common import (
-    aggregate_accuracy,
-)
-from llama_stack.providers.inline.meta_reference.scoring.scoring_fn.fn_defs.subset_of import (
-    subset_of,
-)
+from llama_stack.providers.utils.scoring.aggregation_utils import aggregate_accuracy
+from .fn_defs.subset_of import subset_of
 class SubsetOfScoringFn(BaseScoringFn):


@@ -13,10 +13,10 @@ def available_providers() -> List[ProviderSpec]:
     return [
         InlineProviderSpec(
             api=Api.datasetio,
-            provider_type="meta-reference",
+            provider_type="localfs",
             pip_packages=["pandas"],
-            module="llama_stack.providers.inline.meta_reference.datasetio",
-            config_class="llama_stack.providers.inline.meta_reference.datasetio.MetaReferenceDatasetIOConfig",
+            module="llama_stack.providers.inline.datasetio.localfs",
+            config_class="llama_stack.providers.inline.datasetio.localfs.LocalFSDatasetIOConfig",
             api_dependencies=[],
         ),
         remote_provider_spec(


@@ -15,8 +15,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.eval,
             provider_type="meta-reference",
             pip_packages=[],
-            module="llama_stack.providers.inline.meta_reference.eval",
-            config_class="llama_stack.providers.inline.meta_reference.eval.MetaReferenceEvalConfig",
+            module="llama_stack.providers.inline.eval.meta_reference",
+            config_class="llama_stack.providers.inline.eval.meta_reference.MetaReferenceEvalConfig",
             api_dependencies=[
                 Api.datasetio,
                 Api.datasets,


@@ -15,8 +15,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.scoring,
             provider_type="meta-reference",
             pip_packages=[],
-            module="llama_stack.providers.inline.meta_reference.scoring",
-            config_class="llama_stack.providers.inline.meta_reference.scoring.MetaReferenceScoringConfig",
+            module="llama_stack.providers.inline.scoring.meta_reference",
+            config_class="llama_stack.providers.inline.scoring.meta_reference.MetaReferenceScoringConfig",
             api_dependencies=[
                 Api.datasetio,
                 Api.datasets,
@@ -27,8 +27,8 @@ def available_providers() -> List[ProviderSpec]:
             api=Api.scoring,
             provider_type="braintrust",
             pip_packages=["autoevals", "openai"],
-            module="llama_stack.providers.inline.braintrust.scoring",
-            config_class="llama_stack.providers.inline.braintrust.scoring.BraintrustScoringConfig",
+            module="llama_stack.providers.inline.scoring.braintrust",
+            config_class="llama_stack.providers.inline.scoring.braintrust.BraintrustScoringConfig",
             api_dependencies=[
                 Api.datasetio,
                 Api.datasets,


@@ -19,12 +19,12 @@ def datasetio_remote() -> ProviderFixture:
 @pytest.fixture(scope="session")
-def datasetio_meta_reference() -> ProviderFixture:
+def datasetio_localfs() -> ProviderFixture:
     return ProviderFixture(
         providers=[
             Provider(
-                provider_id="meta-reference",
-                provider_type="meta-reference",
+                provider_id="localfs",
+                provider_type="localfs",
                 config={},
             )
         ],
@@ -44,7 +44,7 @@ def datasetio_huggingface() -> ProviderFixture:
     )
-DATASETIO_FIXTURES = ["meta_reference", "remote", "huggingface"]
+DATASETIO_FIXTURES = ["localfs", "remote", "huggingface"]
 @pytest_asyncio.fixture(scope="session")


@@ -4,10 +4,6 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from llama_stack.apis.scoring_functions import * # noqa: F401, F403
-from llama_stack.apis.scoring import * # noqa: F401, F403
-from llama_stack.apis.common.type_system import NumberType
 JUDGE_PROMPT = """
 You will be given a question, a expected_answer, and a system_answer.
 Your task is to provide a 'total rating' scoring how well the system_answer answers compared with ground truth in expected_answer in terms of factual correctness to the question.
@@ -22,18 +18,3 @@ System Answer: {generated_answer}
 Feedback:::
 Total rating:
 """
-llm_as_judge_8b_correctness = ScoringFnDef(
-    identifier="meta-reference::llm_as_judge_8b_correctness",
-    description="Llm As Judge Scoring Function",
-    return_type=NumberType(),
-    params=LLMAsJudgeScoringFnParams(
-        prompt_template=JUDGE_PROMPT,
-        judge_model="Llama3.1-8B-Instruct",
-        judge_score_regexes=[
-            r"Total rating: (\d+)",
-            r"rating: (\d+)",
-            r"Rating: (\d+)",
-        ],
-    ),
-)


@@ -19,9 +19,10 @@ from llama_stack.apis.eval.eval import (
     EvalTaskDefWithProvider,
     ModelCandidate,
 )
+from llama_stack.apis.scoring_functions import LLMAsJudgeScoringFnParams
 from llama_stack.distribution.datatypes import Api
 from llama_stack.providers.tests.datasetio.test_datasetio import register_dataset
+from .constants import JUDGE_PROMPT
 # How to run this test:
 #
@@ -65,7 +66,7 @@ class Testeval:
         assert len(rows.rows) == 3
         scoring_functions = [
-            "meta-reference::llm_as_judge_8b_correctness",
+            "meta-reference::llm_as_judge_base",
             "meta-reference::equality",
         ]
         task_id = "meta-reference::app_eval"
@@ -85,11 +86,22 @@ class Testeval:
                     model="Llama3.2-3B-Instruct",
                     sampling_params=SamplingParams(),
                 ),
+                scoring_params={
+                    "meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
+                        judge_model="Llama3.1-8B-Instruct",
+                        prompt_template=JUDGE_PROMPT,
+                        judge_score_regexes=[
+                            r"Total rating: (\d+)",
+                            r"rating: (\d+)",
+                            r"Rating: (\d+)",
+                        ],
+                    )
+                },
             ),
         )
         assert len(response.generations) == 3
-        assert "meta-reference::llm_as_judge_8b_correctness" in response.scores
         assert "meta-reference::equality" in response.scores
+        assert "meta-reference::llm_as_judge_base" in response.scores
     @pytest.mark.asyncio
     async def test_eval_run_eval(self, eval_stack):
@@ -109,7 +121,6 @@ class Testeval:
         )
         scoring_functions = [
-            "meta-reference::llm_as_judge_8b_correctness",
             "meta-reference::subset_of",
         ]
@@ -138,7 +149,6 @@ class Testeval:
         assert eval_response is not None
         assert len(eval_response.generations) == 5
         assert "meta-reference::subset_of" in eval_response.scores
-        assert "meta-reference::llm_as_judge_8b_correctness" in eval_response.scores
     @pytest.mark.asyncio
     async def test_eval_run_benchmark_eval(self, eval_stack):


@@ -16,7 +16,7 @@ DEFAULT_PROVIDER_COMBINATIONS = [
     pytest.param(
         {
             "scoring": "meta_reference",
-            "datasetio": "meta_reference",
+            "datasetio": "localfs",
             "inference": "fireworks",
         },
         id="meta_reference_scoring_fireworks_inference",
@@ -25,12 +25,21 @@ DEFAULT_PROVIDER_COMBINATIONS = [
     pytest.param(
         {
             "scoring": "meta_reference",
-            "datasetio": "meta_reference",
+            "datasetio": "localfs",
             "inference": "together",
         },
         id="meta_reference_scoring_together_inference",
         marks=pytest.mark.meta_reference_scoring_together_inference,
     ),
+    pytest.param(
+        {
+            "scoring": "braintrust",
+            "datasetio": "localfs",
+            "inference": "together",
+        },
+        id="braintrust_scoring_together_inference",
+        marks=pytest.mark.braintrust_scoring_together_inference,
+    ),
 ]
@@ -38,6 +47,7 @@ def pytest_configure(config):
     for fixture_name in [
         "meta_reference_scoring_fireworks_inference",
         "meta_reference_scoring_together_inference",
+        "braintrust_scoring_together_inference",
     ]:
         config.addinivalue_line(
             "markers",


@@ -31,7 +31,20 @@ def scoring_meta_reference() -> ProviderFixture:
     )
-SCORING_FIXTURES = ["meta_reference", "remote"]
+@pytest.fixture(scope="session")
+def scoring_braintrust() -> ProviderFixture:
+    return ProviderFixture(
+        providers=[
+            Provider(
+                provider_id="braintrust",
+                provider_type="braintrust",
+                config={},
+            )
+        ],
+    )
+SCORING_FIXTURES = ["meta_reference", "remote", "braintrust"]
 @pytest_asyncio.fixture(scope="session")
@@ -52,9 +65,4 @@ async def scoring_stack(request):
         provider_data,
     )
-    return (
-        impls[Api.scoring],
-        impls[Api.scoring_functions],
-        impls[Api.datasetio],
-        impls[Api.datasets],
-    )
+    return impls


@@ -8,7 +8,7 @@
 import pytest
 from llama_stack.apis.scoring_functions import * # noqa: F403
+from llama_stack.distribution.datatypes import Api
 from llama_stack.providers.tests.datasetio.test_datasetio import register_dataset
 # How to run this test:
@@ -23,20 +23,36 @@ class TestScoring:
     async def test_scoring_functions_list(self, scoring_stack):
         # NOTE: this needs you to ensure that you are starting from a clean state
         # but so far we don't have an unregister API unfortunately, so be careful
-        _, scoring_functions_impl, _, _ = scoring_stack
+        scoring_functions_impl = scoring_stack[Api.scoring_functions]
         response = await scoring_functions_impl.list_scoring_functions()
         assert isinstance(response, list)
         assert len(response) > 0
     @pytest.mark.asyncio
     async def test_scoring_score(self, scoring_stack):
-        scoring_impl, scoring_functions_impl, datasetio_impl, datasets_impl = (
-            scoring_stack
+        (
+            scoring_impl,
+            scoring_functions_impl,
+            datasetio_impl,
+            datasets_impl,
+            models_impl,
+        ) = (
+            scoring_stack[Api.scoring],
+            scoring_stack[Api.scoring_functions],
+            scoring_stack[Api.datasetio],
+            scoring_stack[Api.datasets],
+            scoring_stack[Api.models],
         )
         await register_dataset(datasets_impl)
         response = await datasets_impl.list_datasets()
         assert len(response) == 1
+        for model_id in ["Llama3.2-3B-Instruct", "Llama3.1-8B-Instruct"]:
+            await models_impl.register_model(
+                model_id=model_id,
+                provider_id="",
+            )
         # scoring individual rows
         rows = await datasetio_impl.get_rows_paginated(
             dataset_id="test_dataset",
@@ -44,10 +60,11 @@ class TestScoring:
         )
         assert len(rows.rows) == 3
+        scoring_fns_list = await scoring_functions_impl.list_scoring_functions()
         scoring_functions = {
-            "meta-reference::llm_as_judge_8b_correctness": None,
-            "meta-reference::equality": None,
+            scoring_fns_list[0].identifier: None,
         }
         response = await scoring_impl.score(
             input_rows=rows.rows,
             scoring_functions=scoring_functions,
@@ -69,13 +86,34 @@ class TestScoring:
     @pytest.mark.asyncio
     async def test_scoring_score_with_params(self, scoring_stack):
-        scoring_impl, scoring_functions_impl, datasetio_impl, datasets_impl = (
-            scoring_stack
+        (
+            scoring_impl,
+            scoring_functions_impl,
+            datasetio_impl,
+            datasets_impl,
+            models_impl,
+        ) = (
+            scoring_stack[Api.scoring],
+            scoring_stack[Api.scoring_functions],
+            scoring_stack[Api.datasetio],
+            scoring_stack[Api.datasets],
+            scoring_stack[Api.models],
         )
         await register_dataset(datasets_impl)
         response = await datasets_impl.list_datasets()
         assert len(response) == 1
+        for model_id in ["Llama3.1-405B-Instruct"]:
+            await models_impl.register_model(
+                model_id=model_id,
+                provider_id="",
+            )
+        scoring_fns_list = await scoring_functions_impl.list_scoring_functions()
+        provider_id = scoring_fns_list[0].provider_id
+        if provider_id == "braintrust":
+            pytest.skip("Braintrust provider does not support scoring with params")
         # scoring individual rows
         rows = await datasetio_impl.get_rows_paginated(
             dataset_id="test_dataset",
@@ -84,7 +122,7 @@ class TestScoring:
         assert len(rows.rows) == 3
         scoring_functions = {
-            "meta-reference::llm_as_judge_8b_correctness": LLMAsJudgeScoringFnParams(
+            "meta-reference::llm_as_judge_base": LLMAsJudgeScoringFnParams(
                 judge_model="Llama3.1-405B-Instruct",
                 prompt_template="Output a number response in the following format: Score: <number>, where <number> is the number between 0 and 9.",
                 judge_score_regexes=[r"Score: (\d+)"],


@@ -3,13 +3,10 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
-from pathlib import Path
 from typing import Any, Dict, List
 from llama_stack.apis.scoring import ScoringResultRow
-FN_DEFS_PATH = Path(__file__).parent / "fn_defs"
 def aggregate_accuracy(scoring_results: List[ScoringResultRow]) -> Dict[str, Any]:
     num_correct = sum(result["score"] for result in scoring_results)