Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 07:14:20 +00:00)
input query optional input for braintrust scorer

This commit is contained in:
parent 7b5895003a
commit 3c29108b6e

4 changed files with 8 additions and 3 deletions
@@ -118,7 +118,7 @@ async def run_main(host: str, port: int):
     response = await client.run_scorer(
         dataset_config=EvaluateDatasetConfig(
             dataset_identifier="Llama-3.1-8B-Instruct-evals__mmlu_pro__details",
-            # row_limit=10,
+            row_limit=10,
         ),
         eval_scoring_config=EvaluateScoringConfig(
             scorer_config_list=[
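For context, a minimal sketch of the client call this hunk touches, reconstructed from the visible context. Only the row_limit change is part of this diff; the scorer_config_list entries are elided in the hunk, so they are left as a placeholder here:

    # Sketch only, reconstructed from the hunk above.
    response = await client.run_scorer(
        dataset_config=EvaluateDatasetConfig(
            dataset_identifier="Llama-3.1-8B-Instruct-evals__mmlu_pro__details",
            row_limit=10,  # now active: only the first 10 rows are scored
        ),
        eval_scoring_config=EvaluateScoringConfig(
            scorer_config_list=[
                # entries elided in this diff
            ],
        ),
    )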
@@ -16,7 +16,7 @@ ScorerRegistry = Registry[BaseScorer]()
 SCORER_REGISTRY = {
     "accuracy": AccuracyScorer,
     "random": RandomScorer,
-    "braintrust::factuality": BrainTrustFactualityScorer,
+    "braintrust::factuality": BraintrustFactualityScorer,
     "braintrust::answer-correctness": BraintrustAnswerCorrectnessScorer,
 }
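This hunk only fixes the casing of the class name (BrainTrustFactualityScorer to BraintrustFactualityScorer) so it matches the other Braintrust scorer. A minimal sketch of how a registry like this is typically consumed, assuming a hypothetical lookup helper (the helper name and error handling are assumptions, not part of this diff):

    # Hypothetical helper; name and error handling are assumptions.
    def get_scorer(scorer_id: str):
        if scorer_id not in SCORER_REGISTRY:
            raise ValueError(f"Unknown scorer: {scorer_id}")
        return SCORER_REGISTRY[scorer_id]()  # instantiate the registered scorer class

    scorer = get_scorer("braintrust::factuality")  # resolves to BraintrustFactualityScorer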
@@ -33,11 +33,15 @@ class RunScoringTask(BaseTask):
         for x in dataset:
             expected_answer = x.data["expected_answer"]
             generated_answer = x.data["generated_answer"]
+            input_query = None
+            if "input_query" in x.data:
+                input_query = x.data["input_query"]

             scorer_inputs.append(
                 ScorerInputSample(
                     expected_answer=expected_answer,
                     generated_answer=generated_answer,
+                    input_query=input_query,
                 )
             )
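The added lines make input_query optional: rows without an "input_query" column fall back to None, and the value is passed through to ScorerInputSample. Assuming x.data behaves like a dict (which the membership test and indexing above suggest), a slightly more compact but equivalent form would be:

    # Stylistic alternative to the patch above, assuming x.data is dict-like.
    input_query = x.data.get("input_query")  # None when the key is absent
    scorer_inputs.append(
        ScorerInputSample(
            expected_answer=expected_answer,
            generated_answer=generated_answer,
            input_query=input_query,
        )
    )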
@@ -74,7 +78,6 @@ class RunScoringTask(BaseTask):
             )

         scorer_results = scorer.score(postprocessed)
-        cprint(scorer_results, "magenta")
         eval_result = scorer.aggregate_results(scorer_results)

         return eval_result
@@ -20,6 +20,8 @@ def available_providers() -> List[ProviderSpec]:
                 "pandas",
                 "scikit-learn",
                 "datasets",
+                "numpy",
+                "autoevals",
             ],
             module="llama_stack.providers.impls.meta_reference.evals",
             config_class="llama_stack.providers.impls.meta_reference.evals.MetaReferenceEvalsImplConfig",
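numpy and autoevals are added as pip dependencies of the meta-reference evals provider, presumably because the Braintrust scorers wrap evaluators from the autoevals package. A minimal sketch of where the optional input query would end up, assuming the factuality scorer delegates to autoevals' Factuality evaluator (the wrapper shape below is an assumption; only the dependency addition is part of this diff):

    from autoevals.llm import Factuality

    # Assumed wrapper shape; the actual BraintrustFactualityScorer may differ.
    def score_factuality(input_query, generated_answer, expected_answer):
        evaluator = Factuality()
        result = evaluator(
            output=generated_answer,
            expected=expected_answer,
            input=input_query,  # the optional query threaded through by this commit
        )
        return result.score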