feat(eval api): (2.1/n) fix resolver for benchmark routing table + fix precommit (#1691)

# What does this PR do?
- fixes routing table so that `llama stack run` works
- fixes pre-commit
- one of a series of fixes needed to complete the eval API implementation

[//]: # (If resolving an issue, uncomment and update the line below)
[//]: # (Closes #[issue-number])

## Test Plan
```
llama stack run
```

[//]: # (## Documentation)
Commit 08c0c5505e (parent bf135f38b1), authored by Xi Yan on 2025-03-18 21:09:49 -07:00 and committed by GitHub.
4 changed files with 31 additions and 26 deletions


```diff
@@ -466,35 +466,38 @@ class BenchmarksRoutingTable(CommonRoutingTableImpl, Benchmarks):
             raise ValueError(f"Benchmark '{benchmark_id}' not found")
         return benchmark
 
+    async def unregister_benchmark(self, benchmark_id: str) -> None:
+        benchmark = await self.get_benchmark(benchmark_id)
+        if benchmark is None:
+            raise ValueError(f"Benchmark {benchmark_id} not found")
+        await self.unregister_object(benchmark)
+
     async def register_benchmark(
         self,
-        benchmark_id: str,
         dataset_id: str,
-        scoring_functions: List[str],
+        grader_ids: List[str],
+        benchmark_id: Optional[str] = None,
         metadata: Optional[Dict[str, Any]] = None,
-        provider_benchmark_id: Optional[str] = None,
-        provider_id: Optional[str] = None,
-    ) -> None:
+    ) -> Benchmark:
         if metadata is None:
             metadata = {}
-        if provider_id is None:
-            if len(self.impls_by_provider_id) == 1:
-                provider_id = list(self.impls_by_provider_id.keys())[0]
-            else:
-                raise ValueError(
-                    "No provider specified and multiple providers available. Please specify a provider_id."
-                )
-        if provider_benchmark_id is None:
-            provider_benchmark_id = benchmark_id
+        # TODO (xiyan): we will need a way to infer provider_id for evaluation
+        # keep it as meta-reference for now
+        if len(self.impls_by_provider_id) == 0:
+            raise ValueError("No evaluation providers available. Please configure an evaluation provider.")
+
+        provider_id = list(self.impls_by_provider_id.keys())[0]
+
         benchmark = Benchmark(
             identifier=benchmark_id,
             dataset_id=dataset_id,
-            scoring_functions=scoring_functions,
+            grader_ids=grader_ids,
             metadata=metadata,
             provider_id=provider_id,
-            provider_resource_id=provider_benchmark_id,
+            provider_resource_id=benchmark_id,
         )
         await self.register_object(benchmark)
+        return benchmark
 
 
 class ToolGroupsRoutingTable(CommonRoutingTableImpl, ToolGroups):
```
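For illustration, a minimal sketch of how the updated routing-table methods would be called. The `routing_table` instance, the benchmark id, and the commented-out driver are assumptions for the example; the signature itself (`benchmark_id` now optional, `grader_ids` replacing `scoring_functions`, and the registered `Benchmark` being returned) comes from the diff above.

```python
import asyncio

async def demo(routing_table) -> None:
    # Register under the new signature: grader_ids replaces scoring_functions,
    # benchmark_id is optional, and the registered Benchmark is returned.
    benchmark = await routing_table.register_benchmark(
        dataset_id="simpleqa",
        grader_ids=["llm-as-judge::405b-simpleqa"],
        benchmark_id="my-simpleqa",  # hypothetical id; also used as provider_resource_id
    )
    print(benchmark.identifier, benchmark.provider_id)

    # Exercise the new unregister path: resolves the benchmark, then removes it.
    await routing_table.unregister_benchmark("my-simpleqa")

# asyncio.run(demo(routing_table))  # routing_table: a resolved BenchmarksRoutingTable
```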


```diff
@@ -214,27 +214,27 @@ def get_distribution_template() -> DistributionTemplate:
         BenchmarkInput(
             benchmark_id="meta-reference-simpleqa",
             dataset_id="simpleqa",
-            scoring_functions=["llm-as-judge::405b-simpleqa"],
+            grader_ids=["llm-as-judge::405b-simpleqa"],
         ),
         BenchmarkInput(
             benchmark_id="meta-reference-mmlu-cot",
             dataset_id="mmlu_cot",
-            scoring_functions=["basic::regex_parser_multiple_choice_answer"],
+            grader_ids=["basic::regex_parser_multiple_choice_answer"],
         ),
         BenchmarkInput(
             benchmark_id="meta-reference-gpqa-cot",
             dataset_id="gpqa_cot",
-            scoring_functions=["basic::regex_parser_multiple_choice_answer"],
+            grader_ids=["basic::regex_parser_multiple_choice_answer"],
         ),
         BenchmarkInput(
             benchmark_id="meta-reference-math-500",
             dataset_id="math_500",
-            scoring_functions=["basic::regex_parser_math_response"],
+            grader_ids=["basic::regex_parser_math_response"],
         ),
         BenchmarkInput(
             benchmark_id="meta-reference-bfcl",
             dataset_id="bfcl",
-            scoring_functions=["basic::bfcl"],
+            grader_ids=["basic::bfcl"],
         ),
     ]
     return DistributionTemplate(
```
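Under the renamed field, wiring one more benchmark into a distribution template would look like the sketch below. The dataset id, benchmark id, and import path are assumptions; only the `grader_ids` field name is established by this diff.

```python
from llama_stack.distribution.datatypes import BenchmarkInput  # import path assumed

# Hypothetical extra template entry using the renamed field.
extra_benchmark = BenchmarkInput(
    benchmark_id="meta-reference-my-eval",
    dataset_id="my_eval",  # assumed dataset, registered elsewhere in the template
    grader_ids=["basic::regex_parser_multiple_choice_answer"],
)
```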


```diff
@@ -196,27 +196,27 @@ datasets:
 scoring_fns: []
 benchmarks:
 - dataset_id: simpleqa
-  scoring_functions:
+  grader_ids:
   - llm-as-judge::405b-simpleqa
   metadata: {}
   benchmark_id: meta-reference-simpleqa
 - dataset_id: mmlu_cot
-  scoring_functions:
+  grader_ids:
   - basic::regex_parser_multiple_choice_answer
   metadata: {}
   benchmark_id: meta-reference-mmlu-cot
 - dataset_id: gpqa_cot
-  scoring_functions:
+  grader_ids:
   - basic::regex_parser_multiple_choice_answer
   metadata: {}
   benchmark_id: meta-reference-gpqa-cot
 - dataset_id: math_500
-  scoring_functions:
+  grader_ids:
   - basic::regex_parser_math_response
   metadata: {}
   benchmark_id: meta-reference-math-500
 - dataset_id: bfcl
-  scoring_functions:
+  grader_ids:
   - basic::bfcl
   metadata: {}
   benchmark_id: meta-reference-bfcl
```
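A quick way to sanity-check a regenerated run config after this rename is to load it and assert the old key is gone. A rough sketch, assuming the config path:

```python
import yaml

# Flag any benchmark entry still carrying the pre-rename key.
with open("run.yaml") as f:  # path assumed
    config = yaml.safe_load(f)

for entry in config.get("benchmarks", []):
    bid = entry.get("benchmark_id")
    assert "scoring_functions" not in entry, f"stale scoring_functions in {bid}"
    assert "grader_ids" in entry, f"missing grader_ids in {bid}"
```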


```diff
@@ -186,6 +186,8 @@ exclude = [
     "^llama_stack/apis/tools/tools\\.py$",
     "^llama_stack/apis/vector_dbs/vector_dbs\\.py$",
     "^llama_stack/apis/vector_io/vector_io\\.py$",
+    "^llama_stack/apis/graders/graders\\.py$",
+    "^llama_stack/apis/evaluation/evaluation\\.py$",
     "^llama_stack/cli/download\\.py$",
     "^llama_stack/cli/llama\\.py$",
     "^llama_stack/cli/stack/_build\\.py$",
```