feat(eval api): (2.2/n) delete eval / scoring / scoring_fn apis (#1700)

# What does this PR do?
- To simplify the migration, delete the existing `eval/scoring/scoring_function`
APIs. This leaves a number of broken implementations; the follow-up sequence is:
1. migrate benchmark graders
2. clean up the existing scoring functions

- Add a skeleton evaluation implementation so that tests pass (a minimal sketch of the idea is included below).
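For context, the skeleton only needs to satisfy the provider interface so that registration and routing tests pass; it does no real work. A minimal sketch of the idea, with stubbed async methods (the class and method names here are illustrative placeholders, not the actual API added in this PR):

```python
# Sketch of a skeleton evaluation provider: every entry point is stubbed so the
# stack can start and tests can exercise registration/routing, while the real
# implementation lands in follow-up PRs. All names below are placeholders.
class SkeletonEvaluationImpl:
    async def initialize(self) -> None:
        # Nothing to set up yet; present so provider startup succeeds.
        pass

    async def run(self, benchmark_id: str) -> None:
        raise NotImplementedError("evaluation run is not implemented yet")

    async def grade(self, benchmark_id: str) -> None:
        raise NotImplementedError("grading is not implemented yet")
```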

## Test Plan
Tested in follow-up PRs.

[//]: # (## Documentation)
Xi Yan 2025-03-19 11:04:23 -07:00 committed by GitHub
parent 0048274ec0
commit c1d18283d2
113 changed files with 408 additions and 3900 deletions


@@ -18,15 +18,9 @@ distribution_spec:
- inline::meta-reference
telemetry:
- inline::meta-reference
eval:
- inline::meta-reference
datasetio:
- remote::huggingface
- inline::localfs
scoring:
- inline::basic
- inline::llm-as-judge
- inline::braintrust
tool_runtime:
- remote::brave-search
- remote::tavily-search


@@ -9,7 +9,6 @@ from typing import Dict, List, Tuple
from llama_stack.apis.datasets import DatasetPurpose, URIDataSource
from llama_stack.apis.models.models import ModelType
from llama_stack.distribution.datatypes import (
BenchmarkInput,
DatasetInput,
ModelInput,
Provider,
@@ -102,9 +101,7 @@ def get_distribution_template() -> DistributionTemplate:
"safety": ["inline::llama-guard"],
"agents": ["inline::meta-reference"],
"telemetry": ["inline::meta-reference"],
"eval": ["inline::meta-reference"],
"datasetio": ["remote::huggingface", "inline::localfs"],
"scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
"tool_runtime": [
"remote::brave-search",
"remote::tavily-search",
@@ -210,33 +207,35 @@ def get_distribution_template() -> DistributionTemplate:
),
]
default_benchmarks = [
BenchmarkInput(
benchmark_id="meta-reference-simpleqa",
dataset_id="simpleqa",
grader_ids=["llm-as-judge::405b-simpleqa"],
),
BenchmarkInput(
benchmark_id="meta-reference-mmlu-cot",
dataset_id="mmlu_cot",
grader_ids=["basic::regex_parser_multiple_choice_answer"],
),
BenchmarkInput(
benchmark_id="meta-reference-gpqa-cot",
dataset_id="gpqa_cot",
grader_ids=["basic::regex_parser_multiple_choice_answer"],
),
BenchmarkInput(
benchmark_id="meta-reference-math-500",
dataset_id="math_500",
grader_ids=["basic::regex_parser_math_response"],
),
BenchmarkInput(
benchmark_id="meta-reference-bfcl",
dataset_id="bfcl",
grader_ids=["basic::bfcl"],
),
]
# TODO(xiyan): fix this back as registerable resources
# default_benchmarks = [
# BenchmarkInput(
# benchmark_id="meta-reference-simpleqa",
# dataset_id="simpleqa",
# grader_ids=["llm-as-judge::405b-simpleqa"],
# ),
# BenchmarkInput(
# benchmark_id="meta-reference-mmlu-cot",
# dataset_id="mmlu_cot",
# grader_ids=["basic::regex_parser_multiple_choice_answer"],
# ),
# BenchmarkInput(
# benchmark_id="meta-reference-gpqa-cot",
# dataset_id="gpqa_cot",
# grader_ids=["basic::regex_parser_multiple_choice_answer"],
# ),
# BenchmarkInput(
# benchmark_id="meta-reference-math-500",
# dataset_id="math_500",
# grader_ids=["basic::regex_parser_math_response"],
# ),
# BenchmarkInput(
# benchmark_id="meta-reference-bfcl",
# dataset_id="bfcl",
# grader_ids=["basic::bfcl"],
# ),
# ]
return DistributionTemplate(
name=name,
distro_type="self_hosted",
@@ -255,7 +254,6 @@ def get_distribution_template() -> DistributionTemplate:
default_tool_groups=default_tool_groups,
default_shields=[ShieldInput(shield_id="meta-llama/Llama-Guard-3-8B")],
default_datasets=default_datasets,
default_benchmarks=default_benchmarks,
),
},
run_config_env_vars={

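The commented-out `default_benchmarks` above, together with the TODO, already shows the intended shape of a grader-based benchmark. As a reference only, a minimal sketch of declaring one such input, assuming `BenchmarkInput` accepts the `benchmark_id` / `dataset_id` / `grader_ids` fields shown in this diff (nothing is registered until the TODO is resolved):

```python
# Sketch only: the grader-based benchmark shape from the commented-out template,
# kept here for reference until benchmarks become registerable resources again.
from llama_stack.distribution.datatypes import BenchmarkInput

simpleqa_benchmark = BenchmarkInput(
    benchmark_id="meta-reference-simpleqa",
    dataset_id="simpleqa",
    grader_ids=["llm-as-judge::405b-simpleqa"],
)
```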

@@ -3,10 +3,8 @@ image_name: open-benchmark
apis:
- agents
- datasetio
- eval
- inference
- safety
- scoring
- telemetry
- tool_runtime
- vector_io
@@ -71,14 +69,6 @@ providers:
service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
sinks: ${env.TELEMETRY_SINKS:console,sqlite}
sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/open-benchmark/trace_store.db}
eval:
- provider_id: meta-reference
provider_type: inline::meta-reference
config:
kvstore:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/open-benchmark}/meta_reference_eval.db
datasetio:
- provider_id: huggingface
provider_type: remote::huggingface
@@ -94,17 +84,6 @@ providers:
type: sqlite
namespace: null
db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/open-benchmark}/localfs_datasetio.db
scoring:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: llm-as-judge
provider_type: inline::llm-as-judge
config: {}
- provider_id: braintrust
provider_type: inline::braintrust
config:
openai_api_key: ${env.OPENAI_API_KEY:}
tool_runtime:
- provider_id: brave-search
provider_type: remote::brave-search
@@ -193,33 +172,7 @@ datasets:
metadata: {}
dataset_id: bfcl
provider_id: huggingface
scoring_fns: []
benchmarks:
- dataset_id: simpleqa
grader_ids:
- llm-as-judge::405b-simpleqa
metadata: {}
benchmark_id: meta-reference-simpleqa
- dataset_id: mmlu_cot
grader_ids:
- basic::regex_parser_multiple_choice_answer
metadata: {}
benchmark_id: meta-reference-mmlu-cot
- dataset_id: gpqa_cot
grader_ids:
- basic::regex_parser_multiple_choice_answer
metadata: {}
benchmark_id: meta-reference-gpqa-cot
- dataset_id: math_500
grader_ids:
- basic::regex_parser_math_response
metadata: {}
benchmark_id: meta-reference-math-500
- dataset_id: bfcl
grader_ids:
- basic::bfcl
metadata: {}
benchmark_id: meta-reference-bfcl
benchmarks: []
tool_groups:
- toolgroup_id: builtin::websearch
provider_id: tavily-search