feat(eval api): (2.2/n) delete eval / scoring / scoring_fn apis (#1700)

# What does this PR do?
- To make it easier, delete existing `eval/scoring/scoring_function`
apis. There will be a bunch of broken impls here. The sequence is:
1. migrate benchmark graders
2. clean up existing scoring functions

- Add a skeleton evaluation impl to make tests pass. 

## Test Plan
tested in following PRs

[//]: # (## Documentation)
This commit is contained in:
Xi Yan 2025-03-19 11:04:23 -07:00 committed by GitHub
parent 0048274ec0
commit c1d18283d2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
113 changed files with 408 additions and 3900 deletions

View file

@ -43,12 +43,14 @@ def register_scoring_function(
)
@pytest.mark.skip(reason="TODO(xiyan): fix this")
def test_scoring_functions_list(llama_stack_client):
response = llama_stack_client.scoring_functions.list()
assert isinstance(response, list)
assert len(response) > 0
@pytest.mark.skip(reason="TODO(xiyan): fix this")
def test_scoring_functions_register(
llama_stack_client,
sample_scoring_fn_id,
@ -81,6 +83,7 @@ def test_scoring_functions_register(
@pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
@pytest.mark.skip(reason="TODO(xiyan): fix this")
def test_scoring_score(llama_stack_client, scoring_fn_id):
# scoring individual rows
df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
@ -100,6 +103,7 @@ def test_scoring_score(llama_stack_client, scoring_fn_id):
assert len(response.results[x].score_rows) == len(rows)
@pytest.mark.skip(reason="TODO(xiyan): fix this")
def test_scoring_score_with_params_llm_as_judge(
llama_stack_client,
sample_judge_prompt_template,
@ -139,6 +143,7 @@ def test_scoring_score_with_params_llm_as_judge(
"braintrust",
],
)
@pytest.mark.skip(reason="TODO(xiyan): fix this")
def test_scoring_score_with_aggregation_functions(
llama_stack_client,
sample_judge_prompt_template,