feat(eval api): (2.2/n) delete eval / scoring / scoring_fn apis (#1700)
# What does this PR do?

- To ease the migration, delete the existing `eval` / `scoring` / `scoring_function` APIs. This leaves a number of broken impls behind; the cleanup sequence is:
  1. Migrate the benchmark graders.
  2. Clean up the existing scoring functions.
- Add a skeleton evaluation impl to make the tests pass (see the sketch below).

## Test Plan

Tested in follow-up PRs.
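For context, a minimal sketch of what such a skeleton impl might look like, assuming the usual provider shape in llama-stack. The class and method names below are illustrative placeholders, not the actual code added by this commit.

```python
# Hypothetical sketch of a "skeleton" evaluation impl: it keeps the API
# surface registerable and routable while the real graders are migrated.
# SkeletonEvalImpl, run_eval, and job_status are assumed names used for
# illustration only; they are not taken from this commit.


class SkeletonEvalImpl:
    """Placeholder provider so routing and imports keep working."""

    async def run_eval(self, benchmark_id: str, benchmark_config: dict) -> dict:
        # Real grading lands in the follow-up PRs described above.
        raise NotImplementedError("eval API is being reworked (#1700 series)")

    async def job_status(self, benchmark_id: str, job_id: str) -> dict:
        raise NotImplementedError("eval API is being reworked (#1700 series)")
```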
parent 0048274ec0
commit c1d18283d2

113 changed files with 408 additions and 3900 deletions
Excerpt from the diff (the eval tests being skipped):

```diff
@@ -16,6 +16,7 @@ from ..datasets.test_datasets import data_url_from_file


 @pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
+@pytest.mark.skip(reason="TODO(xiyan): fix this")
 def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
     dataset = llama_stack_client.datasets.register(
         purpose="eval/messages-answer",
@@ -65,6 +66,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):


 @pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
+@pytest.mark.skip(reason="TODO(xiyan): fix this")
 def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
     dataset = llama_stack_client.datasets.register(
         purpose="eval/messages-answer",
```
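The `datasets.register(...)` call is truncated in both hunks. Based on the `data_url_from_file` import visible in the first hunk header, the call plausibly continues along the lines sketched below; the `source` argument and the file name are assumptions for illustration, not part of this diff.

```python
# Hypothetical continuation of the truncated registration call above.
# purpose= comes from the diff; source= and the file name are assumed.
dataset = llama_stack_client.datasets.register(
    purpose="eval/messages-answer",
    source={
        "type": "uri",
        "uri": data_url_from_file("chat_completion_input.csv"),  # assumed file
    },
)
```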