Xi Yan 2025-03-19 10:10:02 -07:00
parent d1b44c1251
commit 443b18a992
5 changed files with 7 additions and 127 deletions

@@ -22,9 +22,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
         purpose="eval/messages-answer",
         source={
             "type": "uri",
-            "uri": data_url_from_file(
-                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
-            ),
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
         },
     )
     response = llama_stack_client.datasets.list()
@@ -74,9 +72,7 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         purpose="eval/messages-answer",
         source={
             "type": "uri",
-            "uri": data_url_from_file(
-                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
-            ),
+            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
         },
     )
     benchmark_id = str(uuid.uuid4())
@@ -99,14 +95,10 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         },
     )
     assert response.job_id == "0"
-    job_status = llama_stack_client.eval.jobs.status(
-        job_id=response.job_id, benchmark_id=benchmark_id
-    )
+    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
     assert job_status and job_status == "completed"
-    eval_response = llama_stack_client.eval.jobs.retrieve(
-        job_id=response.job_id, benchmark_id=benchmark_id
-    )
+    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
     assert eval_response is not None
     assert len(eval_response.generations) == 5
     assert scoring_fn_id in eval_response.scores

@@ -154,11 +154,7 @@ def test_scoring_score_with_aggregation_functions(
     df = pd.read_csv(Path(__file__).parent.parent / "datasets" / "test_dataset.csv")
     rows = df.to_dict(orient="records")
-    scoring_fns_list = [
-        x
-        for x in llama_stack_client.scoring_functions.list()
-        if x.provider_id == provider_id
-    ]
+    scoring_fns_list = [x for x in llama_stack_client.scoring_functions.list() if x.provider_id == provider_id]
     if len(scoring_fns_list) == 0:
         pytest.skip(f"No scoring functions found for provider {provider_id}, skipping")