forked from phoenix-oss/llama-stack-mirror
fix integration
parent a6fa3aa5a2
commit 5cf7779b8f
2 changed files with 87 additions and 29 deletions
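In short: the eval integration tests stop seeding data through the register_dataset test helper and its hard-coded dataset IDs ("test_dataset_for_eval", "test_dataset_for_eval_2"), and instead register the CSV fixture through the client's datasets API, threading the server-assigned dataset.identifier through the list/iterrows assertions and the benchmark registration. A minimal sketch of the new call shape, assuming the llama_stack_client pytest fixture and the data_url_from_file helper imported below:

    from pathlib import Path

    # Register the CSV fixture as an eval dataset; the identifier is assigned
    # by the server rather than hard-coded by the test.
    dataset = llama_stack_client.datasets.register(
        purpose="eval/messages-answer",
        source={
            "type": "uri",
            # data_url_from_file inlines the CSV file as a data: URL
            "uri": data_url_from_file(
                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
            ),
        },
    )

    # Every later call keys off the returned identifier.
    rows = llama_stack_client.datasets.iterrows(dataset_id=dataset.identifier, limit=3)
    assert len(rows.data) == 3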
@@ -3,11 +3,13 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+import os
 import uuid
+from pathlib import Path
 
 import pytest
 
-from ..datasetio.test_datasetio import register_dataset
+from ..datasets.test_datasets import data_url_from_file
 
 # How to run this test:
 #
@@ -16,12 +18,20 @@ from ..datasetio.test_datasetio import register_dataset
 
 @pytest.mark.parametrize("scoring_fn_id", ["basic::equality"])
 def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval")
+    dataset = llama_stack_client.datasets.register(
+        purpose="eval/messages-answer",
+        source={
+            "type": "uri",
+            "uri": data_url_from_file(
+                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
+            ),
+        },
+    )
     response = llama_stack_client.datasets.list()
-    assert any(x.identifier == "test_dataset_for_eval" for x in response)
+    assert any(x.identifier == dataset.identifier for x in response)
 
     rows = llama_stack_client.datasets.iterrows(
-        dataset_id="test_dataset_for_eval",
+        dataset_id=dataset.identifier,
         limit=3,
     )
     assert len(rows.data) == 3
@@ -32,7 +42,7 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
-        dataset_id="test_dataset_for_eval",
+        dataset_id=dataset.identifier,
         scoring_functions=scoring_functions,
     )
     list_benchmarks = llama_stack_client.benchmarks.list()
@@ -59,11 +69,19 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
 
 @pytest.mark.parametrize("scoring_fn_id", ["basic::subset_of"])
 def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
-    register_dataset(llama_stack_client, for_generation=True, dataset_id="test_dataset_for_eval_2")
+    dataset = llama_stack_client.datasets.register(
+        purpose="eval/messages-answer",
+        source={
+            "type": "uri",
+            "uri": data_url_from_file(
+                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
+            ),
+        },
+    )
+
     benchmark_id = str(uuid.uuid4())
     llama_stack_client.benchmarks.register(
         benchmark_id=benchmark_id,
-        dataset_id="test_dataset_for_eval_2",
+        dataset_id=dataset.identifier,
         scoring_functions=[scoring_fn_id],
     )
 
@@ -80,10 +98,14 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         },
     )
     assert response.job_id == "0"
-    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
+    job_status = llama_stack_client.eval.jobs.status(
+        job_id=response.job_id, benchmark_id=benchmark_id
+    )
     assert job_status and job_status == "completed"
 
-    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
+    eval_response = llama_stack_client.eval.jobs.retrieve(
+        job_id=response.job_id, benchmark_id=benchmark_id
+    )
     assert eval_response is not None
     assert len(eval_response.generations) == 5
     assert scoring_fn_id in eval_response.scores
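Taken together, the post-change test_evaluate_benchmark flow reads roughly as the sketch below. The benchmarks.register and eval.jobs.status/retrieve signatures are exactly as in the diff; the run_eval call and its benchmark_config body fall between the hunks shown above, so their shape here is an assumption, not part of this commit:

    import uuid

    # Register a benchmark over the freshly registered dataset.
    benchmark_id = str(uuid.uuid4())
    llama_stack_client.benchmarks.register(
        benchmark_id=benchmark_id,
        dataset_id=dataset.identifier,
        scoring_functions=[scoring_fn_id],
    )

    # Start the eval job. The config body is elided in the diff; an
    # eval_candidate of type "model" is assumed here.
    response = llama_stack_client.eval.run_eval(
        benchmark_id=benchmark_id,
        benchmark_config={
            "eval_candidate": {"type": "model", "model": text_model_id},
        },
    )
    assert response.job_id == "0"

    # Confirm the job reached a terminal state, then pull generations and scores.
    job_status = llama_stack_client.eval.jobs.status(
        job_id=response.job_id, benchmark_id=benchmark_id
    )
    assert job_status == "completed"
    eval_response = llama_stack_client.eval.jobs.retrieve(
        job_id=response.job_id, benchmark_id=benchmark_id
    )
    assert len(eval_response.generations) == 5
    assert scoring_fn_id in eval_response.scores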