mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-06 18:50:44 +00:00)
disable eval test
This commit is contained in:
  parent 6104bd06a0
  commit 72f0550fb9

1 changed file with 14 additions and 5 deletions
@@ -21,7 +21,9 @@ def test_evaluate_rows(llama_stack_client, text_model_id, scoring_fn_id):
         purpose="eval/messages-answer",
         source={
             "type": "uri",
-            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
+            "uri": data_url_from_file(
+                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
+            ),
         },
     )
     response = llama_stack_client.datasets.list()
@@ -70,7 +72,9 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         purpose="eval/messages-answer",
         source={
             "type": "uri",
-            "uri": data_url_from_file(Path(__file__).parent.parent / "datasets" / "test_dataset.csv"),
+            "uri": data_url_from_file(
+                Path(__file__).parent.parent / "datasets" / "test_dataset.csv"
+            ),
         },
     )
     benchmark_id = str(uuid.uuid4())
@@ -93,10 +97,15 @@ def test_evaluate_benchmark(llama_stack_client, text_model_id, scoring_fn_id):
         },
     )
     assert response.job_id == "0"
-    job_status = llama_stack_client.eval.jobs.status(job_id=response.job_id, benchmark_id=benchmark_id)
-    assert job_status and job_status == "completed"
+    # TODO: the eval jobs API will be fixed together, skip for now
+    # job_status = llama_stack_client.eval.jobs.status(
+    #     job_id=response.job_id, benchmark_id=benchmark_id
+    # )
+    # assert job_status and job_status == "completed"
 
-    eval_response = llama_stack_client.eval.jobs.retrieve(job_id=response.job_id, benchmark_id=benchmark_id)
+    eval_response = llama_stack_client.eval.jobs.retrieve(
+        job_id=response.job_id, benchmark_id=benchmark_id
+    )
     assert eval_response is not None
     assert len(eval_response.generations) == 5
     assert scoring_fn_id in eval_response.scores
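
Note: the status assertion is only commented out, not removed, so a natural follow-up once the eval jobs API is fixed is to poll the status endpoint before retrieving results. The sketch below is an assumption and not part of this commit; it reuses only the calls visible in the test (llama_stack_client.eval.jobs.status and .retrieve) and assumes the status call returns a plain string such as "completed", as the disabled assertion implies.

    import time

    def wait_for_eval_job(llama_stack_client, benchmark_id, job_id, timeout_s=60):
        # Poll the (currently skipped) status endpoint until the job finishes,
        # assuming it returns a plain status string as the disabled assertion expects.
        deadline = time.time() + timeout_s
        while time.time() < deadline:
            job_status = llama_stack_client.eval.jobs.status(job_id=job_id, benchmark_id=benchmark_id)
            if job_status == "completed":
                return llama_stack_client.eval.jobs.retrieve(job_id=job_id, benchmark_id=benchmark_id)
            time.sleep(1)
        raise TimeoutError(f"eval job {job_id} did not complete within {timeout_s}s")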
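For context, data_url_from_file (unchanged by this commit) is the test helper that turns the local CSV into an inline data: URI accepted by the "type": "uri" dataset source. A minimal sketch of such a helper, written here as an illustration rather than copied from the repository, could look like:

    import base64
    import mimetypes
    from pathlib import Path

    def data_url_from_file(file_path: Path) -> str:
        # Read the file, base64-encode it, and wrap it in a data: URI with a
        # best-effort MIME type (e.g. text/csv for the test dataset).
        content = Path(file_path).read_bytes()
        mime_type, _ = mimetypes.guess_type(str(file_path))
        encoded = base64.b64encode(content).decode("utf-8")
        return f"data:{mime_type or 'application/octet-stream'};base64,{encoded}"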