Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-12-31 05:59:59 +00:00.
Fixes and linting
This commit (5251d2422d) is contained in this branch; its parent is 021dd0d35d.
8 changed files with 149 additions and 345 deletions
|
|
@ -27,9 +27,7 @@ def base64_image_url(base64_image_data, image_path):
|
|||
return f"data:image/{image_path.suffix[1:]};base64,{base64_image_data}"
|
||||
|
||||
|
||||
# @pytest.mark.xfail(
|
||||
# reason="This test is failing because the image is not being downloaded correctly."
|
||||
# )
|
||||
@pytest.mark.xfail(reason="This test is failing because the image is not being downloaded correctly.")
|
||||
def test_image_chat_completion_non_streaming(client_with_models, vision_model_id):
|
||||
message = {
|
||||
"role": "user",
|
||||
|
|
@ -58,9 +56,7 @@ def test_image_chat_completion_non_streaming(client_with_models, vision_model_id
|
|||
assert any(expected in message_content for expected in {"dog", "puppy", "pup"})
|
||||
|
||||
|
||||
# @pytest.mark.xfail(
|
||||
# reason="This test is failing because the image is not being downloaded correctly."
|
||||
# )
|
||||
@pytest.mark.xfail(reason="This test is failing because the image is not being downloaded correctly.")
|
||||
def test_image_chat_completion_streaming(client_with_models, vision_model_id):
|
||||
message = {
|
||||
"role": "user",
|
||||
|
|
@ -92,9 +88,7 @@ def test_image_chat_completion_streaming(client_with_models, vision_model_id):
|
|||
|
||||
|
||||
@pytest.mark.parametrize("type_", ["url"])
|
||||
def test_image_chat_completion_base64(
|
||||
client_with_models, vision_model_id, base64_image_data, base64_image_url, type_
|
||||
):
|
||||
def test_image_chat_completion_base64(client_with_models, vision_model_id, base64_image_data, base64_image_url, type_):
|
||||
image_spec = {
|
||||
"url": {
|
||||
"type": "image",
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue