# What does this PR do?

The previous image URLs were sometimes blocked by Cloudflare, causing test failures for some users. This update replaces them with a GitHub-hosted image (`dog.png`) from the `llama-stack` repository, ensuring more reliable access during testing.

Signed-off-by: Sébastien Han <seb@redhat.com>

## Test Plan

```
$ ollama run llama3.2-vision:latest --keep-alive 2m &
$ uv run pytest -v -s -k "ollama" --inference-model=llama3.2-vision:latest llama_stack/providers/tests/inference/test_vision_inference.py

/Users/leseb/Documents/AI/llama-stack/.venv/lib/python3.13/site-packages/pytest_asyncio/plugin.py:207: PytestDeprecationWarning: The configuration option "asyncio_default_fixture_loop_scope" is unset.
The event loop scope for asynchronous fixtures will default to the fixture caching scope. Future versions of pytest-asyncio will default the loop scope for asynchronous fixtures to function scope. Set the default fixture loop scope explicitly in order to avoid unexpected behavior in the future. Valid fixture loop scopes are: "function", "class", "module", "package", "session"
  warnings.warn(PytestDeprecationWarning(_DEFAULT_FIXTURE_LOOP_SCOPE_UNSET))
============================================ test session starts =============================================
platform darwin -- Python 3.13.1, pytest-8.3.4, pluggy-1.5.0 -- /Users/leseb/Documents/AI/llama-stack/.venv/bin/python3
cachedir: .pytest_cache
metadata: {'Python': '3.13.1', 'Platform': 'macOS-15.3-arm64-arm-64bit-Mach-O', 'Packages': {'pytest': '8.3.4', 'pluggy': '1.5.0'}, 'Plugins': {'html': '4.1.1', 'metadata': '3.1.1', 'asyncio': '0.25.3', 'anyio': '4.8.0', 'nbval': '0.11.0'}}
rootdir: /Users/leseb/Documents/AI/llama-stack
configfile: pyproject.toml
plugins: html-4.1.1, metadata-3.1.1, asyncio-0.25.3, anyio-4.8.0, nbval-0.11.0
asyncio: mode=Mode.STRICT, asyncio_default_fixture_loop_scope=None
collected 39 items / 36 deselected / 3 selected

llama_stack/providers/tests/inference/test_vision_inference.py::TestVisionModelInference::test_vision_chat_completion_non_streaming[-ollama-image0-expected_strings0] PASSED
llama_stack/providers/tests/inference/test_vision_inference.py::TestVisionModelInference::test_vision_chat_completion_non_streaming[-ollama-image1-expected_strings1] PASSED
llama_stack/providers/tests/inference/test_vision_inference.py::TestVisionModelInference::test_vision_chat_completion_streaming[-ollama] PASSED

========================== 3 passed, 36 deselected, 2 warnings in 62.23s (0:01:02) ==========================
```
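For context, a minimal sketch of the message shape these tests now use, with the image content item pointing at the GitHub-hosted `dog.png`. The shape mirrors the test file below; `llama_stack_client` and `vision_model_id` would come from the existing test fixtures, and the commented call is illustrative only.

```python
# Sketch only: the image content item now references the GitHub-hosted dog.png
# instead of the previously used external URLs that Cloudflare sometimes blocked.
message = {
    "role": "user",
    "content": [
        {
            "type": "image",
            "image": {
                "url": {
                    "uri": "https://raw.githubusercontent.com/meta-llama/llama-stack/main/tests/client-sdk/inference/dog.png"
                },
            },
        },
        {"type": "text", "text": "Describe what is in this image."},
    ],
}
# response = llama_stack_client.inference.chat_completion(
#     model_id=vision_model_id, messages=[message], stream=False
# )
```

The full test file follows.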
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import base64
import pathlib

import pytest


@pytest.fixture(scope="session")
def inference_provider_type(llama_stack_client):
    providers = llama_stack_client.providers.list()
    inference_providers = [p for p in providers if p.api == "inference"]
    assert len(inference_providers) > 0, "No inference providers found"
    return inference_providers[0].provider_type


@pytest.fixture
def image_path():
    return pathlib.Path(__file__).parent / "dog.png"


@pytest.fixture
def base64_image_data(image_path):
    # Convert the image to base64
    return base64.b64encode(image_path.read_bytes()).decode("utf-8")


@pytest.fixture
def base64_image_url(base64_image_data, image_path):
    # suffix includes the ., so we remove it
    return f"data:image/{image_path.suffix[1:]};base64,{base64_image_data}"


def test_image_chat_completion_non_streaming(llama_stack_client, vision_model_id):
    # Non-streaming chat completion with the GitHub-hosted image referenced by URL.
    message = {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": {
                    "url": {
                        "uri": "https://raw.githubusercontent.com/meta-llama/llama-stack/main/tests/client-sdk/inference/dog.png"
                    },
                },
            },
            {
                "type": "text",
                "text": "Describe what is in this image.",
            },
        ],
    }
    response = llama_stack_client.inference.chat_completion(
        model_id=vision_model_id,
        messages=[message],
        stream=False,
    )
    message_content = response.completion_message.content.lower().strip()
    assert len(message_content) > 0
    assert any(expected in message_content for expected in {"dog", "puppy", "pup"})


def test_image_chat_completion_streaming(llama_stack_client, vision_model_id):
    # Streaming variant: accumulate the streamed deltas before asserting on the content.
    message = {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": {
                    "url": {
                        "uri": "https://raw.githubusercontent.com/meta-llama/llama-stack/main/tests/client-sdk/inference/dog.png"
                    },
                },
            },
            {
                "type": "text",
                "text": "Describe what is in this image.",
            },
        ],
    }
    response = llama_stack_client.inference.chat_completion(
        model_id=vision_model_id,
        messages=[message],
        stream=True,
    )
    streamed_content = ""
    for chunk in response:
        streamed_content += chunk.event.delta.text.lower()
    assert len(streamed_content) > 0
    assert any(expected in streamed_content for expected in {"dog", "puppy", "pup"})


@pytest.mark.parametrize("type_", ["url", "data"])
def test_image_chat_completion_base64(llama_stack_client, vision_model_id, base64_image_data, base64_image_url, type_):
    # Pass the image either as a base64 data URL or as raw base64 data, depending on the parameter.
    image_spec = {
        "url": {
            "type": "image",
            "image": {
                "url": {
                    "uri": base64_image_url,
                },
            },
        },
        "data": {
            "type": "image",
            "image": {
                "data": base64_image_data,
            },
        },
    }[type_]

    message = {
        "role": "user",
        "content": [
            image_spec,
            {
                "type": "text",
                "text": "Describe what is in this image.",
            },
        ],
    }
    response = llama_stack_client.inference.chat_completion(
        model_id=vision_model_id,
        messages=[message],
        stream=False,
    )
    message_content = response.completion_message.content.lower().strip()
    assert len(message_content) > 0