# refactor(test): introduce --stack-config and simplify options (#1404)
You now run the integration tests with these options:

```bash
Custom options:
  --stack-config=STACK_CONFIG
                        a 'pointer' to the stack. this can either be: (a) a
                        template name like `fireworks`, (b) a path to a
                        run.yaml file, or (c) an adhoc config spec, e.g.
                        `inference=fireworks,safety=llama-guard,agents=meta-reference`
  --env=ENV             Set environment variables, e.g. --env KEY=value
  --text-model=TEXT_MODEL
                        comma-separated list of text models. Fixture name: text_model_id
  --vision-model=VISION_MODEL
                        comma-separated list of vision models. Fixture name: vision_model_id
  --embedding-model=EMBEDDING_MODEL
                        comma-separated list of embedding models. Fixture name: embedding_model_id
  --safety-shield=SAFETY_SHIELD
                        comma-separated list of safety shields. Fixture name: shield_id
  --judge-model=JUDGE_MODEL
                        comma-separated list of judge models. Fixture name: judge_model_id
  --embedding-dimension=EMBEDDING_DIMENSION
                        Output dimensionality of the embedding model to use for testing. Default: 384
  --record-responses    Record new API responses instead of using cached ones.
  --report=REPORT       Path where the test report should be written, e.g. --report=/path/to/report.md
```

Importantly, if you don't specify any of the models (text-model, vision-model, etc.), the relevant tests will be **skipped!**

This makes running tests somewhat more annoying since all options need to be specified. We will make this easier by adding wrapper yaml configs.

## Test Plan

Example:

```bash
ashwin@ashwin-mbp ~/local/llama-stack/tests/integration (unify_tests) $ LLAMA_STACK_CONFIG=fireworks pytest -s -v inference/test_text_inference.py \
   --text-model meta-llama/Llama-3.2-3B-Instruct
```
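For illustration, here is a sketch of an equivalent invocation using the new `--stack-config` flag directly. The flag names come from the help text above, and the template name and model ID are taken from the Test Plan example; adjust them for your environment.

```bash
# Sketch only: flag names are from the help text above; values are illustrative.
pytest -s -v inference/test_text_inference.py \
  --stack-config=fireworks \
  --text-model=meta-llama/Llama-3.2-3B-Instruct

# The adhoc config spec form mentioned in the help text is passed the same way:
pytest -s -v inference/test_text_inference.py \
  --stack-config=inference=fireworks,safety=llama-guard,agents=meta-reference \
  --text-model=meta-llama/Llama-3.2-3B-Instruct
```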
Parent: a0d6b165b0
Commit: 2fe976ed0a

15 changed files with 536 additions and 1144 deletions
**tests/integration/safety/test_vision_safety.py** (new file, +71 lines)

```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import base64
import mimetypes
import os

import pytest

from llama_stack.apis.safety import ViolationLevel

VISION_SHIELD_ENABLED_PROVIDERS = {"together"}


def data_url_from_image(file_path):
    mime_type, _ = mimetypes.guess_type(file_path)
    if mime_type is None:
        raise ValueError("Could not determine MIME type of the file")

    with open(file_path, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode("utf-8")

    data_url = f"data:{mime_type};base64,{encoded_string}"
    return data_url


def test_safety_with_image(llama_stack_client, model_providers):
    vision_safety_enabled = len(VISION_SHIELD_ENABLED_PROVIDERS & model_providers) > 0
    if not vision_safety_enabled:
        pytest.skip(f"Testing vision shields is not supported for model_providers {model_providers}")

    current_dir = os.path.dirname(os.path.abspath(__file__))
    unsafe_example_img_path = f"{current_dir}/resources/example_unsafe.jpg"
    safe_example_img_path = f"{current_dir}/resources/example_safe.jpg"

    examples = {
        "safe": safe_example_img_path,
        "unsafe": unsafe_example_img_path,
    }

    for _, file_path in examples.items():
        message = {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Tell me how to assemble this",
                },
                {
                    "type": "image",
                    "image": {"url": {"uri": data_url_from_image(file_path)}},
                },
            ],
        }
        response = llama_stack_client.safety.run_shield(
            messages=[message],
            shield_id="meta-llama/Llama-Guard-3-11B-Vision",
            params={},
        )
        assert response is not None

        # FIXME: We are getting flaky results with the unsafe example:
        # 1. sometimes it is marked as safe
        # 2. sometimes it is marked as unsafe but with incorrect violation_type
        # 3. sometimes it is marked as unsafe with correct violation_type
        if response.violation is not None:
            assert response.violation.violation_level == ViolationLevel.ERROR.value
            assert response.violation.user_message == "I can't answer that. Can I help with something else?"
```
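As a sketch (not part of the commit): since `VISION_SHIELD_ENABLED_PROVIDERS` only contains `together`, this test is only exercised against a together-backed stack. Assuming a `together` template (or an equivalent run.yaml) is available, running just this file from `tests/integration` might look like the following.

```bash
# Sketch only: assumes a `together` template exists for --stack-config.
# The shield_id is hard-coded in the test, so no --safety-shield flag is needed here.
pytest -s -v safety/test_vision_safety.py --stack-config=together
```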