Mirror of https://github.com/meta-llama/llama-stack.git
This moves the OpenAI Responses API tests under tests/verifications/openai_api/test_response.py and starts to wire them up to our verification suite, so that we can test multiple providers as well as OpenAI directly for the Responses API.

Signed-off-by: Ben Browning <bbrownin@redhat.com>
35 lines · 1.5 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from tests.verifications.openai_api.fixtures.fixtures import _load_all_verification_configs


def pytest_generate_tests(metafunc):
    """Dynamically parametrize tests based on the selected provider and config."""
    if "model" in metafunc.fixturenames:
        provider = metafunc.config.getoption("provider")
        if not provider:
            print("Warning: --provider not specified. Skipping model parametrization.")
            metafunc.parametrize("model", [])
            return

        try:
            config_data = _load_all_verification_configs()
        except (FileNotFoundError, IOError) as e:
            print(f"ERROR loading verification configs: {e}")
            config_data = {"providers": {}}

        provider_config = config_data.get("providers", {}).get(provider)
        if provider_config:
            models = provider_config.get("models", [])
            if models:
                metafunc.parametrize("model", models)
            else:
                print(f"Warning: No models found for provider '{provider}' in config.")
                metafunc.parametrize("model", [])  # Parametrize empty if no models found
        else:
            print(f"Warning: Provider '{provider}' not found in config. No models parametrized.")
            metafunc.parametrize("model", [])  # Parametrize empty if provider not found
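The hook above assumes two things that live outside this file: a --provider command-line option registered with pytest, and a nested dict returned by _load_all_verification_configs(). A minimal sketch of both, under those assumptions (the option registration below is illustrative, and the provider and model names are hypothetical placeholders, not copied from the repository):

# Sketch only: how the --provider option read by
# metafunc.config.getoption("provider") could be registered in a
# top-level conftest.py. The actual registration lives elsewhere
# in the repository.
def pytest_addoption(parser):
    parser.addoption(
        "--provider",
        action="store",
        default=None,
        help="Provider to run the verification tests against.",
    )


# Example of the structure this hook expects from
# _load_all_verification_configs(): a "providers" mapping whose
# entries each carry a "models" list. The names here are made up.
EXAMPLE_CONFIG = {
    "providers": {
        "openai": {"models": ["gpt-4o", "gpt-4o-mini"]},
    }
}

With a config shaped like this, an invocation such as pytest tests/verifications/openai_api/test_response.py --provider=openai would be expected to parametrize each test that uses the model fixture once per listed model.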