mirror of https://github.com/meta-llama/llama-stack.git

fixes

commit bf8d76f19b (parent cad646478f)
3 changed files with 7 additions and 8 deletions
@@ -10,17 +10,17 @@ from tests.verifications.openai_api.fixtures.fixtures import _load_all_verification_configs
def pytest_generate_tests(metafunc):
    """Dynamically parametrize tests based on the selected provider and config."""
    if "model" in metafunc.fixturenames:
        model = metafunc.config.getoption("model")
        if model:
            metafunc.parametrize("model", [model])
            return

        provider = metafunc.config.getoption("provider")
        if not provider:
            print("Warning: --provider not specified. Skipping model parametrization.")
            metafunc.parametrize("model", [])
            return

        model = metafunc.config.getoption("model")
        if model:
            metafunc.parametrize("model", [model])
            return

        try:
            config_data = _load_all_verification_configs()
        except (OSError, FileNotFoundError) as e:
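
For context, the --model and --provider values read through metafunc.config.getoption() above must be registered as pytest options elsewhere in the test suite's conftest.py. A minimal sketch of that registration and a typical invocation, assuming these option names and defaults (the actual registration in llama-stack may differ):

# Hypothetical registration of the options consumed by pytest_generate_tests above.
# The option names come from the getoption() calls in the hunk; defaults and help
# text here are assumptions, not the repository's actual values.
def pytest_addoption(parser):
    parser.addoption(
        "--model",
        action="store",
        default=None,
        help="Run verification tests against a single model id only",
    )
    parser.addoption(
        "--provider",
        action="store",
        default=None,
        help="Provider whose models are taken from the verification config",
    )

# Example invocations (paths and values are placeholders):
#   pytest tests/verifications/openai_api --model <model-id>
#   pytest tests/verifications/openai_api --provider <provider-name>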