Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-28 02:53:30 +00:00
feat: allow using llama-stack-library-client from verifications (#2238)
Having to run (and re-run) a server while running verifications can be annoying when you are iterating on code. This makes it so you can use the library client -- and because it is OpenAI client compatible, it all works.

## Test Plan

```
pytest -s -v tests/verifications/openai_api/test_responses.py \
  --provider=stack:together \
  --model meta-llama/Llama-4-Scout-17B-16E-Instruct
```
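For context, here is a minimal sketch of how a `stack:<distro>` provider value could be resolved into the library client inside a pytest fixture. Only `LlamaStackAsLibraryClient` and its `initialize()` call come from the llama-stack API; the fixture name `openai_client`, the option plumbing, and the fallback `base_url` are illustrative assumptions, not necessarily what the repo does.

```python
import pytest


@pytest.fixture
def openai_client(request):
    """Return a client for the requested provider (illustrative sketch).

    --provider=stack:<distro> runs llama-stack in-process via the library
    client; any other value assumes a server is already running.
    """
    provider = request.config.getoption("provider")
    if provider and provider.startswith("stack:"):
        from llama_stack import LlamaStackAsLibraryClient

        distro = provider.split(":", 1)[1]  # e.g. "together"
        client = LlamaStackAsLibraryClient(distro)
        client.initialize()  # builds the stack in-process; no server needed
        return client

    # Fall back to a plain OpenAI client pointed at a running server.
    # The base_url below is an assumption; adjust to your deployment.
    from openai import OpenAI

    return OpenAI(base_url="http://localhost:8321/v1/openai/v1", api_key="none")
```

Because the library client exposes the same OpenAI-compatible surface, the verification tests do not need to know which of the two paths produced the client.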
commit 6463ee7633
parent 558d109ab7

3 changed files with 31 additions and 7 deletions
```diff
@@ -16,6 +16,11 @@ def pytest_generate_tests(metafunc):
             metafunc.parametrize("model", [])
             return
 
+        model = metafunc.config.getoption("model")
+        if model:
+            metafunc.parametrize("model", [model])
+            return
+
         try:
             config_data = _load_all_verification_configs()
         except (OSError, FileNotFoundError) as e:
```
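The `metafunc.config.getoption("model")` call in the hunk above presupposes that a `--model` option is registered via a `pytest_addoption` hook, alongside the `--provider` option used in the test plan. A minimal sketch of that registration follows; the option names match the test-plan invocation, but the defaults and help strings are assumptions, not the repo's exact text.

```python
def pytest_addoption(parser):
    # Registers the CLI options that pytest_generate_tests reads via
    # metafunc.config.getoption(...). Help text and defaults are assumed.
    parser.addoption(
        "--provider",
        action="store",
        default=None,
        help="Verification provider, e.g. 'together', or 'stack:together' "
        "to run llama-stack in-process via the library client.",
    )
    parser.addoption(
        "--model",
        action="store",
        default=None,
        help="Run verifications against only this model instead of every "
        "model listed in the verification configs.",
    )
```

With `--model` set, the hunk short-circuits and parametrizes the test with that single model, skipping the config-driven model discovery below it.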