mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
fix: update tests for OpenAI-style models endpoint (#4053)
The llama-stack-client now uses `/v1/openai/v1/models`, which returns OpenAI-compatible model objects with 'id' and 'custom_metadata' fields instead of the Resource-style 'identifier' field. Updated api_recorder to handle the new endpoint and modified tests to access model metadata appropriately. Deleted stale model recordings for re-recording. **NOTE: CI will be red on this one since it is dependent on https://github.com/llamastack/llama-stack-client-python/pull/291/files landing. I verified locally that it is green.**
This commit is contained in:
parent
4a5ef65286
commit
cb40da210f
27 changed files with 852 additions and 6707 deletions
|
|
@ -368,10 +368,8 @@ def vector_provider_wrapper(func):
|
|||
|
||||
return func(*args, **kwargs)
|
||||
|
||||
# For CI tests (replay/record), only use providers that are available in ci-tests environment
|
||||
if os.environ.get("LLAMA_STACK_TEST_INFERENCE_MODE") in ("replay", "record"):
|
||||
all_providers = ["faiss", "sqlite-vec"]
|
||||
else:
|
||||
inference_mode = os.environ.get("LLAMA_STACK_TEST_INFERENCE_MODE")
|
||||
if inference_mode == "live":
|
||||
# For live tests, try all providers (they'll skip if not available)
|
||||
all_providers = [
|
||||
"faiss",
|
||||
|
|
@ -382,6 +380,9 @@ def vector_provider_wrapper(func):
|
|||
"weaviate",
|
||||
"qdrant",
|
||||
]
|
||||
else:
|
||||
# For CI tests (replay/record), only use providers that are available in ci-tests environment
|
||||
all_providers = ["faiss", "sqlite-vec"]
|
||||
|
||||
return pytest.mark.parametrize("vector_io_provider_id", all_providers)(wrapper)
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue