fix: update tests for OpenAI-style models endpoint

The llama-stack-client now uses /v1/openai/v1/models, which returns OpenAI-compatible model objects carrying 'id' and 'custom_metadata' fields instead of the Resource-style 'identifier' field. Updated api_recorder to handle the new endpoint and modified the tests to access model metadata appropriately. Deleted stale model recordings so they can be re-recorded against the new endpoint.
This commit is contained in:
Ashwin Bharambe 2025-11-03 16:16:16 -08:00
parent 4a5ef65286
commit 809dae01c2
24 changed files with 823 additions and 6697 deletions

View file

@@ -368,10 +368,8 @@ def vector_provider_wrapper(func):
return func(*args, **kwargs)
# For CI tests (replay/record), only use providers that are available in ci-tests environment
if os.environ.get("LLAMA_STACK_TEST_INFERENCE_MODE") in ("replay", "record"):
all_providers = ["faiss", "sqlite-vec"]
else:
inference_mode = os.environ.get("LLAMA_STACK_TEST_INFERENCE_MODE")
if inference_mode == "live":
# For live tests, try all providers (they'll skip if not available)
all_providers = [
"faiss",
@@ -382,6 +380,9 @@ def vector_provider_wrapper(func):
"weaviate",
"qdrant",
]
else:
# For CI tests (replay/record), only use providers that are available in ci-tests environment
all_providers = ["faiss", "sqlite-vec"]
return pytest.mark.parametrize("vector_io_provider_id", all_providers)(wrapper)