fix: update tests for OpenAI-style models endpoint

The llama-stack-client now uses /v1/openai/v1/models which returns OpenAI-compatible model objects with 'id' and 'custom_metadata' fields instead of the Resource-style 'identifier' field. Updated api_recorder to handle the new endpoint and modified tests to access model metadata appropriately. Deleted stale model recordings for re-recording.
Ashwin Bharambe 2025-11-03 16:16:16 -08:00
parent 4a5ef65286
commit 809dae01c2
24 changed files with 823 additions and 6697 deletions
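
For context, a minimal sketch of the new client-side access pattern. The base URL is an assumption for a locally running server; models.list() is taken from the diff below, and custom_metadata is mentioned only in the commit message, so treat both field names as described rather than verified:

    from llama_stack_client import LlamaStackClient

    client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server URL

    # The OpenAI-compatible model objects expose `id` (and, per the commit
    # message, `custom_metadata`) instead of the Resource-style `identifier`.
    for model in client.models.list():
        print(model.id)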


@@ -40,7 +40,7 @@ def text_model(request, client_with_models):
     model_id = request.param
     # Check if the model is available
-    available_models = [m.identifier for m in client_with_models.models.list()]
+    available_models = [m.id for m in client_with_models.models.list()]
     if model_id not in available_models:
         pytest.skip(
@@ -76,7 +76,7 @@ def vision_model(request, client_with_models):
     model_id = request.param
     # Check if the model is available
-    available_models = [m.identifier for m in client_with_models.models.list()]
+    available_models = [m.id for m in client_with_models.models.list()]
     if model_id not in available_models:
         pytest.skip(
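
A test suite that must run against both old and new servers could bridge the two shapes with a small helper; this is an illustrative alternative, not what the commit does (the commit switches tests to the new field outright):

    def model_id(m):
        # Prefer the OpenAI-style `id`, falling back to the legacy
        # Resource-style `identifier` on older servers.
        return getattr(m, "id", None) or getattr(m, "identifier", None)

    available_models = [model_id(m) for m in client_with_models.models.list()]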