mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-11 19:56:03 +00:00)
fix: update tests for OpenAI-style models endpoint
The llama-stack-client now uses /v1/openai/v1/models, which returns OpenAI-compatible model objects with 'id' and 'custom_metadata' fields instead of the Resource-style 'identifier' field. Updated api_recorder to handle the new endpoint and modified tests to access model metadata appropriately. Deleted stale model recordings so they can be re-recorded.
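For context, a minimal sketch of the access change the tests are adapted to, assuming the llama_stack_client Python SDK and a locally running Llama Stack server (the base URL below is illustrative, not taken from this commit):

# Sketch only: shows the old vs. new way of reading model IDs from models.list().
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

models = client.models.list()

# Old Resource-style access (before the /v1/openai/v1/models endpoint):
#     available_models = [m.identifier for m in models]
# New OpenAI-compatible access:
available_models = [m.id for m in models]

# Per this commit's description, model metadata now lives under 'custom_metadata';
# its exact contents depend on the provider, so treat this as illustrative.
first_metadata = models[0].custom_metadata if models else None
print(available_models, first_metadata)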
This commit is contained in:
parent 4a5ef65286
commit 809dae01c2
24 changed files with 823 additions and 6697 deletions
@@ -40,7 +40,7 @@ def text_model(request, client_with_models):
     model_id = request.param
 
     # Check if the model is available
-    available_models = [m.identifier for m in client_with_models.models.list()]
+    available_models = [m.id for m in client_with_models.models.list()]
 
     if model_id not in available_models:
         pytest.skip(
@@ -76,7 +76,7 @@ def vision_model(request, client_with_models):
     model_id = request.param
 
     # Check if the model is available
-    available_models = [m.identifier for m in client_with_models.models.list()]
+    available_models = [m.id for m in client_with_models.models.list()]
 
     if model_id not in available_models:
         pytest.skip(