mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
fix: update tests for OpenAI-style models endpoint (#4053)
The llama-stack-client now uses `/v1/openai/v1/models` which returns OpenAI-compatible model objects with 'id' and 'custom_metadata' fields instead of the Resource-style 'identifier' field. Updated api_recorder to handle the new endpoint and modified tests to access model metadata appropriately. Deleted stale model recordings for re-recording. **NOTE: CI will be red on this one since it is dependent on https://github.com/llamastack/llama-stack-client-python/pull/291/files landing. I verified locally that it is green.**
This commit is contained in:
parent
4a5ef65286
commit
cb40da210f
27 changed files with 852 additions and 6707 deletions
|
|
@ -31,9 +31,11 @@ def decode_base64_to_floats(base64_string: str) -> list[float]:
|
|||
|
||||
|
||||
def provider_from_model(client_with_models, model_id):
    """Resolve the provider object that serves *model_id*.

    Models are indexed twice so the caller may pass either identifier form:
    once by the OpenAI-style ``id``, and once by the
    ``provider_resource_id`` stored in ``custom_metadata`` (skipped for
    models whose ``custom_metadata`` is falsy).

    NOTE(review): the final lookup reads ``custom_metadata["provider_id"]``
    unconditionally — a model found by ``id`` with empty ``custom_metadata``
    would raise here; presumably every listed model carries that metadata.
    """
    catalog = {m.id: m for m in client_with_models.models.list()}
    aliases = {
        m.custom_metadata["provider_resource_id"]: m
        for m in client_with_models.models.list()
        if m.custom_metadata
    }
    catalog.update(aliases)

    provider_id = catalog[model_id].custom_metadata["provider_id"]
    registry = {p.provider_id: p for p in client_with_models.providers.list()}
    return registry[provider_id]
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue