fix: update tests for OpenAI-style models endpoint (#4053)

The llama-stack-client now uses `/v1/openai/v1/models` which returns
OpenAI-compatible model objects with 'id' and 'custom_metadata' fields
instead of the Resource-style 'identifier' field. Updated api_recorder
to handle the new endpoint and modified tests to access model metadata
appropriately. Deleted stale model recordings for re-recording.

**NOTE: CI will be red on this one since it is dependent on
https://github.com/llamastack/llama-stack-client-python/pull/291/files
landing. I verified locally that it is green.**
This commit is contained in:
Ashwin Bharambe 2025-11-03 17:30:08 -08:00 committed by GitHub
parent 4a5ef65286
commit cb40da210f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
27 changed files with 852 additions and 6707 deletions

View file

@@ -239,8 +239,13 @@ client = LlamaStackClient(base_url="http://localhost:8321")
models = client.models.list()
# Select the first LLM
llm = next(m for m in models if m.model_type == "llm" and m.provider_id == "ollama")
model_id = llm.identifier
llm = next(
m for m in models
if m.custom_metadata
and m.custom_metadata.get("model_type") == "llm"
and m.custom_metadata.get("provider_id") == "ollama"
)
model_id = llm.id
print("Model:", model_id)
@@ -279,8 +284,13 @@ import uuid
client = LlamaStackClient(base_url=f"http://localhost:8321")
models = client.models.list()
llm = next(m for m in models if m.model_type == "llm" and m.provider_id == "ollama")
model_id = llm.identifier
llm = next(
m for m in models
if m.custom_metadata
and m.custom_metadata.get("model_type") == "llm"
and m.custom_metadata.get("provider_id") == "ollama"
)
model_id = llm.id
agent = Agent(client, model=model_id, instructions="You are a helpful assistant.")
@@ -450,8 +460,11 @@ import uuid
client = LlamaStackClient(base_url="http://localhost:8321")
# Create a vector database instance
embed_lm = next(m for m in client.models.list() if m.model_type == "embedding")
embedding_model = embed_lm.identifier
embed_lm = next(
m for m in client.models.list()
if m.custom_metadata and m.custom_metadata.get("model_type") == "embedding"
)
embedding_model = embed_lm.id
vector_db_id = f"v{uuid.uuid4().hex}"
# The VectorDB API is deprecated; the server now returns its own authoritative ID.
# We capture the correct ID from the response's .identifier attribute.
@@ -489,9 +502,11 @@ client.tool_runtime.rag_tool.insert(
llm = next(
m
for m in client.models.list()
if m.model_type == "llm" and m.provider_id == "ollama"
if m.custom_metadata
and m.custom_metadata.get("model_type") == "llm"
and m.custom_metadata.get("provider_id") == "ollama"
)
model = llm.identifier
model = llm.id
# Create the RAG agent
rag_agent = Agent(