fix provider model list test (#800)

Fixes provider tests

```
pytest -v -s -k "together or fireworks or ollama" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py 
```
```
...
.... 
llama_stack/providers/tests/inference/test_text_inference.py::TestInference::test_chat_completion_streaming[-together] PASSED
llama_stack/providers/tests/inference/test_text_inference.py::TestInference::test_chat_completion_with_tool_calling[-together] PASSED
llama_stack/providers/tests/inference/test_text_inference.py::TestInference::test_chat_completion_with_tool_calling_streaming[-together] PASSED

================ 21 passed, 6 skipped, 81 deselected, 5 warnings in 32.11s =================
```

Co-authored-by: Hardik Shah <hjshah@fb.com>
This commit is contained in:
Hardik Shah 2025-01-16 19:27:29 -08:00 committed by GitHub
parent 9f14382d82
commit b2ac29b9da
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -31,7 +31,7 @@ from llama_stack.apis.inference import (
     ToolChoice,
     UserMessage,
 )
-from llama_stack.apis.models import Model
+from llama_stack.apis.models import ListModelsResponse, Model
 from .utils import group_chunks
@@ -92,12 +92,13 @@ class TestInference:
     async def test_model_list(self, inference_model, inference_stack):
         _, models_impl = inference_stack
         response = await models_impl.list_models()
-        assert isinstance(response, list)
-        assert len(response) >= 1
-        assert all(isinstance(model, Model) for model in response)
+        assert isinstance(response, ListModelsResponse)
+        assert isinstance(response.data, list)
+        assert len(response.data) >= 1
+        assert all(isinstance(model, Model) for model in response.data)
         model_def = None
-        for model in response:
+        for model in response.data:
             if model.identifier == inference_model:
                 model_def = model
                 break