Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-04 12:07:34 +00:00
fix vllm unit tests, openai.resources.models.AsyncModels.list is not async
parent 3ea30c0a9c
commit 3fb643b847

1 changed file with 1 addition and 1 deletion
@@ -62,7 +62,7 @@ from llama_stack.providers.remote.inference.vllm.vllm import (
 
 
 @pytest.fixture(scope="module")
 def mock_openai_models_list():
-    with patch("openai.resources.models.AsyncModels.list", new_callable=AsyncMock) as mock_list:
+    with patch("openai.resources.models.AsyncModels.list") as mock_list:
         yield mock_list
 
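For context, a minimal sketch of how this fixture can be consumed after the change (not part of the commit; it assumes the openai Python SDK and pytest-asyncio are installed, and the model id, base URL, and test body below are made up). Because openai.resources.models.AsyncModels.list is a regular method that returns an async-iterable paginator rather than a coroutine function, patching it with the default MagicMock lets the test assign an async generator as the return value and iterate it with async for; AsyncMock would instead make the patched call return a coroutine and break that iteration.

# Illustrative sketch only: assumes the openai SDK and pytest-asyncio are available;
# model id and base_url are placeholders, not values from this commit.
from unittest.mock import patch

import pytest
from openai import AsyncOpenAI
from openai.types.model import Model


@pytest.fixture(scope="module")
def mock_openai_models_list():
    # Default MagicMock (no new_callable=AsyncMock): the real AsyncModels.list is a
    # synchronous method that returns an async-iterable paginator, so the patched
    # call must return the configured value directly rather than a coroutine.
    with patch("openai.resources.models.AsyncModels.list") as mock_list:
        yield mock_list


async def _fake_models():
    # Async generator standing in for the paginator the real client would return.
    yield Model(id="mock-vllm-model", created=0, object="model", owned_by="test")


@pytest.mark.asyncio
async def test_lists_models(mock_openai_models_list):
    mock_openai_models_list.return_value = _fake_models()
    client = AsyncOpenAI(api_key="not-used", base_url="http://localhost:8000/v1")
    ids = [m.id async for m in client.models.list()]
    assert ids == ["mock-vllm-model"]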