Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 18:50:41 +00:00)
fix: a couple of tests were broken and not yet exercised by our per-PR test workflow
parent 34f89bfbd6
commit d6887f46c6
2 changed files with 8 additions and 6 deletions
@@ -12,11 +12,12 @@ from llama_stack import LlamaStackAsLibraryClient
 
 class TestProviders:
     @pytest.mark.asyncio
-    def test_list(self, llama_stack_client: LlamaStackAsLibraryClient | LlamaStackClient):
+    def test_providers(self, llama_stack_client: LlamaStackAsLibraryClient | LlamaStackClient):
         provider_list = llama_stack_client.providers.list()
         assert provider_list is not None
         assert len(provider_list) > 0
 
-    @pytest.mark.asyncio
-    def test_inspect(self, llama_stack_client: LlamaStackAsLibraryClient | LlamaStackClient):
-        provider_list = llama_stack_client.providers.retrieve("ollama")
-        assert provider_list is not None
+        for provider in provider_list:
+            pid = provider.provider_id
+            provider = llama_stack_client.providers.retrieve(pid)
+            assert provider is not None
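Applied to the test module, the hunk leaves the class reading roughly as below. This is a reconstruction from the diff, not the verbatim file: the pytest and LlamaStackClient imports are inferred from the decorator and the type annotations, and any surrounding module contents are omitted.

# Reconstructed from the hunk above; imports are inferred, not copied from the commit.
import pytest

from llama_stack import LlamaStackAsLibraryClient
from llama_stack_client import LlamaStackClient


class TestProviders:
    @pytest.mark.asyncio
    def test_providers(self, llama_stack_client: LlamaStackAsLibraryClient | LlamaStackClient):
        # List all configured providers and confirm the endpoint returns something.
        provider_list = llama_stack_client.providers.list()
        assert provider_list is not None
        assert len(provider_list) > 0

        # Retrieve each provider by its own id instead of hard-coding "ollama",
        # so the test holds regardless of which distribution is under test.
        for provider in provider_list:
            pid = provider.provider_id
            provider = llama_stack_client.providers.retrieve(pid)
            assert provider is not None

Dropping the hard-coded retrieve("ollama") lookup is what makes the check distribution-agnostic: the loop now follows whatever providers.list() returns.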
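For readers who want to exercise these tests outside the per-PR workflow, here is a minimal sketch of how an llama_stack_client fixture could be provided in-process. It is not part of this commit: the conftest.py placement, the session scope, the "ollama" template name, and the initialize() call are assumptions based on the LlamaStackAsLibraryClient usage pattern, not code from the repository.

# Hypothetical conftest.py sketch; the real fixture lives elsewhere in the repo.
import pytest

from llama_stack import LlamaStackAsLibraryClient


@pytest.fixture(scope="session")
def llama_stack_client():
    # Assumption: build an in-process client from the "ollama" distribution
    # template; a reachable Ollama server is expected to be configured.
    client = LlamaStackAsLibraryClient("ollama")
    client.initialize()
    return client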