move server requirement directly to openai_client
commit 639e3f4570 (parent a01596a5fe)

2 changed files with 15 additions and 16 deletions
@@ -10,7 +10,6 @@ from unittest.mock import patch
 import pytest
 
 from llama_stack.core.datatypes import User
-from llama_stack.core.library_client import LlamaStackAsLibraryClient
 
 
 # a fixture to skip all these tests if a files provider is not available
@@ -20,20 +19,7 @@ def skip_if_no_files_provider(llama_stack_client):
         pytest.skip("No files providers found")
 
 
-@pytest.fixture(scope="session")
-def skip_if_no_server_running(llama_stack_client):
-    """
-    Skip test if no server is running.
-
-    We use the llama_stack_client to tell if a server was started or not.
-
-    We use this with openai_client because it relies on a running server.
-    """
-    if isinstance(llama_stack_client, LlamaStackAsLibraryClient):
-        pytest.skip("No server running")
-
-
-def test_openai_client_basic_operations(openai_client, skip_if_no_server_running):
+def test_openai_client_basic_operations(openai_client):
     """Test basic file operations through OpenAI client."""
     from openai import NotFoundError
 
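Read together, the two hunks above boil down to one change in this test module: the explicit skip fixture disappears from the test signature because the server requirement now travels with openai_client. A small before/after sketch (signatures taken from this diff, bodies elided):

# Before: the test had to opt in to the server check explicitly.
#
#   def test_openai_client_basic_operations(openai_client, skip_if_no_server_running):
#       ...
#
# After: requesting openai_client alone is enough; the check now lives in the
# fixture chain defined in the second changed file (see the hunk below).
def test_openai_client_basic_operations(openai_client):
    """Test basic file operations through OpenAI client."""
    ...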
@@ -263,7 +263,20 @@ def instantiate_llama_stack_client(session):
 
 
 @pytest.fixture(scope="session")
-def openai_client(llama_stack_client):
+def require_server(llama_stack_client):
+    """
+    Skip test if no server is running.
+
+    We use the llama_stack_client to tell if a server was started or not.
+
+    We use this with openai_client because it relies on a running server.
+    """
+    if isinstance(llama_stack_client, LlamaStackAsLibraryClient):
+        pytest.skip("No server running")
+
+
+@pytest.fixture(scope="session")
+def openai_client(llama_stack_client, require_server):
     base_url = f"{llama_stack_client.base_url}/v1/openai/v1"
     return OpenAI(base_url=base_url, api_key="fake")
 
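Taken together, the change moves the "is a server running?" check out of the individual test module and into the openai_client fixture, so every consumer of that fixture gets the skip behavior automatically. Below is a minimal, self-contained sketch of the pattern; FakeLibraryClient, the simplified llama_stack_client fixture, and the placeholder return value are hypothetical stand-ins for LlamaStackAsLibraryClient and the real fixtures, while the require_server / openai_client wiring mirrors the diff above.

import pytest


class FakeLibraryClient:
    """Hypothetical stand-in for LlamaStackAsLibraryClient: in-process, no HTTP server."""
    base_url = "http://localhost:0"


@pytest.fixture(scope="session")
def llama_stack_client():
    # In library mode the client runs in-process, so no server is listening.
    return FakeLibraryClient()


@pytest.fixture(scope="session")
def require_server(llama_stack_client):
    # Runs before any fixture or test that depends on it; skipping here
    # skips everything downstream.
    if isinstance(llama_stack_client, FakeLibraryClient):
        pytest.skip("No server running")


@pytest.fixture(scope="session")
def openai_client(llama_stack_client, require_server):
    # Because require_server is a dependency, this body only runs when a
    # server is available. A placeholder stands in for
    # OpenAI(base_url=..., api_key="fake").
    return object()


def test_openai_client_basic_operations(openai_client):
    # No extra skip fixture in the signature; the skip happens via openai_client.
    assert openai_client is not None

Run under pytest, the test above reports as skipped with "No server running" rather than failing on a connection error, which is the behavior the commit preserves while removing the per-test boilerplate.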