Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-10-18 07:18:53 +00:00
fix(tests): reduce some test noise (#3825)
A bunch of logger.info()s are good for server code to help debug in production, but we don't want them killing our unit test output :)

Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
parent 0c368492b7
commit 07fc8013eb

5 changed files with 26 additions and 25 deletions
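The gist of the change: routine progress messages move from logger.info to logger.debug, and the unit-test session defaults to warning-level logging, so only warnings and errors reach the test output while production debugging stays possible. A minimal sketch of that level filtering (standalone illustration with made-up logger and message names, not code from this patch):

import logging

# With the level at WARNING (what the test session now defaults to),
# info- and debug-level messages are filtered out; warnings still show.
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("example")

logger.info("Using default embedding model ...")   # suppressed
logger.debug("No default embedding models found")  # suppressed
logger.warning("embedding model not registered")   # printed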
@@ -550,7 +550,7 @@ class OpenAIVectorStoreMixin(ABC):
             logger.info(f"Using default embedding model: {model_id} with dimension {embedding_dimension}")
             return model_id, embedding_dimension
 
-        logger.info("DEBUG: No default embedding models found")
+        logger.debug("No default embedding models found")
         return None
 
     async def openai_list_vector_stores(
@@ -329,3 +329,4 @@ classmethod-decorators = ["classmethod", "pydantic.field_validator"]
 addopts = ["--durations=10"]
 asyncio_mode = "auto"
 markers = ["allow_network: Allow network access for specific unit tests"]
+filterwarnings = "ignore::DeprecationWarning"
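The "ignore::DeprecationWarning" value uses Python's standard warning-filter syntax (action::category), the same form accepted by the interpreter's -W option. At runtime it amounts to roughly the following (illustrative sketch, not part of the patch):

import warnings

# Ignore every DeprecationWarning regardless of message or originating module,
# which is what the pytest filterwarnings entry requests.
warnings.filterwarnings("ignore", category=DeprecationWarning)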
@@ -4,17 +4,17 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
 import pytest_socket
 
-# We need to import the fixtures here so that pytest can find them
-# but ruff doesn't think they are used and removes the import. "noqa: F401" prevents them from being removed
-from .fixtures import cached_disk_dist_registry, disk_dist_registry, sqlite_kvstore  # noqa: F401
+import os
+import warnings
 
 
 def pytest_runtest_setup(item):
     """Setup for each test - check if network access should be allowed."""
     if "allow_network" in item.keywords:
         pytest_socket.enable_socket()
     else:
         # Allowing Unix sockets is necessary for some tests that use local servers and mocks
         pytest_socket.disable_socket(allow_unix_socket=True)
+
+
+def pytest_sessionstart(session) -> None:
+    if "LLAMA_STACK_LOGGING" not in os.environ:
+        os.environ["LLAMA_STACK_LOGGING"] = "all=WARNING"
+
+    # Silence common deprecation spam during unit tests.
+    warnings.filterwarnings("ignore", category=DeprecationWarning)
+    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
+
+
+pytest_plugins = ["tests.unit.fixtures"]
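Because pytest_sessionstart only sets LLAMA_STACK_LOGGING when it is absent, an explicit value exported before the run still takes precedence, for example to turn verbose logging back on while chasing a failure. The guard is effectively a setdefault; a one-line sketch of the same idea (illustrative, not from the patch):

import os

# Supply a default log level for the test session without clobbering a value
# the developer has already exported in the environment.
os.environ.setdefault("LLAMA_STACK_LOGGING", "all=WARNING")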
@@ -15,16 +15,16 @@ from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOp
 
 
 # Test fixtures and helper classes
-class TestConfig(BaseModel):
+class FakeConfig(BaseModel):
     api_key: str | None = Field(default=None)
 
 
-class TestProviderDataValidator(BaseModel):
+class FakeProviderDataValidator(BaseModel):
     test_api_key: str | None = Field(default=None)
 
 
-class TestLiteLLMAdapter(LiteLLMOpenAIMixin):
-    def __init__(self, config: TestConfig):
+class FakeLiteLLMAdapter(LiteLLMOpenAIMixin):
+    def __init__(self, config: FakeConfig):
         super().__init__(
             litellm_provider_name="test",
             api_key_from_config=config.api_key,
@@ -36,11 +36,11 @@ class TestLiteLLMAdapter(LiteLLMOpenAIMixin):
 @pytest.fixture
 def adapter_with_config_key():
     """Fixture to create adapter with API key in config"""
-    config = TestConfig(api_key="config-api-key")
-    adapter = TestLiteLLMAdapter(config)
+    config = FakeConfig(api_key="config-api-key")
+    adapter = FakeLiteLLMAdapter(config)
     adapter.__provider_spec__ = MagicMock()
     adapter.__provider_spec__.provider_data_validator = (
-        "tests.unit.providers.inference.test_litellm_openai_mixin.TestProviderDataValidator"
+        "tests.unit.providers.inference.test_litellm_openai_mixin.FakeProviderDataValidator"
     )
     return adapter
 
@@ -48,11 +48,11 @@ def adapter_with_config_key():
 @pytest.fixture
 def adapter_without_config_key():
     """Fixture to create adapter without API key in config"""
-    config = TestConfig(api_key=None)
-    adapter = TestLiteLLMAdapter(config)
+    config = FakeConfig(api_key=None)
+    adapter = FakeLiteLLMAdapter(config)
     adapter.__provider_spec__ = MagicMock()
     adapter.__provider_spec__.provider_data_validator = (
-        "tests.unit.providers.inference.test_litellm_openai_mixin.TestProviderDataValidator"
+        "tests.unit.providers.inference.test_litellm_openai_mixin.FakeProviderDataValidator"
     )
     return adapter
 
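The Test-to-Fake renames above, and the matching rename in the NVIDIA safety test below, address a different kind of noise: pytest considers module-level classes whose names start with Test for collection (configurable via the python_classes option), so helpers like TestConfig and TestLiteLLMAdapter get picked up and, because they define constructors, tend to trigger collection warnings. A Fake prefix keeps them out of collection entirely. A minimal sketch of the pattern (standalone example, not code from the patch):

from pydantic import BaseModel, Field


# Named "TestConfig", this helper would match pytest's default class
# collection (names starting with "Test"); the "Fake" prefix keeps it
# treated as plain test scaffolding instead.
class FakeConfig(BaseModel):
    api_key: str | None = Field(default=None)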
@@ -19,7 +19,7 @@ from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
 from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter
 
 
-class TestNVIDIASafetyAdapter(NVIDIASafetyAdapter):
+class FakeNVIDIASafetyAdapter(NVIDIASafetyAdapter):
     """Test implementation that provides the required shield_store."""
 
     def __init__(self, config: NVIDIASafetyConfig, shield_store):
@@ -41,7 +41,7 @@ def nvidia_adapter():
     shield_store = AsyncMock()
     shield_store.get_shield = AsyncMock()
 
-    adapter = TestNVIDIASafetyAdapter(config=config, shield_store=shield_store)
+    adapter = FakeNVIDIASafetyAdapter(config=config, shield_store=shield_store)
 
     return adapter
 