diff --git a/llama_stack/providers/utils/memory/openai_vector_store_mixin.py b/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
index d9f8ba550..0e550434e 100644
--- a/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
+++ b/llama_stack/providers/utils/memory/openai_vector_store_mixin.py
@@ -550,7 +550,7 @@ class OpenAIVectorStoreMixin(ABC):
             logger.info(f"Using default embedding model: {model_id} with dimension {embedding_dimension}")
             return model_id, embedding_dimension
 
-        logger.info("DEBUG: No default embedding models found")
+        logger.debug("No default embedding models found")
         return None
 
     async def openai_list_vector_stores(
diff --git a/pyproject.toml b/pyproject.toml
index d55de794d..8921ed071 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -329,3 +329,4 @@ classmethod-decorators = ["classmethod", "pydantic.field_validator"]
 addopts = ["--durations=10"]
 asyncio_mode = "auto"
 markers = ["allow_network: Allow network access for specific unit tests"]
+filterwarnings = "ignore::DeprecationWarning"
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index b5eb1217d..1ae96d448 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -4,17 +4,17 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-import pytest_socket
-
-# We need to import the fixtures here so that pytest can find them
-# but ruff doesn't think they are used and removes the import. "noqa: F401" prevents them from being removed
-from .fixtures import cached_disk_dist_registry, disk_dist_registry, sqlite_kvstore  # noqa: F401
+import os
+import warnings
 
 
-def pytest_runtest_setup(item):
-    """Setup for each test - check if network access should be allowed."""
-    if "allow_network" in item.keywords:
-        pytest_socket.enable_socket()
-    else:
-        # Allowing Unix sockets is necessary for some tests that use local servers and mocks
-        pytest_socket.disable_socket(allow_unix_socket=True)
+def pytest_sessionstart(session) -> None:
+    if "LLAMA_STACK_LOGGING" not in os.environ:
+        os.environ["LLAMA_STACK_LOGGING"] = "all=WARNING"
+
+    # Silence common deprecation spam during unit tests.
+    warnings.filterwarnings("ignore", category=DeprecationWarning)
+    warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
+
+
+pytest_plugins = ["tests.unit.fixtures"]
diff --git a/tests/unit/providers/inference/test_litellm_openai_mixin.py b/tests/unit/providers/inference/test_litellm_openai_mixin.py
index dc17e6abf..1f6a687d6 100644
--- a/tests/unit/providers/inference/test_litellm_openai_mixin.py
+++ b/tests/unit/providers/inference/test_litellm_openai_mixin.py
@@ -15,16 +15,16 @@ from llama_stack.providers.utils.inference.litellm_openai_mixin import LiteLLMOp
 
 
 # Test fixtures and helper classes
-class TestConfig(BaseModel):
+class FakeConfig(BaseModel):
     api_key: str | None = Field(default=None)
 
 
-class TestProviderDataValidator(BaseModel):
+class FakeProviderDataValidator(BaseModel):
     test_api_key: str | None = Field(default=None)
 
 
-class TestLiteLLMAdapter(LiteLLMOpenAIMixin):
-    def __init__(self, config: TestConfig):
+class FakeLiteLLMAdapter(LiteLLMOpenAIMixin):
+    def __init__(self, config: FakeConfig):
         super().__init__(
             litellm_provider_name="test",
             api_key_from_config=config.api_key,
@@ -36,11 +36,11 @@ class TestLiteLLMAdapter(LiteLLMOpenAIMixin):
 @pytest.fixture
 def adapter_with_config_key():
     """Fixture to create adapter with API key in config"""
-    config = TestConfig(api_key="config-api-key")
-    adapter = TestLiteLLMAdapter(config)
+    config = FakeConfig(api_key="config-api-key")
+    adapter = FakeLiteLLMAdapter(config)
     adapter.__provider_spec__ = MagicMock()
     adapter.__provider_spec__.provider_data_validator = (
-        "tests.unit.providers.inference.test_litellm_openai_mixin.TestProviderDataValidator"
+        "tests.unit.providers.inference.test_litellm_openai_mixin.FakeProviderDataValidator"
     )
     return adapter
 
@@ -48,11 +48,11 @@ def adapter_with_config_key():
 @pytest.fixture
 def adapter_without_config_key():
     """Fixture to create adapter without API key in config"""
-    config = TestConfig(api_key=None)
-    adapter = TestLiteLLMAdapter(config)
+    config = FakeConfig(api_key=None)
+    adapter = FakeLiteLLMAdapter(config)
     adapter.__provider_spec__ = MagicMock()
     adapter.__provider_spec__.provider_data_validator = (
-        "tests.unit.providers.inference.test_litellm_openai_mixin.TestProviderDataValidator"
+        "tests.unit.providers.inference.test_litellm_openai_mixin.FakeProviderDataValidator"
     )
     return adapter
 
diff --git a/tests/unit/providers/nvidia/test_safety.py b/tests/unit/providers/nvidia/test_safety.py
index bfd91f466..922d7f61f 100644
--- a/tests/unit/providers/nvidia/test_safety.py
+++ b/tests/unit/providers/nvidia/test_safety.py
@@ -19,7 +19,7 @@ from llama_stack.providers.remote.safety.nvidia.config import NVIDIASafetyConfig
 from llama_stack.providers.remote.safety.nvidia.nvidia import NVIDIASafetyAdapter
 
 
-class TestNVIDIASafetyAdapter(NVIDIASafetyAdapter):
+class FakeNVIDIASafetyAdapter(NVIDIASafetyAdapter):
     """Test implementation that provides the required shield_store."""
 
     def __init__(self, config: NVIDIASafetyConfig, shield_store):
@@ -41,7 +41,7 @@ def nvidia_adapter():
     shield_store = AsyncMock()
     shield_store.get_shield = AsyncMock()
 
-    adapter = TestNVIDIASafetyAdapter(config=config, shield_store=shield_store)
+    adapter = FakeNVIDIASafetyAdapter(config=config, shield_store=shield_store)
 
     return adapter
 