From e90fe25890426b482684dc95a4142f812bcca590 Mon Sep 17 00:00:00 2001
From: Ashwin Bharambe
Date: Thu, 7 Aug 2025 15:29:53 -0700
Subject: [PATCH] fix(tests): move llama stack client init back to fixture
 (#3071)

See inline comments
---
 tests/integration/conftest.py        | 17 -----------------
 tests/integration/fixtures/common.py |  9 +++++++--
 2 files changed, 7 insertions(+), 19 deletions(-)

diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 592cebd89..234d762ce 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -9,15 +9,12 @@ import os
 import platform
 import textwrap
 import time
-import warnings
 
 import pytest
 from dotenv import load_dotenv
 
 from llama_stack.log import get_logger
 
-from .fixtures.common import instantiate_llama_stack_client
-
 logger = get_logger(__name__, category="tests")
 
 
@@ -34,20 +31,6 @@ def pytest_sessionstart(session):
     # stop macOS from complaining about duplicate OpenMP libraries
     os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
 
-    # pull client instantiation to session start so all the complex logs during initialization
-    # don't clobber the test one-liner outputs
-    print("instantiating llama_stack_client")
-    start_time = time.time()
-    with warnings.catch_warnings():
-        warnings.filterwarnings("ignore", category=DeprecationWarning)
-
-        try:
-            session._llama_stack_client = instantiate_llama_stack_client(session)
-        except Exception as e:
-            logger.error(f"Error instantiating llama_stack_client: {e}")
-            session._llama_stack_client = None
-    print(f"llama_stack_client instantiated in {time.time() - start_time:.3f}s")
-
 
 def pytest_runtest_teardown(item):
     # Check if the test actually ran and passed or failed, but was not skipped or an expected failure (xfail)
diff --git a/tests/integration/fixtures/common.py b/tests/integration/fixtures/common.py
index 4549a2fc2..c91391f19 100644
--- a/tests/integration/fixtures/common.py
+++ b/tests/integration/fixtures/common.py
@@ -178,8 +178,13 @@ def skip_if_no_model(request):
 
 @pytest.fixture(scope="session")
 def llama_stack_client(request):
-    client = request.session._llama_stack_client
-    assert client is not None, "llama_stack_client not found in session cache"
+    # ideally, we could do this in session start given all the complex logs during initialization
+    # don't clobber the test one-liner outputs. however, this also means all tests in a sub-directory
+    # would be forced to use llama_stack_client, which is not what we want.
+    print("\ninstantiating llama_stack_client")
+    start_time = time.time()
+    client = instantiate_llama_stack_client(request.session)
+    print(f"llama_stack_client instantiated in {time.time() - start_time:.3f}s")
     return client
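
For context on the pattern the patch returns to: a session-scoped pytest fixture is built lazily the first time a test requests it and is then cached by pytest for the rest of the session, so tests that never ask for it never pay the slow, log-heavy client startup. The sketch below illustrates that behavior with a hypothetical ExpensiveClient stand-in; it is not code from this repository.

# Minimal, self-contained sketch (hypothetical names) of the lazy,
# session-scoped fixture pattern this patch moves back to.
import time

import pytest


class ExpensiveClient:  # hypothetical stand-in for the real client
    def __init__(self):
        time.sleep(0.5)  # simulate slow, noisy initialization


@pytest.fixture(scope="session")
def expensive_client():
    # Instantiated on first use, then cached by pytest for the rest of the session.
    start_time = time.time()
    client = ExpensiveClient()
    print(f"\nclient instantiated in {time.time() - start_time:.3f}s")
    return client


def test_uses_client(expensive_client):
    assert expensive_client is not None  # pays the startup cost (once per session)


def test_unrelated():
    assert True  # never requests the fixture, so never triggers client startup

The trade-off named in the inline comment is the same one the sketch shows: instantiating in pytest_sessionstart keeps initialization logs away from per-test output, but it forces every test run to build the client; the fixture defers that cost to only the tests that actually need it.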