mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-03 09:53:45 +00:00
fix(tests): add OpenAI client connection cleanup to prevent CI hangs
Add explicit connection cleanup and shorter timeouts to OpenAI client fixtures. Fixes CI deadlock after 25+ tests due to connection pool exhaustion. Also adds 60s timeout to test_conversation_context_loading as safety net. Signed-off-by: Charlie Doern <cdoern@redhat.com>
This commit is contained in:
parent
97ccfb5e62
commit
72ee2b3548
3 changed files with 23 additions and 3 deletions
|
|
@ -323,7 +323,13 @@ def require_server(llama_stack_client):
|
|||
@pytest.fixture(scope="session")
def openai_client(llama_stack_client, require_server):
    """Session-scoped OpenAI client pointed at the llama-stack server's /v1 API.

    Configured with ``max_retries=0`` and a 30s timeout so a wedged server
    fails fast instead of hanging CI, and the HTTP connection pool is closed
    on teardown to prevent connection-pool exhaustion across many tests.

    Args:
        llama_stack_client: project fixture providing the running server's base URL.
        require_server: project fixture that skips/fails when no server is up.

    Yields:
        OpenAI: a configured client for the duration of the test session.
    """
    base_url = f"{llama_stack_client.base_url}/v1"
    client = OpenAI(base_url=base_url, api_key="fake", max_retries=0, timeout=30.0)
    yield client
    # Cleanup: close HTTP connections. Best-effort — teardown must never
    # fail the test session because a socket was already gone.
    try:
        client.close()
    except Exception:
        pass
|
||||
|
||||
|
||||
@pytest.fixture(params=["openai_client", "client_with_models"])
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue