From 0bdfc71f8da402c2cdbaa96ac91de2afd71b617e Mon Sep 17 00:00:00 2001 From: Yuan Tang Date: Tue, 18 Mar 2025 00:33:04 -0400 Subject: [PATCH] test: Bump slow_callback_duration to 200ms to avoid flaky remote vLLM unit tests (#1675) # What does this PR do? This avoids a flaky timeout issue observed in CI builds, e.g. https://github.com/meta-llama/llama-stack/actions/runs/13907179329/job/38912865968?pr=1273 ## Test Plan Ran multiple times and passed consistently. Signed-off-by: Yuan Tang --- tests/unit/providers/inference/test_remote_vllm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/providers/inference/test_remote_vllm.py b/tests/unit/providers/inference/test_remote_vllm.py index 3afe1389e..cb0997e1a 100644 --- a/tests/unit/providers/inference/test_remote_vllm.py +++ b/tests/unit/providers/inference/test_remote_vllm.py @@ -187,8 +187,8 @@ def test_chat_completion_doesnt_block_event_loop(caplog): loop.set_debug(True) caplog.set_level(logging.WARNING) - # Log when event loop is blocked for more than 100ms - loop.slow_callback_duration = 0.1 + # Log when event loop is blocked for more than 200ms + loop.slow_callback_duration = 0.2 # Sleep for 500ms in our delayed http response sleep_time = 0.5