diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3cfa15c99..5aaf6c9d2 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -31,6 +31,7 @@ jobs:
             python -m pip install --upgrade pip
             python -m pip install -r .circleci/requirements.txt
             pip install "pytest==7.3.1"
+            pip install "pytest-retry==1.6.3"
             pip install "pytest-asyncio==0.21.1"
             pip install mypy
             pip install "google-generativeai==0.3.2"
@@ -171,6 +172,7 @@ jobs:
             python -m pip install --upgrade pip
             python -m pip install -r .circleci/requirements.txt
             pip install "pytest==7.3.1"
+            pip install "pytest-retry==1.6.3"
             pip install "pytest-mock==3.12.0"
             pip install "pytest-asyncio==0.21.1"
             pip install mypy
@@ -282,6 +284,7 @@ jobs:
           name: Install Dependencies
           command: |
             pip install "pytest==7.3.1"
+            pip install "pytest-retry==1.6.3"
             pip install "pytest-asyncio==0.21.1"
             pip install aiohttp
             pip install "openai==1.40.0"
diff --git a/litellm/tests/test_lowest_latency_routing.py b/litellm/tests/test_lowest_latency_routing.py
index 5d33bb0ed..1d477459b 100644
--- a/litellm/tests/test_lowest_latency_routing.py
+++ b/litellm/tests/test_lowest_latency_routing.py
@@ -299,6 +299,7 @@ async def _gather_deploy(all_deploys):
 @pytest.mark.parametrize(
     "ans_rpm", [1, 5]
 )  # 1 should produce nothing, 10 should select first
+@pytest.mark.flaky(retries=3, delay=1)
 def test_get_available_endpoints_tpm_rpm_check_async(ans_rpm):
     """
     Pass in list of 2 valid models
diff --git a/litellm/tests/test_openai_batches_and_files.py b/litellm/tests/test_openai_batches_and_files.py
index cad5052c2..d55d868b3 100644
--- a/litellm/tests/test_openai_batches_and_files.py
+++ b/litellm/tests/test_openai_batches_and_files.py
@@ -95,6 +95,7 @@ def test_create_batch(provider):
 
 @pytest.mark.parametrize("provider", ["openai", "azure"])
 @pytest.mark.asyncio()
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_async_create_batch(provider):
     """
     1. Create File for Batch completion
diff --git a/litellm/tests/test_router_caching.py b/litellm/tests/test_router_caching.py
index 0f64a1bd4..cfe3bc7c0 100644
--- a/litellm/tests/test_router_caching.py
+++ b/litellm/tests/test_router_caching.py
@@ -1,7 +1,11 @@
 #### What this tests ####
 # This tests caching on the router
-import sys, os, time
-import traceback, asyncio
+import asyncio
+import os
+import sys
+import time
+import traceback
+
 import pytest
 
 sys.path.insert(
@@ -71,6 +75,7 @@ def test_router_sync_caching_with_ssl_url():
 
 
 @pytest.mark.asyncio
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_acompletion_caching_on_router():
     # tests acompletion + caching on router
     try:
diff --git a/tests/test_openai_endpoints.py b/tests/test_openai_endpoints.py
index 932b32551..8c883ef4d 100644
--- a/tests/test_openai_endpoints.py
+++ b/tests/test_openai_endpoints.py
@@ -325,6 +325,7 @@ async def test_chat_completion_ratelimit():
 
 
 @pytest.mark.asyncio
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_chat_completion_different_deployments():
     """
     - call model group with 2 deployments