Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 19:54:13 +00:00)
fix flaky tests

parent 0f999524c9
commit 64398fe123

4 changed files with 4 additions and 0 deletions
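Each hunk below makes the same one-line change: a flaky-retry marker is added above a test that fails intermittently on live API calls. As a minimal sketch of the pattern (assuming the pytest-retry plugin, which registers a flaky marker with retries and delay arguments matching the signature used in this diff; call_model below is a hypothetical placeholder, not part of the litellm test suite):

    import pytest


    def call_model(model_name: str) -> str:
        # Placeholder for a network-bound call that can fail transiently.
        return f"response from {model_name}"


    @pytest.mark.flaky(retries=3, delay=1)  # rerun up to 3 times, 1s apart
    def test_completion_smoke():
        assert "response" in call_model("gemini/gemini-1.5-flash")

With the marker in place, a failing run is retried up to three times with a one-second pause between attempts, so transient provider errors no longer fail the CI job outright.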
@@ -4078,6 +4078,7 @@ def test_completion_nvidia_nim():
         # "gemini-1.5-flash",
     ],
 )
+@pytest.mark.flaky(retries=3, delay=1)
 def test_completion_gemini(model):
     litellm.set_verbose = True
     model_name = "gemini/{}".format(model)
@@ -758,6 +758,7 @@ async def test_streaming_router_tpm_limit():
 
 
 @pytest.mark.asyncio
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_bad_router_call():
     litellm.set_verbose = True
     model_list = [
@@ -142,6 +142,7 @@ async def test_acompletion_caching_on_router():
 
 
 @pytest.mark.asyncio
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_completion_caching_on_router():
     # tests completion + caching on router
     try:
@@ -714,6 +714,7 @@ async def test_acompletion_claude_2_stream():
     [True, False],
 )  # ,
 @pytest.mark.asyncio
+@pytest.mark.flaky(retries=3, delay=1)
 async def test_completion_gemini_stream(sync_mode):
     try:
         litellm.set_verbose = True