Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
fix(router.py): don't cooldown on apiconnectionerrors
Fixes an issue where a model would be placed in cooldown due to API connection errors.
parent: 8782ee444d
commit: 0b06a76cf9

3 changed files with 78 additions and 5 deletions
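The router-side change itself is not part of the hunk shown below, which only adds the regression test. Going by the Router()._is_cooldown_required(exception_status=..., exception_str=...) call preserved in the test's commented-out block, the guard plausibly amounts to something like the following minimal sketch. This is an illustration of the idea, not litellm's actual router.py code; the standalone signature and the substring check are assumptions.

```python
# Minimal sketch of the cooldown guard this commit describes - NOT the actual
# litellm implementation. The parameters mirror the
# Router()._is_cooldown_required(...) call exercised in the test below; the
# substring check on the exception text is an assumption for illustration.
from typing import Optional


def _is_cooldown_required(exception_status: int, exception_str: Optional[str] = None) -> bool:
    """Return True if the failing deployment should be placed in cooldown."""
    # API connection errors are typically transient client/network failures,
    # not evidence that the deployment itself is unhealthy, so skip cooldown.
    if exception_str is not None and "APIConnectionError" in exception_str:
        return False
    # Other failure classes (rate limits, auth errors, ...) may still put the
    # deployment into cooldown as before.
    return True
```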
@@ -2107,3 +2107,51 @@ def test_router_context_window_pre_call_check(model, base_model, llm_provider):
         pass
     except Exception as e:
         pytest.fail(f"Got unexpected exception on router! - {str(e)}")
+
+
+def test_router_cooldown_api_connection_error():
+    # try:
+    #     _ = litellm.completion(
+    #         model="vertex_ai/gemini-1.5-pro",
+    #         messages=[{"role": "admin", "content": "Fail on this!"}],
+    #     )
+    # except litellm.APIConnectionError as e:
+    #     assert (
+    #         Router()._is_cooldown_required(
+    #             exception_status=e.code, exception_str=str(e)
+    #         )
+    #         is False
+    #     )
+
+    router = Router(
+        model_list=[
+            {
+                "model_name": "gemini-1.5-pro",
+                "litellm_params": {"model": "vertex_ai/gemini-1.5-pro"},
+            }
+        ]
+    )
+
+    try:
+        router.completion(
+            model="gemini-1.5-pro",
+            messages=[{"role": "admin", "content": "Fail on this!"}],
+        )
+    except litellm.APIConnectionError:
+        pass
+
+    try:
+        router.completion(
+            model="gemini-1.5-pro",
+            messages=[{"role": "admin", "content": "Fail on this!"}],
+        )
+    except litellm.APIConnectionError:
+        pass
+
+    try:
+        router.completion(
+            model="gemini-1.5-pro",
+            messages=[{"role": "admin", "content": "Fail on this!"}],
+        )
+    except litellm.APIConnectionError:
+        pass
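The test deliberately triggers the same failure three times (the "admin" message role and "Fail on this!" content are designed to error out against vertex_ai/gemini-1.5-pro) and swallows each litellm.APIConnectionError. With the fix in place, none of these repeated connection errors should trip the gemini-1.5-pro deployment into cooldown, so the router can keep routing to it.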