Merge pull request #2620 from BerriAI/litellm_fix_retry_logic

[fix] retry logic: when using the router/proxy, don't also retry at the litellm.completion level
Krish Dholakia, 2024-03-21 20:56:05 -07:00 (committed by GitHub)
commit c980093ca4


@@ -2752,7 +2752,12 @@ def client(original_function):
                     context_window_fallback_dict = kwargs.get(
                         "context_window_fallback_dict", {}
                     )
-                    if num_retries:
+                    _is_litellm_router_call = "model_group" in kwargs.get(
+                        "metadata", {}
+                    )  # check if call is from litellm.router/proxy
+                    if (
+                        num_retries and not _is_litellm_router_call
+                    ):  # only retry here if the call is not from the litellm router/proxy; the router has its own retry logic
                         if (
                             isinstance(e, openai.APIError)
                             or isinstance(e, openai.Timeout)
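
For reference, the new check can be pulled out as a standalone sketch (a hypothetical helper, not part of this diff): the litellm Router/proxy tags outbound calls with a "model_group" key in metadata, and its presence is what tells the completion-level wrapper to skip its own retries.

def _is_router_call(kwargs: dict) -> bool:
    # Hypothetical standalone form of the check added above: the Router/proxy
    # puts "model_group" into metadata, so its presence means router-level
    # retries will handle failures and completion-level retries should not run.
    return "model_group" in kwargs.get("metadata", {})

assert _is_router_call({"metadata": {"model_group": "gpt-4"}})  # call via router/proxy
assert not _is_router_call({"metadata": {}})                    # direct completion call
assert not _is_router_call({})                                  # no metadata at all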
@@ -3222,7 +3227,12 @@ def client(original_function):
                     context_window_fallback_dict = kwargs.get(
                         "context_window_fallback_dict", {}
                     )
-                    if num_retries:
+                    _is_litellm_router_call = "model_group" in kwargs.get(
+                        "metadata", {}
+                    )  # check if call is from litellm.router/proxy
+                    if (
+                        num_retries and not _is_litellm_router_call
+                    ):  # only retry here if the call is not from the litellm router/proxy; the router has its own retry logic
                         try:
                             kwargs["num_retries"] = num_retries
                             kwargs["original_function"] = original_function