Merge pull request #4080 from BerriAI/litellm_predibase_exception_mapping

fix(utils.py): improved predibase exception mapping
Krish Dholakia 2024-06-08 20:27:44 -07:00 committed by GitHub
commit b4fc4abb76
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 215 additions and 38 deletions


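For context, a minimal sketch of how the new mapping surfaces to callers. The model name, API key, and handled exception types below are illustrative placeholders, not values from this PR; each branch in the diff raises the matching litellm exception type, so callers can handle Predibase HTTP errors the same way as other providers:

    import litellm

    try:
        litellm.completion(
            model="predibase/llama-3-8b-instruct",  # placeholder model name
            messages=[{"role": "user", "content": "hello"}],
            api_key="bad-key",  # placeholder credentials
        )
    except litellm.AuthenticationError as e:
        # a 401 from Predibase now raises AuthenticationError
        print(e.llm_provider, e.message)
    except litellm.InternalServerError as e:
        # a 500 from Predibase now raises InternalServerError
        print(e.message)
    except litellm.RateLimitError as e:
        # a 429 from Predibase now raises RateLimitError
        print(e.message)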
@@ -8725,6 +8725,75 @@ def exception_type(
response=original_exception.response,
litellm_debug_info=extra_information,
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 500:
exception_mapping_worked = True
raise litellm.InternalServerError(
message=f"PredibaseException - {original_exception.message}",
llm_provider="predibase",
model=model,
)
elif original_exception.status_code == 401:
exception_mapping_worked = True
raise AuthenticationError(
message=f"PredibaseException - {original_exception.message}",
llm_provider="predibase",
model=model,
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
raise BadRequestError(
message=f"PredibaseException - {original_exception.message}",
llm_provider="predibase",
model=model,
)
elif original_exception.status_code == 404:
exception_mapping_worked = True
raise NotFoundError(
message=f"PredibaseException - {original_exception.message}",
llm_provider="predibase",
model=model,
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
raise Timeout(
message=f"PredibaseException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 422:
exception_mapping_worked = True
raise BadRequestError(
message=f"PredibaseException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
raise RateLimitError(
message=f"PredibaseException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 503:
exception_mapping_worked = True
raise ServiceUnavailableError(
message=f"PredibaseException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 504: # gateway timeout error
exception_mapping_worked = True
raise Timeout(
message=f"PredibaseException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
litellm_debug_info=extra_information,
)
elif custom_llm_provider == "bedrock":
if (
"too many tokens" in error_str