forked from phoenix/litellm-mirror
Merge pull request #4080 from BerriAI/litellm_predibase_exception_mapping
fix(utils.py): improved predibase exception mapping
Commit b4fc4abb76
8 changed files with 215 additions and 38 deletions
@@ -8725,6 +8725,75 @@ def exception_type(
                        response=original_exception.response,
                        litellm_debug_info=extra_information,
                    )
                elif hasattr(original_exception, "status_code"):
                    if original_exception.status_code == 500:
                        exception_mapping_worked = True
                        raise litellm.InternalServerError(
                            message=f"PredibaseException - {original_exception.message}",
                            llm_provider="predibase",
                            model=model,
                        )
                    elif original_exception.status_code == 401:
                        exception_mapping_worked = True
                        raise AuthenticationError(
                            message=f"PredibaseException - {original_exception.message}",
                            llm_provider="predibase",
                            model=model,
                        )
                    elif original_exception.status_code == 400:
                        exception_mapping_worked = True
                        raise BadRequestError(
                            message=f"PredibaseException - {original_exception.message}",
                            llm_provider="predibase",
                            model=model,
                        )
                    elif original_exception.status_code == 404:
                        exception_mapping_worked = True
                        raise NotFoundError(
                            message=f"PredibaseException - {original_exception.message}",
                            llm_provider="predibase",
                            model=model,
                        )
                    elif original_exception.status_code == 408:
                        exception_mapping_worked = True
                        raise Timeout(
                            message=f"PredibaseException - {original_exception.message}",
                            model=model,
                            llm_provider=custom_llm_provider,
                            litellm_debug_info=extra_information,
                        )
                    elif original_exception.status_code == 422:
                        exception_mapping_worked = True
                        raise BadRequestError(
                            message=f"PredibaseException - {original_exception.message}",
                            model=model,
                            llm_provider=custom_llm_provider,
                            litellm_debug_info=extra_information,
                        )
                    elif original_exception.status_code == 429:
                        exception_mapping_worked = True
                        raise RateLimitError(
                            message=f"PredibaseException - {original_exception.message}",
                            model=model,
                            llm_provider=custom_llm_provider,
                            litellm_debug_info=extra_information,
                        )
                    elif original_exception.status_code == 503:
                        exception_mapping_worked = True
                        raise ServiceUnavailableError(
                            message=f"PredibaseException - {original_exception.message}",
                            model=model,
                            llm_provider=custom_llm_provider,
                            litellm_debug_info=extra_information,
                        )
                    elif original_exception.status_code == 504:  # gateway timeout error
                        exception_mapping_worked = True
                        raise Timeout(
                            message=f"PredibaseException - {original_exception.message}",
                            model=model,
                            llm_provider=custom_llm_provider,
                            litellm_debug_info=extra_information,
                        )
            elif custom_llm_provider == "bedrock":
                if (
                    "too many tokens" in error_str
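Taken together, the new branch maps Predibase HTTP status codes onto litellm's exception classes: 500 to InternalServerError, 401 to AuthenticationError, 400 and 422 to BadRequestError, 404 to NotFoundError, 408 and 504 to Timeout, 429 to RateLimitError, and 503 to ServiceUnavailableError. Below is a minimal caller-side sketch of what that mapping buys you; the model id and message are placeholders and Predibase credentials are omitted, so none of those details come from this PR.

    # Sketch: catching the exceptions that exception_type() now raises for Predibase errors.
    # The model name and prompt are hypothetical; configure Predibase credentials as usual.
    import litellm

    try:
        response = litellm.completion(
            model="predibase/llama-3-8b-instruct",  # placeholder model id
            messages=[{"role": "user", "content": "Hello"}],
        )
        print(response)
    except litellm.RateLimitError as e:
        # A 429 from Predibase is re-raised as litellm.RateLimitError
        print(f"Rate limited, retry later: {e}")
    except litellm.Timeout as e:
        # 408 and 504 both map to litellm.Timeout
        print(f"Request timed out: {e}")
    except litellm.InternalServerError as e:
        # 500 maps to litellm.InternalServerError
        print(f"Provider-side failure: {e}")
    except litellm.APIError as e:
        # Catch-all for the remaining mapped litellm exceptions
        print(f"Other API error: {e}")

Because every branch sets exception_mapping_worked and raises a litellm exception type, callers can handle Predibase failures with the same except clauses they already use for other providers instead of parsing provider-specific errors.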