feat - include litellm exception type when raising exception

parent 7e9777e93c
commit 33632df4c5

1 changed file with 43 additions and 42 deletions
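The change is mechanical but useful: every mapped exception's message now begins with the litellm exception type, so the type survives in logs and error strings that only capture str(e). A minimal sketch of the caller-visible effect (the oversized prompt is hypothetical, and a configured OpenAI key is assumed):

import litellm

# Hypothetical oversized prompt, only to trigger the context-window mapping.
very_long_prompt = "word " * 200_000

try:
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": very_long_prompt}],
    )
except litellm.ContextWindowExceededError as e:
    # After this commit, str(e) is self-describing, e.g.
    # "ContextWindowExceededError: OpenAIException - This model's maximum
    # context length is ...", so the litellm type stays visible even when
    # only the message text is logged.
    print(str(e))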
@@ -8555,7 +8555,7 @@ def exception_type(
                 if "This model's maximum context length is" in error_str:
                     exception_mapping_worked = True
                     raise ContextWindowExceededError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"ContextWindowExceededError: {exception_provider} - {message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
@@ -8579,7 +8579,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise ContentPolicyViolationError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"ContentPolicyViolationError: {exception_provider} - {message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
@@ -8591,7 +8591,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"BadRequestError: {exception_provider} - {message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
@@ -8599,7 +8599,7 @@ def exception_type(
                     )
                 elif "Request too large" in error_str:
                     raise RateLimitError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"RateLimitError: {exception_provider} - {message}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -8611,7 +8611,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise AuthenticationError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"AuthenticationError: {exception_provider} - {message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
@@ -8635,7 +8635,7 @@ def exception_type(
                 if original_exception.status_code == 401:
                     exception_mapping_worked = True
                     raise AuthenticationError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"AuthenticationError: {exception_provider} - {message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
@@ -8644,7 +8644,7 @@ def exception_type(
                 elif original_exception.status_code == 404:
                     exception_mapping_worked = True
                     raise NotFoundError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"NotFoundError: {exception_provider} - {message}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -8653,7 +8653,7 @@ def exception_type(
                 elif original_exception.status_code == 408:
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"{exception_provider} - {message}",
+                        message=f"Timeout Error: {exception_provider} - {message}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         litellm_debug_info=extra_information,
@@ -8661,7 +8661,7 @@ def exception_type(
                 elif original_exception.status_code == 422:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"BadRequestError: {exception_provider} - {message}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -8670,7 +8670,7 @@ def exception_type(
                 elif original_exception.status_code == 429:
                     exception_mapping_worked = True
                     raise RateLimitError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"RateLimitError: {exception_provider} - {message}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -8679,7 +8679,7 @@ def exception_type(
                 elif original_exception.status_code == 503:
                     exception_mapping_worked = True
                     raise ServiceUnavailableError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"ServiceUnavailableError: {exception_provider} - {message}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -8688,7 +8688,7 @@ def exception_type(
                 elif original_exception.status_code == 504: # gateway timeout error
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"{exception_provider} - {message}",
+                        message=f"Timeout Error: {exception_provider} - {message}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         litellm_debug_info=extra_information,
@@ -8697,7 +8697,7 @@ def exception_type(
                     exception_mapping_worked = True
                     raise APIError(
                         status_code=original_exception.status_code,
-                        message=f"{exception_provider} - {message}",
+                        message=f"APIError: {exception_provider} - {message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         request=original_exception.request,
@@ -8706,7 +8706,7 @@ def exception_type(
                 else:
                     # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
                     raise APIConnectionError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"APIConnectionError: {exception_provider} - {message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         litellm_debug_info=extra_information,
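Each branch above maps a provider status code or error string onto a litellm exception class that also carries llm_provider and model. A hedged sketch of generic handling built on that mapping (the model name and the retry split are illustrative, not part of this commit):

import litellm

try:
    litellm.completion(model="gpt-4", messages=[{"role": "user", "content": "hi"}])
except (litellm.RateLimitError, litellm.ServiceUnavailableError) as e:
    # Transient cases (the 429/503 branches above): retrying with backoff is reasonable.
    print(f"retryable from {e.llm_provider}: {e}")
except litellm.AuthenticationError as e:
    # The 401 branch above: retrying will not help; fix credentials instead.
    print(f"fatal: {e}")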
@@ -9082,7 +9082,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"VertexAIException - {error_str}",
+                        message=f"VertexAIException BadRequestError - {error_str}",
                         model=model,
                         llm_provider="vertex_ai",
                         response=original_exception.response,
@@ -9094,7 +9094,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise APIError(
-                        message=f"VertexAIException - {error_str}",
+                        message=f"VertexAIException APIError - {error_str}",
                         status_code=500,
                         model=model,
                         llm_provider="vertex_ai",
@@ -9104,7 +9104,7 @@ def exception_type(
                 elif "403" in error_str:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"VertexAIException - {error_str}",
+                        message=f"VertexAIException BadRequestError - {error_str}",
                         model=model,
                         llm_provider="vertex_ai",
                         response=original_exception.response,
@@ -9113,7 +9113,7 @@ def exception_type(
                 elif "The response was blocked." in error_str:
                     exception_mapping_worked = True
                     raise UnprocessableEntityError(
-                        message=f"VertexAIException - {error_str}",
+                        message=f"VertexAIException UnprocessableEntityError - {error_str}",
                         model=model,
                         llm_provider="vertex_ai",
                         litellm_debug_info=extra_information,
@@ -9133,7 +9133,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise RateLimitError(
-                        message=f"VertexAIException - {error_str}",
+                        message=f"VertexAIException RateLimitError - {error_str}",
                         model=model,
                         llm_provider="vertex_ai",
                         litellm_debug_info=extra_information,
@@ -9149,7 +9149,7 @@ def exception_type(
                 if original_exception.status_code == 400:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"VertexAIException - {error_str}",
+                        message=f"VertexAIException BadRequestError - {error_str}",
                         model=model,
                         llm_provider="vertex_ai",
                         litellm_debug_info=extra_information,
@@ -9158,7 +9158,7 @@ def exception_type(
                 if original_exception.status_code == 500:
                     exception_mapping_worked = True
                     raise APIError(
-                        message=f"VertexAIException - {error_str}",
+                        message=f"VertexAIException APIError - {error_str}",
                         status_code=500,
                         model=model,
                         llm_provider="vertex_ai",
@@ -9763,7 +9763,7 @@ def exception_type(
                     exception_mapping_worked = True
                     raise APIError(
                         status_code=500,
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException Internal server error - {original_exception.message}",
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9772,7 +9772,7 @@ def exception_type(
                 elif "This model's maximum context length is" in error_str:
                     exception_mapping_worked = True
                     raise ContextWindowExceededError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException ContextWindowExceededError - {original_exception.message}",
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9781,7 +9781,7 @@ def exception_type(
                 elif "DeploymentNotFound" in error_str:
                     exception_mapping_worked = True
                     raise NotFoundError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException NotFoundError - {original_exception.message}",
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9796,7 +9796,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise ContentPolicyViolationError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException ContentPolicyViolationError - {original_exception.message}",
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9805,7 +9805,7 @@ def exception_type(
                 elif "invalid_request_error" in error_str:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException BadRequestError - {original_exception.message}",
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9817,7 +9817,7 @@ def exception_type(
                 ):
                     exception_mapping_worked = True
                     raise AuthenticationError(
-                        message=f"{exception_provider} - {original_exception.message}",
+                        message=f"{exception_provider} AuthenticationError - {original_exception.message}",
                         llm_provider=custom_llm_provider,
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9828,7 +9828,7 @@ def exception_type(
                 if original_exception.status_code == 401:
                     exception_mapping_worked = True
                     raise AuthenticationError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException AuthenticationError - {original_exception.message}",
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9837,7 +9837,7 @@ def exception_type(
                 elif original_exception.status_code == 408:
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException Timeout - {original_exception.message}",
                         model=model,
                         litellm_debug_info=extra_information,
                         llm_provider="azure",
@@ -9845,7 +9845,7 @@ def exception_type(
                 if original_exception.status_code == 422:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException BadRequestError - {original_exception.message}",
                         model=model,
                         llm_provider="azure",
                         litellm_debug_info=extra_information,
@@ -9854,7 +9854,7 @@ def exception_type(
                 elif original_exception.status_code == 429:
                     exception_mapping_worked = True
                     raise RateLimitError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException RateLimitError - {original_exception.message}",
                         model=model,
                         llm_provider="azure",
                         litellm_debug_info=extra_information,
@@ -9863,7 +9863,7 @@ def exception_type(
                 elif original_exception.status_code == 503:
                     exception_mapping_worked = True
                     raise ServiceUnavailableError(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException ServiceUnavailableError - {original_exception.message}",
                         model=model,
                         llm_provider="azure",
                         litellm_debug_info=extra_information,
@@ -9872,7 +9872,7 @@ def exception_type(
                 elif original_exception.status_code == 504: # gateway timeout error
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException Timeout - {original_exception.message}",
                         model=model,
                         litellm_debug_info=extra_information,
                         llm_provider="azure",
@@ -9881,7 +9881,7 @@ def exception_type(
                     exception_mapping_worked = True
                     raise APIError(
                         status_code=original_exception.status_code,
-                        message=f"AzureException - {original_exception.message}",
+                        message=f"AzureException APIError - {original_exception.message}",
                         llm_provider="azure",
                         litellm_debug_info=extra_information,
                         model=model,
@@ -9892,7 +9892,7 @@ def exception_type(
                 else:
                     # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors
                     raise APIConnectionError(
-                        message=f"{exception_provider} - {message}",
+                        message=f"{exception_provider} APIConnectionError - {message}",
                         llm_provider="azure",
                         model=model,
                         litellm_debug_info=extra_information,
@@ -9904,7 +9904,7 @@ def exception_type(
                 ): # deal with edge-case invalid request error bug in openai-python sdk
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"{exception_provider}: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
+                        message=f"{exception_provider} BadRequestError : This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
                         model=model,
                         llm_provider=custom_llm_provider,
                         response=original_exception.response,
@@ -11416,11 +11416,8 @@ class CustomStreamWrapper:
                    self.response_id = original_chunk.id
                if len(original_chunk.choices) > 0:
                    delta = original_chunk.choices[0].delta
-                    if (
-                        delta is not None and (
-                            delta.function_call is not None
-                            or delta.tool_calls is not None
-                        )
+                    if delta is not None and (
+                        delta.function_call is not None or delta.tool_calls is not None
                    ):
                        try:
                            model_response.system_fingerprint = (
@@ -11481,7 +11478,11 @@ class CustomStreamWrapper:
                        model_response.choices[0].delta = Delta()
                    else:
                        try:
-                            delta = dict() if original_chunk.choices[0].delta is None else dict(original_chunk.choices[0].delta)
+                            delta = (
+                                dict()
+                                if original_chunk.choices[0].delta is None
+                                else dict(original_chunk.choices[0].delta)
+                            )
                            print_verbose(f"original delta: {delta}")
                            model_response.choices[0].delta = Delta(**delta)
                            print_verbose(
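The last two hunks only re-wrap CustomStreamWrapper for formatting; behavior is unchanged. The dict(...) conversion works because the OpenAI SDK's streaming delta is a pydantic model, and iterating a pydantic model yields (field, value) pairs. A small illustration (the exact field set depends on the installed openai version):

from openai.types.chat.chat_completion_chunk import ChoiceDelta

d = ChoiceDelta(content="hello")
# dict() consumes the model's (field, value) pairs, giving plain kwargs for
# litellm's own Delta object, as in the hunk above.
print(dict(d))  # e.g. {'content': 'hello', 'function_call': None, 'role': None, 'tool_calls': None}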