diff --git a/litellm/utils.py b/litellm/utils.py index 7d5e5235f..5da01c764 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -8561,7 +8561,7 @@ def exception_type( if "This model's maximum context length is" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( - message=f"{exception_provider} - {message}", + message=f"ContextWindowExceededError: {exception_provider} - {message}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, @@ -8585,7 +8585,7 @@ def exception_type( ): exception_mapping_worked = True raise ContentPolicyViolationError( - message=f"{exception_provider} - {message}", + message=f"ContentPolicyViolationError: {exception_provider} - {message}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, @@ -8597,7 +8597,7 @@ def exception_type( ): exception_mapping_worked = True raise BadRequestError( - message=f"{exception_provider} - {message}", + message=f"BadRequestError: {exception_provider} - {message}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, @@ -8605,7 +8605,7 @@ def exception_type( ) elif "Request too large" in error_str: raise RateLimitError( - message=f"{exception_provider} - {message}", + message=f"RateLimitError: {exception_provider} - {message}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, @@ -8617,7 +8617,7 @@ def exception_type( ): exception_mapping_worked = True raise AuthenticationError( - message=f"{exception_provider} - {message}", + message=f"AuthenticationError: {exception_provider} - {message}", llm_provider=custom_llm_provider, model=model, response=original_exception.response, @@ -8650,7 +8650,7 @@ def exception_type( elif original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( - message=f"{exception_provider} - {message}", + message=f"AuthenticationError: {exception_provider} - {message}", 
llm_provider=custom_llm_provider, model=model, response=original_exception.response, @@ -8659,7 +8659,7 @@ def exception_type( elif original_exception.status_code == 404: exception_mapping_worked = True raise NotFoundError( - message=f"{exception_provider} - {message}", + message=f"NotFoundError: {exception_provider} - {message}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, @@ -8668,7 +8668,7 @@ def exception_type( elif original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( - message=f"{exception_provider} - {message}", + message=f"Timeout Error: {exception_provider} - {message}", model=model, llm_provider=custom_llm_provider, litellm_debug_info=extra_information, @@ -8676,7 +8676,7 @@ def exception_type( elif original_exception.status_code == 422: exception_mapping_worked = True raise BadRequestError( - message=f"{exception_provider} - {message}", + message=f"BadRequestError: {exception_provider} - {message}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, @@ -8685,7 +8685,7 @@ def exception_type( elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( - message=f"{exception_provider} - {message}", + message=f"RateLimitError: {exception_provider} - {message}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, @@ -8694,7 +8694,7 @@ def exception_type( elif original_exception.status_code == 503: exception_mapping_worked = True raise ServiceUnavailableError( - message=f"{exception_provider} - {message}", + message=f"ServiceUnavailableError: {exception_provider} - {message}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, @@ -8703,7 +8703,7 @@ def exception_type( elif original_exception.status_code == 504: # gateway timeout error exception_mapping_worked = True raise Timeout( - message=f"{exception_provider} - {message}", + message=f"Timeout 
Error: {exception_provider} - {message}", model=model, llm_provider=custom_llm_provider, litellm_debug_info=extra_information, @@ -8712,7 +8712,7 @@ def exception_type( exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, - message=f"{exception_provider} - {message}", + message=f"APIError: {exception_provider} - {message}", llm_provider=custom_llm_provider, model=model, request=original_exception.request, @@ -8721,7 +8721,7 @@ def exception_type( else: # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors raise APIConnectionError( - message=f"{exception_provider} - {message}", + message=f"APIConnectionError: {exception_provider} - {message}", llm_provider=custom_llm_provider, model=model, litellm_debug_info=extra_information, @@ -9097,7 +9097,7 @@ def exception_type( ): exception_mapping_worked = True raise BadRequestError( - message=f"VertexAIException - {error_str}", + message=f"VertexAIException BadRequestError - {error_str}", model=model, llm_provider="vertex_ai", response=original_exception.response, @@ -9109,7 +9109,7 @@ def exception_type( ): exception_mapping_worked = True raise APIError( - message=f"VertexAIException - {error_str}", + message=f"VertexAIException APIError - {error_str}", status_code=500, model=model, llm_provider="vertex_ai", @@ -9119,7 +9119,7 @@ def exception_type( elif "403" in error_str: exception_mapping_worked = True raise BadRequestError( - message=f"VertexAIException - {error_str}", + message=f"VertexAIException BadRequestError - {error_str}", model=model, llm_provider="vertex_ai", response=original_exception.response, @@ -9128,7 +9128,7 @@ def exception_type( elif "The response was blocked." 
in error_str: exception_mapping_worked = True raise UnprocessableEntityError( - message=f"VertexAIException - {error_str}", + message=f"VertexAIException UnprocessableEntityError - {error_str}", model=model, llm_provider="vertex_ai", litellm_debug_info=extra_information, @@ -9148,7 +9148,7 @@ def exception_type( ): exception_mapping_worked = True raise RateLimitError( - message=f"VertexAIException - {error_str}", + message=f"VertexAIException RateLimitError - {error_str}", model=model, llm_provider="vertex_ai", litellm_debug_info=extra_information, @@ -9165,7 +9165,7 @@ def exception_type( if original_exception.status_code == 400: exception_mapping_worked = True raise BadRequestError( - message=f"VertexAIException - {error_str}", + message=f"VertexAIException BadRequestError - {error_str}", model=model, llm_provider="vertex_ai", litellm_debug_info=extra_information, @@ -9174,7 +9174,7 @@ def exception_type( if original_exception.status_code == 500: exception_mapping_worked = True raise APIError( - message=f"VertexAIException - {error_str}", + message=f"VertexAIException APIError - {error_str}", status_code=500, model=model, llm_provider="vertex_ai", @@ -9779,7 +9779,7 @@ def exception_type( exception_mapping_worked = True raise APIError( status_code=500, - message=f"AzureException - {original_exception.message}", + message=f"AzureException Internal server error - {original_exception.message}", llm_provider="azure", model=model, litellm_debug_info=extra_information, @@ -9788,7 +9788,7 @@ def exception_type( elif "This model's maximum context length is" in error_str: exception_mapping_worked = True raise ContextWindowExceededError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException ContextWindowExceededError - {original_exception.message}", llm_provider="azure", model=model, litellm_debug_info=extra_information, @@ -9797,7 +9797,7 @@ def exception_type( elif "DeploymentNotFound" in error_str: exception_mapping_worked = True raise 
NotFoundError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException NotFoundError - {original_exception.message}", llm_provider="azure", model=model, litellm_debug_info=extra_information, @@ -9812,7 +9812,7 @@ def exception_type( ): exception_mapping_worked = True raise ContentPolicyViolationError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException ContentPolicyViolationError - {original_exception.message}", llm_provider="azure", model=model, litellm_debug_info=extra_information, @@ -9821,7 +9821,7 @@ def exception_type( elif "invalid_request_error" in error_str: exception_mapping_worked = True raise BadRequestError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException BadRequestError - {original_exception.message}", llm_provider="azure", model=model, litellm_debug_info=extra_information, @@ -9833,7 +9833,7 @@ def exception_type( ): exception_mapping_worked = True raise AuthenticationError( - message=f"{exception_provider} - {original_exception.message}", + message=f"{exception_provider} AuthenticationError - {original_exception.message}", llm_provider=custom_llm_provider, model=model, litellm_debug_info=extra_information, @@ -9853,7 +9853,7 @@ def exception_type( elif original_exception.status_code == 401: exception_mapping_worked = True raise AuthenticationError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException AuthenticationError - {original_exception.message}", llm_provider="azure", model=model, litellm_debug_info=extra_information, @@ -9862,7 +9862,7 @@ def exception_type( elif original_exception.status_code == 408: exception_mapping_worked = True raise Timeout( - message=f"AzureException - {original_exception.message}", + message=f"AzureException Timeout - {original_exception.message}", model=model, litellm_debug_info=extra_information, llm_provider="azure", @@ -9870,7 +9870,7 @@ def exception_type( elif 
original_exception.status_code == 422: exception_mapping_worked = True raise BadRequestError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException BadRequestError - {original_exception.message}", model=model, llm_provider="azure", litellm_debug_info=extra_information, @@ -9879,7 +9879,7 @@ def exception_type( elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException RateLimitError - {original_exception.message}", model=model, llm_provider="azure", litellm_debug_info=extra_information, @@ -9888,7 +9888,7 @@ def exception_type( elif original_exception.status_code == 503: exception_mapping_worked = True raise ServiceUnavailableError( - message=f"AzureException - {original_exception.message}", + message=f"AzureException ServiceUnavailableError - {original_exception.message}", model=model, llm_provider="azure", litellm_debug_info=extra_information, @@ -9897,7 +9897,7 @@ def exception_type( elif original_exception.status_code == 504: # gateway timeout error exception_mapping_worked = True raise Timeout( - message=f"AzureException - {original_exception.message}", + message=f"AzureException Timeout - {original_exception.message}", model=model, litellm_debug_info=extra_information, llm_provider="azure", @@ -9906,7 +9906,7 @@ def exception_type( exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, - message=f"AzureException - {original_exception.message}", + message=f"AzureException APIError - {original_exception.message}", llm_provider="azure", litellm_debug_info=extra_information, model=model, @@ -9917,7 +9917,7 @@ def exception_type( else: # if no status code then it is an APIConnectionError: https://github.com/openai/openai-python#handling-errors raise APIConnectionError( - message=f"{exception_provider} - {message}", + message=f"{exception_provider} APIConnectionError - 
{message}", llm_provider="azure", model=model, litellm_debug_info=extra_information, @@ -9929,7 +9929,7 @@ def exception_type( ): # deal with edge-case invalid request error bug in openai-python sdk exception_mapping_worked = True raise BadRequestError( - message=f"{exception_provider}: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}", + message=f"{exception_provider} BadRequestError: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}", model=model, llm_provider=custom_llm_provider, response=original_exception.response,