From a55e0a9689920e2d73cd44a1ddc9dcb05f554214 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Fri, 2 Feb 2024 08:37:42 -0800
Subject: [PATCH 1/2] (feat) show correct provider in exceptions

---
 litellm/utils.py | 60 ++++++++++++++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 25 deletions(-)

diff --git a/litellm/utils.py b/litellm/utils.py
index a58642d49..b6b5d5a35 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -5906,14 +5906,24 @@ def exception_type(
             or custom_llm_provider == "custom_openai"
             or custom_llm_provider in litellm.openai_compatible_providers
         ):
+            # custom_llm_provider is openai, make it OpenAI
+            if custom_llm_provider == "openai":
+                exception_provider = "OpenAI" + "Exception"
+            else:
+                exception_provider = (
+                    custom_llm_provider[0].upper()
+                    + custom_llm_provider[1:]
+                    + "Exception"
+                )
+
             if (
                 "This model's maximum context length is" in error_str
                 or "Request too large" in error_str
             ):
                 exception_mapping_worked = True
                 raise ContextWindowExceededError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
@@ -5923,8 +5933,8 @@
             ):
                 exception_mapping_worked = True
                 raise NotFoundError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
@@ -5934,8 +5944,8 @@
             ):
                 exception_mapping_worked = True
                 raise ContentPolicyViolationError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
@@ -5945,8 +5955,8 @@
             ):
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"OpenAIException - {original_exception.message}",
-                    llm_provider="openai",
+                    message=f"{exception_provider} - {original_exception.message}",
+                    llm_provider=custom_llm_provider,
                     model=model,
                     response=original_exception.response,
                 )
@@ -5955,63 +5965,63 @@
                 if original_exception.status_code == 401:
                     exception_mapping_worked = True
                     raise AuthenticationError(
-                        message=f"OpenAIException - {original_exception.message}",
-                        llm_provider="openai",
+                        message=f"{exception_provider} - {original_exception.message}",
+                        llm_provider=custom_llm_provider,
                         model=model,
                         response=original_exception.response,
                     )
                 elif original_exception.status_code == 404:
                     exception_mapping_worked = True
                     raise NotFoundError(
-                        message=f"OpenAIException - {original_exception.message}",
+                        message=f"{exception_provider} - {original_exception.message}",
                         model=model,
-                        llm_provider="openai",
+                        llm_provider=custom_llm_provider,
                         response=original_exception.response,
                     )
                 elif original_exception.status_code == 408:
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"OpenAIException - {original_exception.message}",
+                        message=f"{exception_provider} - {original_exception.message}",
                         model=model,
-                        llm_provider="openai",
+                        llm_provider=custom_llm_provider,
                     )
                 elif original_exception.status_code == 422:
                     exception_mapping_worked = True
                     raise BadRequestError(
-                        message=f"OpenAIException - {original_exception.message}",
+                        message=f"{exception_provider} - {original_exception.message}",
                         model=model,
-                        llm_provider="openai",
+                        llm_provider=custom_llm_provider,
                         response=original_exception.response,
                     )
                 elif original_exception.status_code == 429:
                     exception_mapping_worked = True
                     raise RateLimitError(
-                        message=f"OpenAIException - {original_exception.message}",
+                        message=f"{exception_provider} - {original_exception.message}",
                         model=model,
-                        llm_provider="openai",
+                        llm_provider=custom_llm_provider,
                         response=original_exception.response,
                     )
                 elif original_exception.status_code == 503:
                     exception_mapping_worked = True
                     raise ServiceUnavailableError(
-                        message=f"OpenAIException - {original_exception.message}",
+                        message=f"{exception_provider} - {original_exception.message}",
                         model=model,
-                        llm_provider="openai",
+                        llm_provider=custom_llm_provider,
                         response=original_exception.response,
                     )
                 elif original_exception.status_code == 504:  # gateway timeout error
                     exception_mapping_worked = True
                     raise Timeout(
-                        message=f"OpenAIException - {original_exception.message}",
+                        message=f"{exception_provider} - {original_exception.message}",
                         model=model,
-                        llm_provider="openai",
+                        llm_provider=custom_llm_provider,
                     )
                 else:
                     exception_mapping_worked = True
                     raise APIError(
                         status_code=original_exception.status_code,
-                        message=f"OpenAIException - {original_exception.message}",
-                        llm_provider="openai",
+                        message=f"{exception_provider} - {original_exception.message}",
+                        llm_provider=custom_llm_provider,
                         model=model,
                         request=original_exception.request,
                     )
@@ -7015,7 +7025,7 @@
             ):  # deal with edge-case invalid request error bug in openai-python sdk
                 exception_mapping_worked = True
                 raise BadRequestError(
-                    message=f"OpenAIException: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
+                    message=f"{exception_provider}: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}",
                     model=model,
                     llm_provider=custom_llm_provider,
                     response=original_exception.response,
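
The hunk above derives `exception_provider` by upper-casing the first character of
`custom_llm_provider` and appending "Exception", with "openai" special-cased so the
prefix stays "OpenAIException" rather than "OpenaiException". A minimal standalone
sketch of that derivation (the `exception_prefix` helper name and the sample provider
slugs are illustrative only, not part of the patch):

    # Standalone sketch of the prefix derivation in the hunk above; runnable as-is.
    def exception_prefix(custom_llm_provider: str) -> str:
        if custom_llm_provider == "openai":
            # Special case: plain capitalization would yield "OpenaiException".
            return "OpenAI" + "Exception"
        return custom_llm_provider[0].upper() + custom_llm_provider[1:] + "Exception"

    assert exception_prefix("openai") == "OpenAIException"
    assert exception_prefix("perplexity") == "PerplexityException"
    assert exception_prefix("mistral") == "MistralException"
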
llm_provider="openai", + llm_provider=custom_llm_provider, response=original_exception.response, ) elif original_exception.status_code == 429: exception_mapping_worked = True raise RateLimitError( - message=f"OpenAIException - {original_exception.message}", + message=f"{exception_provider} - {original_exception.message}", model=model, - llm_provider="openai", + llm_provider=custom_llm_provider, response=original_exception.response, ) elif original_exception.status_code == 503: exception_mapping_worked = True raise ServiceUnavailableError( - message=f"OpenAIException - {original_exception.message}", + message=f"{exception_provider} - {original_exception.message}", model=model, - llm_provider="openai", + llm_provider=custom_llm_provider, response=original_exception.response, ) elif original_exception.status_code == 504: # gateway timeout error exception_mapping_worked = True raise Timeout( - message=f"OpenAIException - {original_exception.message}", + message=f"{exception_provider} - {original_exception.message}", model=model, - llm_provider="openai", + llm_provider=custom_llm_provider, ) else: exception_mapping_worked = True raise APIError( status_code=original_exception.status_code, - message=f"OpenAIException - {original_exception.message}", - llm_provider="openai", + message=f"{exception_provider} - {original_exception.message}", + llm_provider=custom_llm_provider, model=model, request=original_exception.request, ) @@ -7015,7 +7025,7 @@ def exception_type( ): # deal with edge-case invalid request error bug in openai-python sdk exception_mapping_worked = True raise BadRequestError( - message=f"OpenAIException: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}", + message=f"{exception_provider}: This can happen due to missing AZURE_API_VERSION: {str(original_exception)}", model=model, llm_provider=custom_llm_provider, response=original_exception.response, From fedb53771cba720bc7c4d6ead3884a191e8ea873 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Fri, 2 Feb 2024 08:46:42 -0800 Subject: [PATCH 2/2] (test) perplexity exception raising --- litellm/tests/test_exceptions.py | 46 ++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/litellm/tests/test_exceptions.py b/litellm/tests/test_exceptions.py index 70e45341a..dfa571f00 100644 --- a/litellm/tests/test_exceptions.py +++ b/litellm/tests/test_exceptions.py @@ -444,6 +444,52 @@ def test_content_policy_violation_error_streaming(): asyncio.run(test_get_error()) +def test_completion_perplexity_exception(): + try: + import openai + + print("perplexity test\n\n") + litellm.set_verbose = True + ## Test azure call + old_azure_key = os.environ["PERPLEXITYAI_API_KEY"] + os.environ["PERPLEXITYAI_API_KEY"] = "good morning" + response = completion( + model="perplexity/mistral-7b-instruct", + messages=[{"role": "user", "content": "hello"}], + ) + os.environ["PERPLEXITYAI_API_KEY"] = old_azure_key + pytest.fail("Request should have failed - bad api key") + except openai.AuthenticationError as e: + os.environ["PERPLEXITYAI_API_KEY"] = old_azure_key + print("exception: ", e) + assert "PerplexityException" in str(e) + except Exception as e: + pytest.fail(f"Error occurred: {e}") + + +def test_completion_openai_api_key_exception(): + try: + import openai + + print("gpt-3.5 test\n\n") + litellm.set_verbose = True + ## Test azure call + old_azure_key = os.environ["OPENAI_API_KEY"] + os.environ["OPENAI_API_KEY"] = "good morning" + response = completion( + model="gpt-3.5-turbo", + messages=[{"role": "user", 
"content": "hello"}], + ) + os.environ["OPENAI_API_KEY"] = old_azure_key + pytest.fail("Request should have failed - bad api key") + except openai.AuthenticationError as e: + os.environ["OPENAI_API_KEY"] = old_azure_key + print("exception: ", e) + assert "OpenAIException" in str(e) + except Exception as e: + pytest.fail(f"Error occurred: {e}") + + # tesy_async_acompletion() # # test_invalid_request_error(model="command-nightly")