From c0cc78b9437dff573ecf11a60d22041aebdcb58b Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 14 Dec 2023 18:57:39 +0530
Subject: [PATCH] (feat) mistral - add exception mapping

---
 litellm/__init__.py              |  8 ++++++++
 litellm/tests/test_exceptions.py | 32 ++++++++++++++++++++++++++++++--
 litellm/utils.py                 |  2 +-
 3 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/litellm/__init__.py b/litellm/__init__.py
index 3ba2ffc18..d5c29e7a7 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -163,6 +163,14 @@ openai_compatible_endpoints: List = [
     "api.mistral.ai/v1"
 ]
 
+# this is maintained for Exception Mapping
+openai_compatible_providers: List = [
+    "anyscale",
+    "mistral",
+    "deepinfra",
+    "perplexity"
+]
+
 
 # well supported replicate llms
 replicate_models: List = [
diff --git a/litellm/tests/test_exceptions.py b/litellm/tests/test_exceptions.py
index 892a1f7ca..fd49fbdde 100644
--- a/litellm/tests/test_exceptions.py
+++ b/litellm/tests/test_exceptions.py
@@ -197,7 +197,7 @@ def test_completion_azure_exception():
         print("good job got the correct error for azure when key not set")
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-test_completion_azure_exception()
+# test_completion_azure_exception()
 
 async def asynctest_completion_azure_exception():
     try:
@@ -227,7 +227,6 @@ async def asynctest_completion_azure_exception():
         print("Got wrong exception")
         print("exception", e)
         pytest.fail(f"Error occurred: {e}")
-
 # import asyncio
 # asyncio.run(
 #     asynctest_completion_azure_exception()
@@ -261,6 +260,35 @@ def test_completion_openai_exception():
         pytest.fail(f"Error occurred: {e}")
 # test_completion_openai_exception()
 
+def test_completion_mistral_exception():
+    # test if mistral/mistral-tiny raises openai.AuthenticationError
+    try:
+        import openai
+        print("Testing mistral ai exception mapping")
+        litellm.set_verbose=False
+        ## Test mistral call with a bogus API key
+        old_mistral_key = os.environ["MISTRAL_API_KEY"]
+        os.environ["MISTRAL_API_KEY"] = "good morning"
+        response = completion(
+            model="mistral/mistral-tiny",
+            messages=[
+                {
+                    "role": "user",
+                    "content": "hello"
+                }
+            ],
+        )
+        print(f"response: {response}")
+        print(response)
+    except openai.AuthenticationError as e:
+        os.environ["MISTRAL_API_KEY"] = old_mistral_key
+        print("good job got the correct error for mistral when key not set")
+    except Exception as e:
+        # restore the real key so later tests are not polluted by the bogus one
+        os.environ["MISTRAL_API_KEY"] = old_mistral_key
+        pytest.fail(f"Error occurred: {e}")
+# test_completion_mistral_exception()
+
 
 
 
diff --git a/litellm/utils.py b/litellm/utils.py
index 53e623c14..9a718a5ed 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -4163,7 +4163,7 @@ def exception_type(
                     llm_provider=custom_llm_provider
                 )
 
-        if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai":
+        if custom_llm_provider == "openai" or custom_llm_provider == "text-completion-openai" or custom_llm_provider == "custom_openai" or custom_llm_provider in litellm.openai_compatible_providers:
             if "This model's maximum context length is" in error_str or "Request too large" in error_str:
                 exception_mapping_worked = True
                 raise ContextWindowExceededError(