diff --git a/litellm/tests/test_exceptions.py b/litellm/tests/test_exceptions.py
index 3d8cb3c2a..fb390bb48 100644
--- a/litellm/tests/test_exceptions.py
+++ b/litellm/tests/test_exceptions.py
@@ -249,6 +249,25 @@ def test_completion_azure_exception():
 # test_completion_azure_exception()
 
 
+def test_azure_embedding_exceptions():
+    try:
+
+        response = litellm.embedding(
+            model="azure/azure-embedding-model",
+            input="hello",
+            messages="hello",
+        )
+        pytest.fail(f"Bad request this should have failed but got {response}")
+
+    except Exception as e:
+        print(vars(e))
+        # CRUCIAL test - ensures our exceptions are readable and not overly complicated. Some users have complained that exceptions randomly have another exception raised inside our exception mapping.
+        assert (
+            e.message
+            == "litellm.APIError: AzureException APIError - Embeddings.create() got an unexpected keyword argument 'messages'"
+        )
+
+
 async def asynctest_completion_azure_exception():
     try:
         import openai
diff --git a/litellm/utils.py b/litellm/utils.py
index 103f854b6..f8e8566f8 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -5810,6 +5810,18 @@ def exception_type(
             _model_group = _metadata.get("model_group")
             _deployment = _metadata.get("deployment")
             extra_information = f"\nModel: {model}"
+
+            exception_provider = "Unknown"
+            if (
+                isinstance(custom_llm_provider, str)
+                and len(custom_llm_provider) > 0
+            ):
+                exception_provider = (
+                    custom_llm_provider[0].upper()
+                    + custom_llm_provider[1:]
+                    + "Exception"
+                )
+
             if _api_base:
                 extra_information += f"\nAPI Base: `{_api_base}`"
             if (