Compare commits


5 commits

Author | SHA1 | Message | Date
Ishaan Jaff | a202da9303 | fix get attr | 2024-11-14 13:31:37 -08:00
Ishaan Jaff | 7801b5361d | use safe access to get exception response | 2024-11-14 13:00:12 -08:00
Ishaan Jaff | 812be07c3b | test_bad_request_error_contains_httpx_response | 2024-11-14 08:49:40 -08:00
Ishaan Jaff | 9168525e68 | test_bad_request_error_contains_httpx_response | 2024-11-14 08:46:47 -08:00
Ishaan Jaff | 952f2dbe70 | fix azure exceptions | 2024-11-14 08:40:30 -08:00
2 changed files with 126 additions and 93 deletions
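
The substantive change runs through exception_type: every mapped exception previously forwarded original_exception.response directly, which raises AttributeError whenever the wrapped exception never carried an HTTP response (a connection failure, for example). Each call site now uses getattr(original_exception, "response", None), which falls back to None instead of masking the real error. A minimal standalone sketch of the difference (DemoError is a hypothetical stand-in, not litellm code):

    class DemoError(Exception):
        pass  # an exception with no .response attribute

    err = DemoError("boom")

    # Direct access raises AttributeError and hides the original failure:
    # response = err.response

    # Safe access degrades to a default instead:
    response = getattr(err, "response", None)
    assert response is None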


@ -239,7 +239,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ContextWindowExceededError: {exception_provider} - {message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif (
@ -251,7 +251,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"{exception_provider} - {message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif "A timeout occurred" in error_str:
@ -271,7 +271,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ContentPolicyViolationError: {exception_provider} - {message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif (
@ -283,7 +283,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"{exception_provider} - {message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif "Web server is returning an unknown error" in error_str:
@ -299,7 +299,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"RateLimitError: {exception_provider} - {message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif (
@ -311,7 +311,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AuthenticationError: {exception_provider} - {message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif "Mistral API raised a streaming error" in error_str:
@ -335,7 +335,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"{exception_provider} - {message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 401:
@ -344,7 +344,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AuthenticationError: {exception_provider} - {message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 404:
@ -353,7 +353,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NotFoundError: {exception_provider} - {message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 408:
@ -516,7 +516,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "input is too long" in error_str:
exception_mapping_worked = True
@ -524,7 +524,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif exception_type == "ModelError":
exception_mapping_worked = True
@ -532,7 +532,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {error_str}",
model=model,
llm_provider="replicate",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "Request was throttled" in error_str:
exception_mapping_worked = True
@ -540,7 +540,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {error_str}",
llm_provider="replicate",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
@ -549,7 +549,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
original_exception.status_code == 400
@ -560,7 +560,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 422:
exception_mapping_worked = True
@ -568,7 +568,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {original_exception.message}",
model=model,
llm_provider="replicate",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
@ -583,7 +583,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
@ -591,7 +591,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
@ -599,7 +599,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ReplicateException - {original_exception.message}",
llm_provider="replicate",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
exception_mapping_worked = True
raise APIError(
@ -631,7 +631,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"{custom_llm_provider}Exception: Authentication Error - {error_str}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif "token_quota_reached" in error_str:
@ -640,7 +640,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"{custom_llm_provider}Exception: Rate Limit Errror - {error_str}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"The server received an invalid response from an upstream server."
@ -750,7 +750,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {error_str}\n. Enable 'litellm.modify_params=True' (for PROXY do: `litellm_settings::modify_params: True`) to insert a dummy assistant message and fix this error.",
model=model,
llm_provider="bedrock",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "Malformed input request" in error_str:
exception_mapping_worked = True
@ -758,7 +758,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "A conversation must start with a user message." in error_str:
exception_mapping_worked = True
@ -766,7 +766,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {error_str}\n. Pass in default user message via `completion(..,user_continue_message=)` or enable `litellm.modify_params=True`.\nFor Proxy: do via `litellm_settings::modify_params: True` or user_continue_message under `litellm_params`",
model=model,
llm_provider="bedrock",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"Unable to locate credentials" in error_str
@ -778,7 +778,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException Invalid Authentication - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "AccessDeniedException" in error_str:
exception_mapping_worked = True
@ -786,7 +786,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException PermissionDeniedError - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"throttlingException" in error_str
@ -797,7 +797,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException: Rate Limit Error - {error_str}",
model=model,
llm_provider="bedrock",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"Connect timeout on endpoint URL" in error_str
@ -836,7 +836,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
@ -844,7 +844,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 404:
exception_mapping_worked = True
@ -852,7 +852,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {original_exception.message}",
llm_provider="bedrock",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
@ -868,7 +868,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 429:
@ -877,7 +877,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 503:
@ -886,7 +886,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BedrockException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 504: # gateway timeout error
@ -907,7 +907,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"litellm.BadRequestError: SagemakerException - {error_str}",
model=model,
llm_provider="sagemaker",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"Input validation error: `best_of` must be > 0 and <= 2"
@ -918,7 +918,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message="SagemakerException - the value of 'n' must be > 0 and <= 2 for sagemaker endpoints",
model=model,
llm_provider="sagemaker",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"`inputs` tokens + `max_new_tokens` must be <=" in error_str
@ -929,7 +929,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"SagemakerException - {error_str}",
model=model,
llm_provider="sagemaker",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif hasattr(original_exception, "status_code"):
if original_exception.status_code == 500:
@ -951,7 +951,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"SagemakerException - {original_exception.message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
@ -959,7 +959,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"SagemakerException - {original_exception.message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 404:
exception_mapping_worked = True
@ -967,7 +967,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"SagemakerException - {original_exception.message}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
@ -986,7 +986,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"SagemakerException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 429:
@ -995,7 +995,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"SagemakerException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 503:
@ -1004,7 +1004,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"SagemakerException - {original_exception.message}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 504: # gateway timeout error
@ -1214,7 +1214,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message="GeminiException - Invalid api key",
model=model,
llm_provider="palm",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
if (
"504 Deadline expired before operation could complete." in error_str
@ -1232,7 +1232,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"GeminiException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
if (
"500 An internal error has occurred." in error_str
@ -1259,7 +1259,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"GeminiException - {error_str}",
model=model,
llm_provider="palm",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
# Failed: Error occurred: 400 Request payload size exceeds the limit: 20000 bytes
elif custom_llm_provider == "cloudflare":
@ -1269,7 +1269,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"Cloudflare Exception - {original_exception.message}",
llm_provider="cloudflare",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
if "must have required property" in error_str:
exception_mapping_worked = True
@ -1277,7 +1277,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"Cloudflare Exception - {original_exception.message}",
llm_provider="cloudflare",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
custom_llm_provider == "cohere" or custom_llm_provider == "cohere_chat"
@ -1291,7 +1291,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "too many tokens" in error_str:
exception_mapping_worked = True
@ -1299,7 +1299,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"CohereException - {original_exception.message}",
model=model,
llm_provider="cohere",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif hasattr(original_exception, "status_code"):
if (
@ -1311,7 +1311,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
@ -1326,7 +1326,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"CohereConnectionError" in exception_type
@ -1336,7 +1336,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "invalid type:" in error_str:
exception_mapping_worked = True
@ -1344,7 +1344,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "Unexpected server error" in error_str:
exception_mapping_worked = True
@ -1352,7 +1352,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"CohereException - {original_exception.message}",
llm_provider="cohere",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
else:
if hasattr(original_exception, "status_code"):
@ -1372,7 +1372,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=error_str,
model=model,
llm_provider="huggingface",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "A valid user token is required" in error_str:
exception_mapping_worked = True
@ -1380,7 +1380,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=error_str,
llm_provider="huggingface",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "Rate limit reached" in error_str:
exception_mapping_worked = True
@ -1388,7 +1388,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=error_str,
llm_provider="huggingface",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
@ -1397,7 +1397,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 400:
exception_mapping_worked = True
@ -1405,7 +1405,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"HuggingfaceException - {original_exception.message}",
model=model,
llm_provider="huggingface",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
@ -1420,7 +1420,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 503:
exception_mapping_worked = True
@ -1428,7 +1428,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"HuggingfaceException - {original_exception.message}",
llm_provider="huggingface",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
else:
exception_mapping_worked = True
@ -1447,7 +1447,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
if "Bad or missing API token." in original_exception.message:
exception_mapping_worked = True
@ -1455,7 +1455,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 401:
@ -1464,7 +1464,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
@ -1479,7 +1479,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AI21Exception - {original_exception.message}",
model=model,
llm_provider="ai21",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
@ -1487,7 +1487,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AI21Exception - {original_exception.message}",
llm_provider="ai21",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
else:
exception_mapping_worked = True
@ -1506,7 +1506,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "value is not a valid" in error_str:
exception_mapping_worked = True
@ -1514,7 +1514,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NLPCloudException - {error_str}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
else:
exception_mapping_worked = True
@ -1539,7 +1539,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
original_exception.status_code == 401
@ -1550,7 +1550,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
original_exception.status_code == 522
@ -1571,7 +1571,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NLPCloudException - {original_exception.message}",
llm_provider="nlp_cloud",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
original_exception.status_code == 500
@ -1594,7 +1594,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NLPCloudException - {original_exception.message}",
model=model,
llm_provider="nlp_cloud",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
else:
exception_mapping_worked = True
@ -1620,7 +1620,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"error" in error_response
@ -1631,7 +1631,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"TogetherAIException - {error_response['error']}",
llm_provider="together_ai",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"error" in error_response
@ -1642,7 +1642,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "A timeout occurred" in error_str:
exception_mapping_worked = True
@ -1661,7 +1661,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif (
"error_type" in error_response
@ -1672,7 +1672,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
if hasattr(original_exception, "status_code"):
if original_exception.status_code == 408:
@ -1688,7 +1688,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"TogetherAIException - {error_response['error']}",
model=model,
llm_provider="together_ai",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
@ -1696,7 +1696,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"TogetherAIException - {original_exception.message}",
llm_provider="together_ai",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 524:
exception_mapping_worked = True
@ -1724,7 +1724,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "InvalidToken" in error_str or "No token provided" in error_str:
exception_mapping_worked = True
@ -1732,7 +1732,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif hasattr(original_exception, "status_code"):
verbose_logger.debug(
@ -1751,7 +1751,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
@ -1759,7 +1759,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 500:
exception_mapping_worked = True
@ -1767,7 +1767,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AlephAlphaException - {original_exception.message}",
llm_provider="aleph_alpha",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
raise original_exception
raise original_exception
@ -1784,7 +1784,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"OllamaException: Invalid Model/Model not loaded - {original_exception}",
model=model,
llm_provider="ollama",
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "Failed to establish a new connection" in error_str:
exception_mapping_worked = True
@ -1792,7 +1792,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "Invalid response object from API" in error_str:
exception_mapping_worked = True
@ -1800,7 +1800,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"OllamaException: {original_exception}",
llm_provider="ollama",
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
)
elif "Read timed out" in error_str:
exception_mapping_worked = True
@ -1834,6 +1834,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider="azure",
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif "This model's maximum context length is" in error_str:
exception_mapping_worked = True
@ -1842,6 +1843,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider="azure",
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif "DeploymentNotFound" in error_str:
exception_mapping_worked = True
@ -1850,6 +1852,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider="azure",
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif (
(
@ -1870,6 +1873,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider="azure",
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif "invalid_request_error" in error_str:
exception_mapping_worked = True
@ -1878,6 +1882,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider="azure",
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif (
"The api_key client option must be set either by passing api_key to the client or by setting"
@ -1889,6 +1894,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider=custom_llm_provider,
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif "Connection error" in error_str:
exception_mapping_worked = True
@ -1907,6 +1913,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider="azure",
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 401:
exception_mapping_worked = True
@ -1915,6 +1922,7 @@ def exception_type( # type: ignore # noqa: PLR0915
llm_provider="azure",
model=model,
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 408:
exception_mapping_worked = True
@ -1931,6 +1939,7 @@ def exception_type( # type: ignore # noqa: PLR0915
model=model,
llm_provider="azure",
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 429:
exception_mapping_worked = True
@ -1939,6 +1948,7 @@ def exception_type( # type: ignore # noqa: PLR0915
model=model,
llm_provider="azure",
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 503:
exception_mapping_worked = True
@ -1947,6 +1957,7 @@ def exception_type( # type: ignore # noqa: PLR0915
model=model,
llm_provider="azure",
litellm_debug_info=extra_information,
response=getattr(original_exception, "response", None),
)
elif original_exception.status_code == 504: # gateway timeout error
exception_mapping_worked = True
@ -1986,7 +1997,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"{exception_provider} - {error_str}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 401:
@ -1995,7 +2006,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"AuthenticationError: {exception_provider} - {error_str}",
llm_provider=custom_llm_provider,
model=model,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 404:
@ -2004,7 +2015,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"NotFoundError: {exception_provider} - {error_str}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 408:
@ -2021,7 +2032,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"BadRequestError: {exception_provider} - {error_str}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 429:
@ -2030,7 +2041,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"RateLimitError: {exception_provider} - {error_str}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 503:
@ -2039,7 +2050,7 @@ def exception_type( # type: ignore # noqa: PLR0915
message=f"ServiceUnavailableError: {exception_provider} - {error_str}",
model=model,
llm_provider=custom_llm_provider,
response=original_exception.response,
response=getattr(original_exception, "response", None),
litellm_debug_info=extra_information,
)
elif original_exception.status_code == 504: # gateway timeout error


@ -58,6 +58,7 @@ async def test_content_policy_exception_azure():
except litellm.ContentPolicyViolationError as e:
print("caught a content policy violation error! Passed")
print("exception", e)
assert e.response is not None
assert e.litellm_debug_info is not None
assert isinstance(e.litellm_debug_info, str)
assert len(e.litellm_debug_info) > 0
@ -1152,3 +1153,24 @@ async def test_exception_with_headers_httpx(
if exception_raised is False:
print(resp)
assert exception_raised


@pytest.mark.asyncio
@pytest.mark.parametrize("model", ["azure/chatgpt-v-2", "openai/gpt-3.5-turbo"])
async def test_bad_request_error_contains_httpx_response(model):
"""
Test that the BadRequestError contains the httpx response
Relevant issue: https://github.com/BerriAI/litellm/issues/6732
"""
try:
await litellm.acompletion(
model=model,
messages=[{"role": "user", "content": "Hello world"}],
bad_arg="bad_arg",
)
pytest.fail("Expected to raise BadRequestError")
except litellm.BadRequestError as e:
print("e.response", e.response)
print("vars(e.response)", vars(e.response))
assert e.response is not None
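
The same check can be run by hand outside pytest (a sketch; it assumes valid credentials for the chosen provider are set in the environment):

    import asyncio
    import litellm

    async def main():
        try:
            await litellm.acompletion(
                model="openai/gpt-3.5-turbo",
                messages=[{"role": "user", "content": "Hello world"}],
                bad_arg="bad_arg",  # unknown argument -> provider rejects with 400
            )
        except litellm.BadRequestError as e:
            # Before this change, mapping the error could itself raise
            # AttributeError; now e.response holds the provider's response.
            print(type(e.response))

    asyncio.run(main())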