LiteLLM Minor Fixes & Improvements (12/27/2024) - p1 (#7448)

* feat(main.py): mock_response() - support 'litellm.ContextWindowExceededError' in mock response

enables quicker debugging of router/fallback/proxy behavior on context window errors
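A minimal sketch of how this might be exercised, assuming (per this commit) that passing the error name as the `mock_response` string raises the corresponding litellm error; the model name and messages are illustrative, no real provider call is made:

```python
import litellm

# Force a ContextWindowExceededError without hitting a real provider,
# e.g. to test router/fallback logic locally.
try:
    litellm.completion(
        model="gpt-3.5-turbo",  # illustrative; no real call is made
        messages=[{"role": "user", "content": "hi"}],
        mock_response="litellm.ContextWindowExceededError",
    )
except litellm.ContextWindowExceededError as e:
    print("context window fallback path triggered:", e)
```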

* feat(exception_mapping_utils.py): extract special litellm errors from error str if calling `litellm_proxy/` as provider

Closes https://github.com/BerriAI/litellm/issues/7259
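The extraction itself is a regex match over the stringified error, as the diff below shows. A standalone sketch of the same logic; the error string here is made up for illustration:

```python
import re

# Illustrative error string, as the proxy might stringify it
error_str = (
    "litellm.ContextWindowExceededError: This model's maximum "
    "context length is 8192 tokens."
)

# Same pattern as in the diff below
match = re.search(r"litellm\.\w+Error", error_str)
if match:
    exception_name = match.group(0).replace("litellm.", "")
    print(exception_name)  # -> ContextWindowExceededError
```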

* fix(user_api_key_auth.py): mark the 'Received Proxy Server Request' span as span kind `server`

Closes https://github.com/BerriAI/litellm/issues/7298
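Not the commit's exact code; a generic OpenTelemetry sketch of what marking a span as kind SERVER looks like (the tracer name is illustrative):

```python
from opentelemetry import trace
from opentelemetry.trace import SpanKind

tracer = trace.get_tracer("litellm-proxy")  # illustrative tracer name

# SpanKind.SERVER tells tracing backends this span is the entry point
# of an incoming request, rather than an internal operation.
with tracer.start_as_current_span(
    "Received Proxy Server Request", kind=SpanKind.SERVER
):
    ...  # authenticate and handle the incoming request
```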
Krish Dholakia 2024-12-27 19:04:39 -08:00 committed by GitHub
parent cca9cfe667
commit 67b39bacf7
7 changed files with 83 additions and 5 deletions

exception_mapping_utils.py

@@ -1,6 +1,6 @@
 import json
 import traceback
-from typing import Optional
+from typing import Any, Optional
 import httpx
@@ -84,6 +84,41 @@ def _get_response_headers(original_exception: Exception) -> Optional[httpx.Headers]:
     return _response_headers


+import re
+
+
+def extract_and_raise_litellm_exception(
+    response: Optional[Any],
+    error_str: str,
+    model: str,
+    custom_llm_provider: str,
+):
+    """
+    Covers the scenario where the litellm SDK is calling the proxy.
+
+    Enables raising the special errors raised by litellm, e.g. ContextWindowExceededError.
+
+    Relevant issue: https://github.com/BerriAI/litellm/issues/7259
+    """
+    pattern = r"litellm\.\w+Error"
+
+    # Search for a litellm exception class name in the error string
+    match = re.search(pattern, error_str)
+
+    # If found, look the class up on the litellm module and re-raise it
+    if match:
+        exception_name = match.group(0)
+        exception_name = exception_name.strip().replace("litellm.", "")
+        raised_exception_obj = getattr(litellm, exception_name, None)
+        if raised_exception_obj:
+            raise raised_exception_obj(
+                message=error_str,
+                llm_provider=custom_llm_provider,
+                model=model,
+                response=response,
+            )
+
+
 def exception_type(  # type: ignore  # noqa: PLR0915
     model,
     original_exception,
@@ -197,6 +232,15 @@ def exception_type(  # type: ignore  # noqa: PLR0915
                 litellm_debug_info=extra_information,
             )

+        if (
+            custom_llm_provider == "litellm_proxy"
+        ):  # special case: calling the litellm proxy, and the error string contains a litellm error name
+            extract_and_raise_litellm_exception(
+                response=getattr(original_exception, "response", None),
+                error_str=error_str,
+                model=model,
+                custom_llm_provider=custom_llm_provider,
+            )
         if (
             custom_llm_provider == "openai"
             or custom_llm_provider == "text-completion-openai"