LiteLLM Minor Fixes & Improvements (12/27/2024) - p1 (#7448)

* feat(main.py): mock_response() - support 'litellm.ContextWindowExceededError' in mock response

enables quicker router/fallback/proxy debugging on context window errors
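
A minimal sketch of how this can be exercised, assuming the special string is passed via `completion()`'s `mock_response` parameter as the commit title suggests (model and prompt are illustrative):

```python
import litellm

# Passing the special mock string short-circuits the real provider call
# and raises the mapped exception -- useful for testing router fallbacks
# and proxy error handling without building a genuinely oversized prompt.
try:
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hello"}],
        mock_response="litellm.ContextWindowExceededError",
    )
except litellm.ContextWindowExceededError as e:
    print(f"caught mocked context window error: {e}")
```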

* feat(exception_mapping_utils.py): extract special litellm errors from the error string when calling `litellm_proxy/` as the provider

Closes https://github.com/BerriAI/litellm/issues/7259
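
The idea, sketched below with a hypothetical helper (the marker table and function name are assumptions, not the actual `exception_mapping_utils.py` code): when `litellm_proxy/` is the provider, typed litellm errors arrive flattened into the upstream error string, so the client scans the string and re-raises the matching class.

```python
import litellm

# Hypothetical sketch: marker strings -> typed litellm exceptions.
_SPECIAL_ERRORS = {
    "litellm.ContextWindowExceededError": litellm.ContextWindowExceededError,
}

def raise_special_error_if_present(error_str: str, model: str, provider: str) -> None:
    """Re-raise a typed litellm error when its marker string appears in an
    error body returned by a `litellm_proxy/` upstream."""
    for marker, error_cls in _SPECIAL_ERRORS.items():
        if marker in error_str:
            raise error_cls(message=error_str, model=model, llm_provider=provider)
```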

* fix(user_api_key_auth.py): mark the 'Received Proxy Server Request' span as span kind SERVER

Closes https://github.com/BerriAI/litellm/issues/7298
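
A sketch of the fix's intent in plain OpenTelemetry terms (this is not the proxy's actual wiring; the tracer name and attribute are illustrative):

```python
from opentelemetry import trace
from opentelemetry.trace import SpanKind

tracer = trace.get_tracer("litellm_proxy")

# Creating the request-receipt span with kind=SERVER lets tracing
# backends classify it as the service entry point rather than an
# internal span.
with tracer.start_as_current_span(
    "Received Proxy Server Request", kind=SpanKind.SERVER
) as span:
    span.set_attribute("http.method", "POST")  # illustrative attribute
```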
Krish Dholakia 2024-12-27 19:04:39 -08:00 committed by GitHub
commit 67b39bacf7 (parent cca9cfe667)
7 changed files with 83 additions and 5 deletions

litellm/exceptions.py

@@ -337,20 +337,22 @@ class ContextWindowExceededError(BadRequestError):  # type: ignore
         litellm_debug_info: Optional[str] = None,
     ):
         self.status_code = 400
-        self.message = "litellm.ContextWindowExceededError: {}".format(message)
         self.model = model
         self.llm_provider = llm_provider
         self.litellm_debug_info = litellm_debug_info
         request = httpx.Request(method="POST", url="https://api.openai.com/v1")
         self.response = httpx.Response(status_code=400, request=request)
         super().__init__(
-            message=self.message,
+            message=message,
             model=self.model,  # type: ignore
             llm_provider=self.llm_provider,  # type: ignore
             response=self.response,
             litellm_debug_info=self.litellm_debug_info,
         )  # Call the base class constructor with the parameters it needs
 
+        # set after, to make it clear the raised error is a context window exceeded error
+        self.message = "litellm.ContextWindowExceededError: {}".format(self.message)
+
     def __str__(self):
         _message = self.message
         if self.num_retries:
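
The net effect of the diff above: the base constructor now receives the raw message, and the `litellm.ContextWindowExceededError:` prefix is applied exactly once afterward, so it leads the raised error's message. A minimal sketch of the resulting behavior (values are illustrative):

```python
import litellm

try:
    raise litellm.ContextWindowExceededError(
        message="prompt is 9000 tokens; the model allows 4096",
        model="gpt-3.5-turbo",
        llm_provider="openai",
    )
except litellm.ContextWindowExceededError as e:
    # The prefix is set after super().__init__(), so the raised error's
    # message carries the class marker, and carries it exactly once.
    assert e.message.startswith("litellm.ContextWindowExceededError:")
    assert e.message.count("litellm.ContextWindowExceededError") == 1
```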