LiteLLM Minor Fixes & Improvements (12/27/2024) - p1 (#7448)

* feat(main.py): support 'litellm.ContextWindowExceededError' as a mock_response() value

enables quicker router/fallback/proxy debugging on context window errors
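
A minimal sketch of how this can be exercised, assuming a standard `litellm.completion` call (the model name and message are illustrative):

import litellm

# Passing the special string as mock_response raises the mapped exception
# locally, without calling any real provider - useful for testing how a
# router or fallback chain reacts to context window errors.
try:
    litellm.completion(
        model="gpt-3.5-turbo",  # illustrative model name
        messages=[{"role": "user", "content": "hi"}],
        mock_response="litellm.ContextWindowExceededError",
    )
except litellm.ContextWindowExceededError as e:
    print("caught mock context window error:", e)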

* feat(exception_mapping_utils.py): extract special litellm errors from the error string when calling `litellm_proxy/` as the provider

Closes https://github.com/BerriAI/litellm/issues/7259
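
The rough idea, as a hedged sketch (the helper name and signature below are assumptions for illustration, not the actual exception_mapping_utils.py code): when `litellm_proxy/` is the provider, the raw error string is scanned for known litellm exception names and the matching typed exception is re-raised.

import litellm

# Hypothetical helper sketching the extraction; the real code in
# exception_mapping_utils.py may differ in name and structure.
def _raise_special_error_from_str(error_str: str, model: str, provider: str) -> None:
    if "litellm.ContextWindowExceededError" in error_str:
        # Re-raise the typed exception so routers/fallbacks can react to
        # the original error class instead of a generic proxy error.
        raise litellm.ContextWindowExceededError(
            message=error_str, model=model, llm_provider=provider
        )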

* fix(user_api_key_auth.py): mark the 'Received Proxy Server Request' span as span kind SERVER

Closes https://github.com/BerriAI/litellm/issues/7298
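
For context, a minimal sketch of tagging a span as kind SERVER with the OpenTelemetry Python API (the tracer name and span body are illustrative; only the span name comes from this commit):

from opentelemetry import trace

tracer = trace.get_tracer("litellm-proxy")  # illustrative tracer name

# SpanKind.SERVER tells tracing backends this span is the entry point for
# an inbound request, so trace views group it as the server-side root.
with tracer.start_as_current_span(
    "Received Proxy Server Request", kind=trace.SpanKind.SERVER
):
    ...  # handle the inbound proxy request here
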
7 changed files with 83 additions and 5 deletions

@@ -550,6 +550,17 @@ def _handle_mock_potential_exceptions(
             ), # type: ignore
             model=model,
         )
+    elif (
+        isinstance(mock_response, str)
+        and mock_response == "litellm.ContextWindowExceededError"
+    ):
+        raise litellm.ContextWindowExceededError(
+            message="this is a mock context window exceeded error",
+            llm_provider=getattr(
+                mock_response, "llm_provider", custom_llm_provider or "openai"
+            ), # type: ignore
+            model=model,
+        )
     elif (
         isinstance(mock_response, str)
         and mock_response == "litellm.InternalServerError"
@@ -734,7 +745,7 @@ def mock_completion(
     except Exception as e:
         if isinstance(e, openai.APIError):
             raise e
-        raise Exception("Mock completion response failed")
+        raise Exception("Mock completion response failed - {}".format(e))
 
 
 @client