mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
LiteLLM Minor Fixes & Improvements (12/27/2024) - p1 (#7448)
* feat(main.py): mock_response() - support 'litellm.ContextWindowExceededError' in mock response; enables quicker router/fallback/proxy debugging on context window errors * feat(exception_mapping_utils.py): extract special litellm errors from error str if calling `litellm_proxy/` as provider Closes https://github.com/BerriAI/litellm/issues/7259 * fix(user_api_key_auth.py): specify 'Received Proxy Server Request' is span kind server Closes https://github.com/BerriAI/litellm/issues/7298
This commit is contained in:
parent
cca9cfe667
commit
67b39bacf7
7 changed files with 83 additions and 5 deletions
|
@ -1189,3 +1189,19 @@ def test_exceptions_base_class():
|
|||
assert isinstance(e, litellm.RateLimitError)
|
||||
assert e.code == "429"
|
||||
assert e.type == "throttling_error"
|
||||
|
||||
|
||||
def test_context_window_exceeded_error_from_litellm_proxy():
    """A ContextWindowExceededError embedded in a litellm_proxy error string
    should be extracted and re-raised as the matching litellm exception type.
    """
    from httpx import Response

    from litellm.litellm_core_utils.exception_mapping_utils import (
        extract_and_raise_litellm_exception,
    )

    # Raw error string as returned by a litellm proxy — contains the
    # exception class name that the helper is expected to parse out.
    proxy_error_str = "Error code: 400 - {'error': {'message': \"litellm.ContextWindowExceededError: litellm.BadRequestError: this is a mock context window exceeded error\\nmodel=gpt-3.5-turbo. context_window_fallbacks=None. fallbacks=None.\\n\\nSet 'context_window_fallback' - https://docs.litellm.ai/docs/routing#fallbacks\\nReceived Model Group=gpt-3.5-turbo\\nAvailable Model Group Fallbacks=None\", 'type': None, 'param': None, 'code': '400'}}"

    with pytest.raises(litellm.ContextWindowExceededError):
        extract_and_raise_litellm_exception(
            response=Response(status_code=400, text="Bad Request"),
            error_str=proxy_error_str,
            model="gpt-3.5-turbo",
            custom_llm_provider="litellm_proxy",
        )
|
|
Loading…
Add table
Add a link
Reference in a new issue