LiteLLM Minor Fixes & Improvements (12/27/2024) - p1 (#7448)

* feat(main.py): support 'litellm.ContextWindowExceededError' as a mock_response() value, enabling quicker router/fallback/proxy debugging of context window errors

* feat(exception_mapping_utils.py): extract special litellm errors from the error string when calling `litellm_proxy/` as the provider. Closes https://github.com/BerriAI/litellm/issues/7259

* fix(user_api_key_auth.py): mark the 'Received Proxy Server Request' span as span kind SERVER. Closes https://github.com/BerriAI/litellm/issues/7298
parent: cca9cfe667
commit: 67b39bacf7
7 changed files with 83 additions and 5 deletions
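The headline change lets `mock_response` simulate a context window error end-to-end. As a rough sketch of the intended workflow (the Router config below follows litellm's documented fallback settings and is not part of this diff; model names are illustrative), a context-window fallback route can be exercised entirely offline:

```python
from litellm import Router

# First deployment always raises the mocked context window error;
# the second is the fallback target, also mocked so no API key is needed.
router = Router(
    model_list=[
        {
            "model_name": "gpt-3.5-turbo",
            "litellm_params": {
                "model": "gpt-3.5-turbo",
                "mock_response": "litellm.ContextWindowExceededError",
            },
        },
        {
            "model_name": "gpt-3.5-turbo-16k",
            "litellm_params": {
                "model": "gpt-3.5-turbo-16k",
                "mock_response": "fallback reached",
            },
        },
    ],
    context_window_fallbacks=[{"gpt-3.5-turbo": ["gpt-3.5-turbo-16k"]}],
)

# The first deployment fails immediately with the mocked error, so the
# router's context-window fallback path runs without a real oversized prompt.
response = router.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
)
print(response.choices[0].message.content)  # "fallback reached"
```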
```diff
@@ -550,6 +550,17 @@ def _handle_mock_potential_exceptions(
             ),  # type: ignore
             model=model,
         )
+    elif (
+        isinstance(mock_response, str)
+        and mock_response == "litellm.ContextWindowExceededError"
+    ):
+        raise litellm.ContextWindowExceededError(
+            message="this is a mock context window exceeded error",
+            llm_provider=getattr(
+                mock_response, "llm_provider", custom_llm_provider or "openai"
+            ),  # type: ignore
+            model=model,
+        )
     elif (
         isinstance(mock_response, str)
         and mock_response == "litellm.InternalServerError"
```
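One detail worth noting: since `mock_response` is a plain `str` in this branch, the `getattr(mock_response, "llm_provider", ...)` lookup always falls through to the default `custom_llm_provider or "openai"`. Called directly (outside a router), the mock surfaces as an ordinary litellm exception, so error handlers can be tested in isolation. A minimal sketch:

```python
import litellm

try:
    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
        mock_response="litellm.ContextWindowExceededError",
    )
except litellm.ContextWindowExceededError as err:
    # No network call was made; router/proxy error handling can key off
    # this exception type exactly as it would for a real provider error.
    print(type(err).__name__, "-", err)
```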
```diff
@@ -734,7 +745,7 @@ def mock_completion(
     except Exception as e:
         if isinstance(e, openai.APIError):
             raise e
-        raise Exception("Mock completion response failed")
+        raise Exception("Mock completion response failed - {}".format(e))
 
 
 @client
```
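The second hunk is a small debuggability fix: the catch-all failure message now carries the underlying exception instead of swallowing it. A sketch of the difference (the root-cause error here is hypothetical):

```python
# Hypothetical root cause inside the mock-building code path:
try:
    raise TypeError("'NoneType' object is not subscriptable")
except Exception as e:
    # Before: Exception("Mock completion response failed")
    # After the fix, the root cause is preserved in the message:
    raise Exception("Mock completion response failed - {}".format(e))
# -> Exception: Mock completion response failed - 'NoneType' object is not subscriptable
```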