Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
fix(exceptions.py): use correct status code for content policy exceptions
Fixes https://github.com/BerriAI/litellm/issues/4941#issuecomment-2256578732
parent 2a705dbb49
commit 7de70a19e4
3 changed files with 39 additions and 38 deletions
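As context for the hunks below, here is a minimal sketch (not part of the commit) of the behavior the fix targets: a content policy exception should surface a 400 status code even when the caller passes in a response object carrying a different status. It assumes the exception classes are re-exported at the litellm package level and uses the constructor keywords visible in the diff; the model string is a placeholder.

    import httpx
    import litellm

    # e.g. a provider or proxy surfaced the content-filter failure as a 500
    upstream = httpx.Response(
        status_code=500,
        request=httpx.Request(method="POST", url="https://api.openai.com/v1"),
    )

    err = litellm.ContentPolicyViolationError(
        message="Output blocked by content filtering policy",
        model="azure/my-deployment",  # placeholder deployment name
        llm_provider="azure",
        response=upstream,
    )

    # Before this commit the passed-in (or synthetic 500) response was kept;
    # after it, a 400 response is always constructed to match self.status_code.
    assert err.status_code == 400
    assert err.response.status_code == 400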
@@ -122,7 +122,7 @@ class BadRequestError(openai.BadRequestError):  # type: ignore
         self.model = model
         self.llm_provider = llm_provider
         self.litellm_debug_info = litellm_debug_info
-        response = response or httpx.Response(
+        response = httpx.Response(
             status_code=self.status_code,
             request=httpx.Request(
                 method="GET", url="https://litellm.ai"

@@ -287,16 +287,13 @@ class RateLimitError(openai.RateLimitError):  # type: ignore
         self.litellm_debug_info = litellm_debug_info
         self.max_retries = max_retries
         self.num_retries = num_retries
-        if response is None:
-            self.response = httpx.Response(
-                status_code=429,
-                request=httpx.Request(
-                    method="POST",
-                    url=" https://cloud.google.com/vertex-ai/",
-                ),
-            )
-        else:
-            self.response = response
+        self.response = httpx.Response(
+            status_code=429,
+            request=httpx.Request(
+                method="POST",
+                url=" https://cloud.google.com/vertex-ai/",
+            ),
+        )
         super().__init__(
             self.message, response=self.response, body=None
         )  # Call the base class constructor with the parameters it needs

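The same simplification is applied to ServiceUnavailableError and InternalServerError further down: the mock response is now always synthesized, so the status code attached to the exception is deterministic. A short sketch under the same assumptions as above (package-level re-export, keyword names taken from the hunk):

    import httpx
    import litellm

    err = litellm.RateLimitError(
        message="Quota exceeded",
        llm_provider="vertex_ai",
        model="vertex_ai/gemini-pro",  # placeholder model name
        response=httpx.Response(
            status_code=500,  # would previously have been reused as-is
            request=httpx.Request(method="POST", url="https://cloud.google.com/vertex-ai/"),
        ),
    )
    assert err.response.status_code == 429  # always the synthetic 429 response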
@@ -334,7 +331,7 @@ class ContextWindowExceededError(BadRequestError):  # type: ignore
         self.llm_provider = llm_provider
         self.litellm_debug_info = litellm_debug_info
         request = httpx.Request(method="POST", url="https://api.openai.com/v1")
-        self.response = response or httpx.Response(status_code=400, request=request)
+        self.response = httpx.Response(status_code=400, request=request)
         super().__init__(
             message=self.message,
             model=self.model,  # type: ignore

@@ -377,7 +374,7 @@ class RejectedRequestError(BadRequestError):  # type: ignore
         self.litellm_debug_info = litellm_debug_info
         self.request_data = request_data
         request = httpx.Request(method="POST", url="https://api.openai.com/v1")
-        response = httpx.Response(status_code=500, request=request)
+        response = httpx.Response(status_code=400, request=request)
         super().__init__(
             message=self.message,
             model=self.model,  # type: ignore

@@ -419,7 +416,7 @@ class ContentPolicyViolationError(BadRequestError):  # type: ignore
         self.llm_provider = llm_provider
         self.litellm_debug_info = litellm_debug_info
         request = httpx.Request(method="POST", url="https://api.openai.com/v1")
-        self.response = response or httpx.Response(status_code=500, request=request)
+        self.response = httpx.Response(status_code=400, request=request)
         super().__init__(
             message=self.message,
             model=self.model,  # type: ignore

@@ -463,16 +460,13 @@ class ServiceUnavailableError(openai.APIStatusError):  # type: ignore
         self.litellm_debug_info = litellm_debug_info
         self.max_retries = max_retries
         self.num_retries = num_retries
-        if response is None:
-            self.response = httpx.Response(
-                status_code=self.status_code,
-                request=httpx.Request(
-                    method="POST",
-                    url=" https://cloud.google.com/vertex-ai/",
-                ),
-            )
-        else:
-            self.response = response
+        self.response = httpx.Response(
+            status_code=self.status_code,
+            request=httpx.Request(
+                method="POST",
+                url=" https://cloud.google.com/vertex-ai/",
+            ),
+        )
         super().__init__(
             self.message, response=self.response, body=None
         )  # Call the base class constructor with the parameters it needs

@@ -512,16 +506,13 @@ class InternalServerError(openai.InternalServerError):  # type: ignore
         self.litellm_debug_info = litellm_debug_info
         self.max_retries = max_retries
         self.num_retries = num_retries
-        if response is None:
-            self.response = httpx.Response(
-                status_code=self.status_code,
-                request=httpx.Request(
-                    method="POST",
-                    url=" https://cloud.google.com/vertex-ai/",
-                ),
-            )
-        else:
-            self.response = response
+        self.response = httpx.Response(
+            status_code=self.status_code,
+            request=httpx.Request(
+                method="POST",
+                url=" https://cloud.google.com/vertex-ai/",
+            ),
+        )
         super().__init__(
             self.message, response=self.response, body=None
         )  # Call the base class constructor with the parameters it needs

@@ -500,6 +500,16 @@ def mock_completion(
                 llm_provider=getattr(mock_response, "llm_provider", custom_llm_provider or "openai"),  # type: ignore
                 model=model,
             )
+        elif isinstance(mock_response, str) and mock_response.startswith(
+            "Exception: content_filter_policy"
+        ):
+            raise litellm.MockException(
+                status_code=400,
+                message=mock_response,
+                llm_provider="azure",
+                model=model,  # type: ignore
+                request=httpx.Request(method="POST", url="https://api.openai.com/v1/"),
+            )
         time_delay = kwargs.get("mock_delay", None)
         if time_delay is not None:
             time.sleep(time_delay)

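The new branch above gives tests a way to trigger a content-filter failure without calling a real provider. A hedged usage sketch (not from the commit): it assumes the string is routed through litellm.completion's mock_response parameter and that the raised exception exposes a status_code attribute; the deployment name is a placeholder.

    import litellm
    import openai

    try:
        litellm.completion(
            model="azure/my-deployment",  # placeholder deployment name
            messages=[{"role": "user", "content": "hi"}],
            mock_response="Exception: content_filter_policy",
        )
    except openai.APIError as e:  # litellm exceptions subclass the openai ones
        # The mock hook raises a content-filter error with the corrected 400 code.
        print(type(e).__name__, getattr(e, "status_code", None))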
@@ -3,7 +3,7 @@ model_list:
     litellm_params:
       model: "*"
 
-litellm_settings:
-  cache: true
-  cache_params:
-    type: redis
+# litellm_settings:
+#   cache: true
+#   cache_params:
+#     type: redis