diff --git a/litellm/__init__.py b/litellm/__init__.py
index 9e28559e90..d970aa2233 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -876,6 +876,7 @@ from .exceptions import (
     InternalServerError,
     JSONSchemaValidationError,
     LITELLM_EXCEPTION_TYPES,
+    MockException,
 )
 from .budget_manager import BudgetManager
 from .proxy.proxy_cli import run_server
diff --git a/litellm/exceptions.py b/litellm/exceptions.py
index d85510b1d8..414b3e002a 100644
--- a/litellm/exceptions.py
+++ b/litellm/exceptions.py
@@ -723,3 +723,28 @@ class InvalidRequestError(openai.BadRequestError):  # type: ignore
         super().__init__(
             self.message, f"{self.model}"
         )  # Call the base class constructor with the parameters it needs
+
+
+class MockException(openai.APIError):
+    # used for testing
+    def __init__(
+        self,
+        status_code,
+        message,
+        llm_provider,
+        model,
+        request: Optional[httpx.Request] = None,
+        litellm_debug_info: Optional[str] = None,
+        max_retries: Optional[int] = None,
+        num_retries: Optional[int] = None,
+    ):
+        self.status_code = status_code
+        self.message = "litellm.MockException: {}".format(message)
+        self.llm_provider = llm_provider
+        self.model = model
+        self.litellm_debug_info = litellm_debug_info
+        self.max_retries = max_retries
+        self.num_retries = num_retries
+        if request is None:
+            request = httpx.Request(method="POST", url="https://api.openai.com/v1")
+        super().__init__(self.message, request=request, body=None)  # type: ignore
diff --git a/litellm/main.py b/litellm/main.py
index 0aeff31880..e01603b7e7 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -479,7 +479,7 @@ def mock_completion(
         if isinstance(mock_response, Exception):
             if isinstance(mock_response, openai.APIError):
                 raise mock_response
-            raise litellm.APIError(
+            raise litellm.MockException(
                 status_code=getattr(mock_response, "status_code", 500),  # type: ignore
                 message=getattr(mock_response, "text", str(mock_response)),
                 llm_provider=getattr(mock_response, "llm_provider", custom_llm_provider or "openai"),  # type: ignore
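
A minimal usage sketch of the behavior this patch enables, assuming it is applied; the model name and error text below are illustrative assumptions, not part of the diff:

import litellm

# Passing an Exception as mock_response now surfaces as litellm.MockException
# (an openai.APIError subclass) rather than the generic litellm.APIError, so
# tests can distinguish mock-triggered failures from real provider errors.
try:
    litellm.completion(
        model="gpt-3.5-turbo",  # illustrative model name
        messages=[{"role": "user", "content": "hi"}],
        mock_response=Exception("simulated provider outage"),
    )
except litellm.MockException as e:
    # status_code falls back to 500 for a plain Exception, and the message
    # carries the new "litellm.MockException: ..." prefix
    print(e.status_code, e.message)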