# +-----------------------------------------------+
# |                                               |
# |           Give Feedback / Get Help            |
# | https://github.com/BerriAI/litellm/issues/new |
# |                                               |
# +-----------------------------------------------+
#
# Thank you users! We ❤️ you! - Krrish & Ishaan
## LiteLLM versions of the OpenAI Exception Types
import re
from typing import Optional
import httpx
import openai
from litellm.types.utils import LiteLLMCommonStrings
from litellm.litellm_core_utils.sensitive_data_masker import SensitiveDataMasker
# Initialize a single SensitiveDataMasker instance to be used across all exception classes
_sensitive_data_masker = SensitiveDataMasker()
def _mask_message(message: Optional[str]) -> Optional[str]:
"""Helper function to mask sensitive data in exception messages"""
if not message:
return message
    # Directly process the message string to mask sensitive patterns
# Common API key patterns (sk-, pk-, api-, etc.)
patterns = [
# OpenAI and similar keys
r'sk-[a-zA-Z0-9]{10,}',
r'sk_[a-zA-Z0-9]{10,}',
# AWS keys
r'AKIA[0-9A-Z]{16}',
        r'\b[a-zA-Z0-9+/]{40}\b',  # AWS secret access key (broad: any bare 40-char base64-like run)
        # Azure keys (broad: any bare 32-char alphanumeric run)
        r'\b[a-zA-Z0-9]{32}\b',
# Database connection strings
        r'mongodb(?:\+srv)?:\/\/[^:]+:[^@]+@[^\/]+',  # mask the full URI, credentials included
# API tokens and keys
r'key-[a-zA-Z0-9]{24,}',
r'token-[a-zA-Z0-9]{24,}',
# Named keys and secrets
r'secret_[a-zA-Z0-9]{5,}',
r'pass[a-zA-Z0-9]{3,}word',
# Generic patterns with capture groups
r'(API key[:=]?\s*)[\'"]?([a-zA-Z0-9_\-\.]{6,})[\'"]?',
r'(api[_-]?key[:=]?\s*)[\'"]?([a-zA-Z0-9_\-\.]{6,})[\'"]?',
r'(secret[_-]?key[:=]?\s*)[\'"]?([a-zA-Z0-9_\-\.]{6,})[\'"]?',
r'(access[_-]?key[:=]?\s*)[\'"]?([a-zA-Z0-9_\-\.]{6,})[\'"]?',
r'(password[:=]?\s*)[\'"]?([a-zA-Z0-9_\-\.]{6,})[\'"]?',
r'(token[:=]?\s*)[\'"]?([a-zA-Z0-9_\-\.]{6,})[\'"]?',
]
    # Apply masking
    def _replace_match(match: re.Match) -> str:
        groups = match.groups()
        if len(groups) > 1 and groups[1] is not None:
            # Pattern has capturing groups: keep the identifying prefix
            # (e.g. "api_key: ") and mask only the secret value
            return match.group(1) + _sensitive_data_masker._mask_value(match.group(2))
        # Otherwise mask the entire match
        return _sensitive_data_masker._mask_value(match.group(0))

    masked_message = message
    for pattern in patterns:
        masked_message = re.sub(pattern, _replace_match, masked_message)
    return masked_message
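# Illustrative sketch of the masking behavior. The exact masked shape depends on
# SensitiveDataMasker._mask_value, so the outputs below are assumptions, not a spec:
#
#     >>> _mask_message("Incorrect api_key: sk-abc123def456ghi789 provided")
#     'Incorrect api_key: sk-a****...****789 provided'   # key value redacted
#     >>> _mask_message(None)  # falsy inputs pass through untouched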
class AuthenticationError(openai.AuthenticationError): # type: ignore
def __init__(
self,
message,
llm_provider,
model,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 401
self.message = _mask_message("litellm.AuthenticationError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
self.response = response or httpx.Response(
status_code=self.status_code,
request=httpx.Request(
method="GET", url="https://litellm.ai"
), # mock request object
)
super().__init__(
self.message, response=self.response, body=None
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
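# Example usage (illustrative sketch): __str__ appends the retry bookkeeping
# whenever num_retries / max_retries were supplied:
#
#     try:
#         raise AuthenticationError(
#             message="Incorrect API key provided",
#             llm_provider="openai",
#             model="gpt-4",
#             num_retries=2,
#             max_retries=3,
#         )
#     except AuthenticationError as e:
#         assert e.status_code == 401
#         print(e)  # "...: ... LiteLLM Retried: 2 times, LiteLLM Max Retries: 3"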
# raised when an invalid model is passed, e.g. gpt-8
class NotFoundError(openai.NotFoundError): # type: ignore
def __init__(
self,
message,
model,
llm_provider,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 404
self.message = _mask_message("litellm.NotFoundError: {}".format(message))
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
self.response = response or httpx.Response(
status_code=self.status_code,
request=httpx.Request(
method="GET", url="https://litellm.ai"
), # mock request object
)
super().__init__(
self.message, response=self.response, body=None
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class BadRequestError(openai.BadRequestError): # type: ignore
def __init__(
self,
message,
model,
llm_provider,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
body: Optional[dict] = None,
):
self.status_code = 400
self.message = _mask_message("litellm.BadRequestError: {}".format(message))
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
        response = response or httpx.Response(
            status_code=self.status_code,
            request=httpx.Request(
                method="GET", url="https://litellm.ai"
            ),  # mock request object
        )
self.max_retries = max_retries
self.num_retries = num_retries
super().__init__(
self.message, response=response, body=body
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class UnprocessableEntityError(openai.UnprocessableEntityError): # type: ignore
def __init__(
self,
message,
model,
llm_provider,
response: httpx.Response,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 422
self.message = _mask_message("litellm.UnprocessableEntityError: {}".format(message))
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
super().__init__(
self.message, response=response, body=None
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class Timeout(openai.APITimeoutError): # type: ignore
def __init__(
self,
message,
model,
llm_provider,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
headers: Optional[dict] = None,
):
request = httpx.Request(
method="POST",
url="https://api.openai.com/v1",
)
super().__init__(
request=request
) # Call the base class constructor with the parameters it needs
self.status_code = 408
self.message = _mask_message("litellm.Timeout: {}".format(message))
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
self.headers = headers
# custom function to convert to str
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
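# Example (sketch): unlike the status-code errors above, Timeout wraps a request
# that never produced a response, so there is no `response` argument:
#
#     raise Timeout(
#         message="Request timed out after 600.0s",
#         model="gpt-4",
#         llm_provider="openai",
#     )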
class PermissionDeniedError(openai.PermissionDeniedError): # type:ignore
def __init__(
self,
message,
llm_provider,
model,
response: httpx.Response,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 403
self.message = _mask_message("litellm.PermissionDeniedError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
super().__init__(
self.message, response=response, body=None
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class RateLimitError(openai.RateLimitError): # type: ignore
def __init__(
self,
message,
llm_provider,
model,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 429
self.message = _mask_message("litellm.RateLimitError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
_response_headers = (
getattr(response, "headers", None) if response is not None else None
)
self.response = httpx.Response(
status_code=429,
headers=_response_headers,
request=httpx.Request(
method="POST",
url=" https://cloud.google.com/vertex-ai/",
),
)
super().__init__(
self.message, response=self.response, body=None
) # Call the base class constructor with the parameters it needs
self.code = "429"
self.type = "throttling_error"
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
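# Example (sketch, assumes `import time`): because the upstream 429's headers are
# copied onto the mock response, callers can still honor a provider-sent Retry-After:
#
#     except RateLimitError as e:
#         retry_after = e.response.headers.get("retry-after")  # may be None
#         if retry_after is not None:
#             time.sleep(float(retry_after))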
# subclass of BadRequestError - meant to give more granularity for handling context window exceeded errors
class ContextWindowExceededError(BadRequestError): # type: ignore
def __init__(
self,
message,
model,
llm_provider,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
):
self.status_code = 400
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
self.response = httpx.Response(status_code=400, request=request)
super().__init__(
message=message,
model=self.model, # type: ignore
llm_provider=self.llm_provider, # type: ignore
response=self.response,
litellm_debug_info=self.litellm_debug_info,
) # Call the base class constructor with the parameters it needs
# set after, to make it clear the raised error is a context window exceeded error
self.message = _mask_message("litellm.ContextWindowExceededError: {}".format(self.message))
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
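# Example (sketch): since this subclasses BadRequestError, a broad handler still
# catches it, and isinstance lets callers special-case prompt trimming:
#
#     except BadRequestError as e:
#         if isinstance(e, ContextWindowExceededError):
#             ...  # shorten the prompt or route to a larger-context model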
# sub class of bad request error - meant to help us catch guardrails-related errors on proxy.
class RejectedRequestError(BadRequestError): # type: ignore
def __init__(
self,
message,
model,
llm_provider,
request_data: dict,
litellm_debug_info: Optional[str] = None,
):
self.status_code = 400
self.message = _mask_message("litellm.RejectedRequestError: {}".format(message))
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
self.request_data = request_data
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
response = httpx.Response(status_code=400, request=request)
super().__init__(
message=self.message,
model=self.model, # type: ignore
llm_provider=self.llm_provider, # type: ignore
response=response,
litellm_debug_info=self.litellm_debug_info,
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class ContentPolicyViolationError(BadRequestError): # type: ignore
# Error code: 400 - {'error': {'code': 'content_policy_violation', 'message': 'Your request was rejected as a result of our safety system. Image descriptions generated from your prompt may contain text that is not allowed by our safety system. If you believe this was done in error, your request may succeed if retried, or by adjusting your prompt.', 'param': None, 'type': 'invalid_request_error'}}
def __init__(
self,
message,
model,
llm_provider,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
):
self.status_code = 400
self.message = _mask_message("litellm.ContentPolicyViolationError: {}".format(message))
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
self.response = httpx.Response(status_code=400, request=request)
super().__init__(
message=self.message,
model=self.model, # type: ignore
llm_provider=self.llm_provider, # type: ignore
response=self.response,
litellm_debug_info=self.litellm_debug_info,
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class ServiceUnavailableError(openai.APIStatusError): # type: ignore
def __init__(
self,
message,
llm_provider,
model,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 503
self.message = _mask_message("litellm.ServiceUnavailableError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
self.response = httpx.Response(
status_code=self.status_code,
request=httpx.Request(
method="POST",
url=" https://cloud.google.com/vertex-ai/",
),
)
super().__init__(
self.message, response=self.response, body=None
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class InternalServerError(openai.InternalServerError): # type: ignore
def __init__(
self,
message,
llm_provider,
model,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 500
self.message = _mask_message("litellm.InternalServerError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
self.response = httpx.Response(
status_code=self.status_code,
request=httpx.Request(
method="POST",
url=" https://cloud.google.com/vertex-ai/",
),
)
super().__init__(
self.message, response=self.response, body=None
) # Call the base class constructor with the parameters it needs
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
# raise this when the API returns an invalid response object - https://github.com/openai/openai-python/blob/1be14ee34a0f8e42d3f9aa5451aa4cb161f1781f/openai/api_requestor.py#L401
class APIError(openai.APIError): # type: ignore
def __init__(
self,
status_code: int,
message,
llm_provider,
model,
request: Optional[httpx.Request] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = status_code
self.message = _mask_message("litellm.APIError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
if request is None:
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
super().__init__(self.message, request=request, body=None) # type: ignore
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
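# Example (sketch; provider/model names are placeholders): unrecognized upstream
# failures are commonly re-wrapped as APIError so callers see one consistent type
# while the original status code is preserved:
#
#     except httpx.HTTPStatusError as upstream:
#         raise APIError(
#             status_code=upstream.response.status_code,
#             message=str(upstream),
#             llm_provider="anthropic",
#             model="claude-3-opus",
#         )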
# raised when the request never reaches the provider (connection-level failure)
class APIConnectionError(openai.APIConnectionError): # type: ignore
def __init__(
self,
message,
llm_provider,
model,
request: Optional[httpx.Request] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.message = _mask_message("litellm.APIConnectionError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.status_code = 500
self.litellm_debug_info = litellm_debug_info
self.request = httpx.Request(method="POST", url="https://api.openai.com/v1")
self.max_retries = max_retries
self.num_retries = num_retries
super().__init__(message=self.message, request=self.request)
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
# raised when the provider returns a response that fails validation
class APIResponseValidationError(openai.APIResponseValidationError): # type: ignore
def __init__(
self,
message,
llm_provider,
model,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.message = _mask_message("litellm.APIResponseValidationError: {}".format(message))
self.llm_provider = llm_provider
self.model = model
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
response = httpx.Response(status_code=500, request=request)
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
super().__init__(response=response, body=None, message=message)
def __str__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
def __repr__(self):
_message = self.message
if self.num_retries:
_message += f" LiteLLM Retried: {self.num_retries} times"
if self.max_retries:
_message += f", LiteLLM Max Retries: {self.max_retries}"
return _message
class JSONSchemaValidationError(APIResponseValidationError):
def __init__(
self, model: str, llm_provider: str, raw_response: str, schema: str
) -> None:
self.raw_response = raw_response
self.schema = schema
self.model = model
message = _mask_message("litellm.JSONSchemaValidationError: model={}, returned an invalid response={}, for schema={}.\nAccess raw response with `e.raw_response`".format(
model, raw_response, schema
))
self.message = message
super().__init__(model=model, message=message, llm_provider=llm_provider)
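# Example (sketch): the raw model output and the expected schema stay attached to
# the exception, as the message itself advertises:
#
#     except JSONSchemaValidationError as e:
#         invalid_output = e.raw_response  # raw text that failed validation
#         expected_schema = e.schema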
class OpenAIError(openai.OpenAIError): # type: ignore
def __init__(self, original_exception=None):
super().__init__()
self.llm_provider = "openai"
class UnsupportedParamsError(BadRequestError):
def __init__(
self,
message,
llm_provider: Optional[str] = None,
model: Optional[str] = None,
status_code: int = 400,
response: Optional[httpx.Response] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = 400
self.message = _mask_message("litellm.UnsupportedParamsError: {}".format(message))
self.model = model
self.llm_provider = llm_provider
self.litellm_debug_info = litellm_debug_info
response = response or httpx.Response(
status_code=self.status_code,
request=httpx.Request(
method="GET", url="https://litellm.ai"
), # mock request object
)
        self.max_retries = max_retries
        self.num_retries = num_retries
        super().__init__(
            message=message,
            model=model,  # type: ignore
            llm_provider=llm_provider,  # type: ignore
            response=response,
            litellm_debug_info=litellm_debug_info,
        )  # Call the base class constructor with the parameters it needs
        # re-set after super().__init__, which overwrites self.message with the BadRequestError prefix
        self.message = _mask_message("litellm.UnsupportedParamsError: {}".format(message))
LITELLM_EXCEPTION_TYPES = [
AuthenticationError,
NotFoundError,
BadRequestError,
UnprocessableEntityError,
UnsupportedParamsError,
Timeout,
PermissionDeniedError,
RateLimitError,
ContextWindowExceededError,
RejectedRequestError,
ContentPolicyViolationError,
InternalServerError,
ServiceUnavailableError,
APIError,
APIConnectionError,
APIResponseValidationError,
OpenAIError,
JSONSchemaValidationError,
]
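# Example (sketch): the list makes a generic "is this a litellm error?" check cheap,
# e.g. in a proxy handler mapping exceptions back to HTTP status codes:
#
#     def map_to_status_code(exc: Exception) -> int:
#         if isinstance(exc, tuple(LITELLM_EXCEPTION_TYPES)):
#             return getattr(exc, "status_code", 500)
#         return 500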
class BudgetExceededError(Exception):
def __init__(
self, current_cost: float, max_budget: float, message: Optional[str] = None
):
self.current_cost = current_cost
self.max_budget = max_budget
message = (
message
or _mask_message(f"Budget has been exceeded! Current cost: {current_cost}, Max budget: {max_budget}")
)
self.message = message
super().__init__(message)
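# Example (sketch; spend-tracking names are hypothetical): a pre-call budget guard:
#
#     if current_spend + estimated_cost > user_max_budget:
#         raise BudgetExceededError(
#             current_cost=current_spend, max_budget=user_max_budget
#         )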
## DEPRECATED ##
class InvalidRequestError(openai.BadRequestError): # type: ignore
def __init__(self, message, model, llm_provider):
self.status_code = 400
self.message = _mask_message("litellm.InvalidRequestError: {}".format(message))
self.model = model
self.llm_provider = llm_provider
response = httpx.Response(
status_code=self.status_code,
request=httpx.Request(
method="GET", url="https://litellm.ai"
), # mock request object
)
super().__init__(
self.message, response=response, body=None
) # Call the base class constructor with the parameters it needs
class MockException(openai.APIError):
# used for testing
def __init__(
self,
status_code: int,
message,
llm_provider,
model,
request: Optional[httpx.Request] = None,
litellm_debug_info: Optional[str] = None,
max_retries: Optional[int] = None,
num_retries: Optional[int] = None,
):
self.status_code = status_code
self.message = _mask_message("litellm.MockException: {}".format(message))
self.llm_provider = llm_provider
self.model = model
self.litellm_debug_info = litellm_debug_info
self.max_retries = max_retries
self.num_retries = num_retries
if request is None:
request = httpx.Request(method="POST", url="https://api.openai.com/v1")
super().__init__(self.message, request=request, body=None) # type: ignore
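# Example (sketch, assumes pytest): simulate a provider failure in tests without
# any network call; MockException subclasses openai.APIError, so generic handlers fire:
#
#     with pytest.raises(openai.APIError):
#         raise MockException(
#             status_code=503,
#             message="simulated outage",
#             llm_provider="openai",
#             model="gpt-4",
#         )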
class LiteLLMUnknownProvider(BadRequestError):
def __init__(self, model: str, custom_llm_provider: Optional[str] = None):
self.message = _mask_message(LiteLLMCommonStrings.llm_provider_not_provided.value.format(
model=model, custom_llm_provider=custom_llm_provider
))
super().__init__(
self.message, model=model, llm_provider=custom_llm_provider, response=None
)
def __str__(self):
return self.message