diff --git a/litellm/exceptions.py b/litellm/exceptions.py
index 51923f86eb..c440d7eabe 100644
--- a/litellm/exceptions.py
+++ b/litellm/exceptions.py
@@ -8,7 +8,7 @@ from openai.error import (
 )
 
 
-class AuthenticationError(AuthenticationError):
+class AuthenticationError(AuthenticationError):  # type: ignore
     def __init__(self, message, llm_provider):
         self.status_code = 401
         self.message = message
@@ -18,7 +18,7 @@ class AuthenticationError(AuthenticationError):
         )  # Call the base class constructor with the parameters it needs
 
 
-class InvalidRequestError(InvalidRequestError):
+class InvalidRequestError(InvalidRequestError):  # type: ignore
     def __init__(self, message, model, llm_provider):
         self.status_code = 400
         self.message = message
@@ -29,7 +29,7 @@ class InvalidRequestError(InvalidRequestError):
         )  # Call the base class constructor with the parameters it needs
 
 
-class RateLimitError(RateLimitError):
+class RateLimitError(RateLimitError):  # type: ignore
     def __init__(self, message, llm_provider):
         self.status_code = 429
         self.message = message
@@ -39,7 +39,7 @@ class RateLimitError(RateLimitError):
         )  # Call the base class constructor with the parameters it needs
 
 
-class ServiceUnavailableError(ServiceUnavailableError):
+class ServiceUnavailableError(ServiceUnavailableError):  # type: ignore
     def __init__(self, message, llm_provider):
         self.status_code = 500
         self.message = message
@@ -49,7 +49,7 @@ class ServiceUnavailableError(ServiceUnavailableError):
         )  # Call the base class constructor with the parameters it needs
 
 
-class OpenAIError(OpenAIError):
+class OpenAIError(OpenAIError):  # type: ignore
     def __init__(self, original_exception):
         self.status_code = original_exception.http_status
         super().__init__(
diff --git a/litellm/llms/base.py b/litellm/llms/base.py
index bde09f2fb0..bf6a3dd3a3 100644
--- a/litellm/llms/base.py
+++ b/litellm/llms/base.py
@@ -2,11 +2,11 @@
 
 
 class BaseLLM:
-    def validate_environment():  # set up the environment required to run the model
+    def validate_environment(self):  # set up the environment required to run the model
         pass
 
-    def completion():  # logic for parsing in - calling - parsing out model completion calls
+    def completion(self):  # logic for parsing in - calling - parsing out model completion calls
         pass
 
-    def embedding():  # logic for parsing in - calling - parsing out model embedding calls
+    def embedding(self):  # logic for parsing in - calling - parsing out model embedding calls
         pass
diff --git a/litellm/timeout.py b/litellm/timeout.py
index cca4b06e70..d88e16c96e 100644
--- a/litellm/timeout.py
+++ b/litellm/timeout.py
@@ -11,7 +11,7 @@ from threading import Thread
 
 from openai.error import Timeout
 
 
-def timeout(timeout_duration: float = None, exception_to_raise=Timeout):
+def timeout(timeout_duration: float = 0.0, exception_to_raise=Timeout):
     """
     Wraps a function to raise the specified exception if execution time is greater than the specified timeout.
diff --git a/litellm/utils.py b/litellm/utils.py
index 3190b56d66..a14b5f8f1e 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -504,11 +504,11 @@ def get_optional_params(
 
 def load_test_model(
     model: str,
-    custom_llm_provider: str = None,
-    custom_api_base: str = None,
-    prompt: str = None,
-    num_calls: int = None,
-    force_timeout: int = None,
+    custom_llm_provider: str = "",
+    custom_api_base: str = "",
+    prompt: str = "",
+    num_calls: int = 0,
+    force_timeout: int = 0,
 ):
     test_prompt = "Hey, how's it going"
     test_calls = 100