diff --git a/litellm/proxy/tests/test_proxy_exception_mapping.py b/litellm/proxy/tests/test_proxy_exception_mapping.py
index 64a7f978d3..7fb5bedbe7 100644
--- a/litellm/proxy/tests/test_proxy_exception_mapping.py
+++ b/litellm/proxy/tests/test_proxy_exception_mapping.py
@@ -18,6 +18,6 @@ try:
     # print("Got openai Timeout Exception. Good job. The proxy mapped to OpenAI exceptions")
 except Exception as e:
     print("\n the proxy did not map to OpenAI exception. Instead got", e)
-    print(e.type)
-    print(e.message)
-    print(e.code)
\ No newline at end of file
+    print(e.type) # type: ignore
+    print(e.message) # type: ignore
+    print(e.code) # type: ignore
\ No newline at end of file
diff --git a/litellm/router.py b/litellm/router.py
index cd278864a1..b4745aa047 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -52,7 +52,7 @@ class Router:
         self.set_model_list(model_list)
         self.healthy_deployments: List = self.model_list
         self.deployment_latency_map = {}
-        self.cooldown_deployments = {} # {"gpt-3.5-turbo": time.time() when it failed / needed a cooldown}
+        self.cooldown_deployments: dict = {} # {"gpt-3.5-turbo": time.time() when it failed / needed a cooldown}
         for m in model_list:
             self.deployment_latency_map[m["litellm_params"]["model"]] = 0