diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index c4b89072b..18d591673 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -1076,7 +1076,7 @@ async def user_api_key_auth(
                 if not _is_user_proxy_admin(user_id_information):  # if non-admin
                     if route in LiteLLMRoutes.openai_routes.value:
                         pass
-                    elif request['route'].name in LiteLLMRoutes.openai_route_names.value:
+                    elif request["route"].name in LiteLLMRoutes.openai_route_names.value:
                         pass
                     elif (
                         route in LiteLLMRoutes.info_routes.value
@@ -4976,7 +4976,7 @@ async def update_key_fn(request: Request, data: UpdateKeyRequest):
         if "duration" in non_default_values:
             duration = non_default_values.pop("duration")
             duration_s = _duration_in_seconds(duration=duration)
-            expires = datetime.datetime.now(timezone.utc) + timedelta(seconds=duration_s)
+            expires = datetime.now(timezone.utc) + timedelta(seconds=duration_s)
             non_default_values["expires"] = expires
 
         response = await prisma_client.update_data(
diff --git a/litellm/router.py b/litellm/router.py
index 392a0a0f4..529ba0f75 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -3775,7 +3775,7 @@ class Router:
                    )
                    asyncio.create_task(
                        proxy_logging_obj.slack_alerting_instance.send_alert(
-                            message=f"Router: Cooling down deployment: {_api_base}, for {self.cooldown_time} seconds. Got exception: {str(exception_status)}. Change 'cooldown_time' + 'allowed_fails' under 'Router Settings' on proxy UI, or via config - https://docs.litellm.ai/docs/proxy/reliability#fallbacks--retries--timeouts--cooldowns",
+                            message=f"Router: Cooling down Deployment:\nModel Name: {_model_name}\nAPI Base: {_api_base}\n{self.cooldown_time} seconds. Got exception: {str(exception_status)}. Change 'cooldown_time' + 'allowed_fails' under 'Router Settings' on proxy UI, or via config - https://docs.litellm.ai/docs/proxy/reliability#fallbacks--retries--timeouts--cooldowns",
                            alert_type="cooldown_deployment",
                            level="Low",
                        )
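Note on the second hunk: dropping the extra `datetime.` prefix only matters if `proxy_server.py` imports the class directly (e.g. `from datetime import datetime, timedelta, timezone`), which the surrounding `timedelta(...)`/`timezone.utc` usage suggests. A minimal standalone sketch of that assumption, not code taken from the patch itself:

```python
# Sketch: why `datetime.datetime.now(...)` breaks under a direct class import.
from datetime import datetime, timedelta, timezone

duration_s = 3600  # stand-in for the value _duration_in_seconds() would return

# Old expression: `datetime` is already the class here, so accessing
# `.datetime` on it raises AttributeError.
try:
    expires = datetime.datetime.now(timezone.utc) + timedelta(seconds=duration_s)
except AttributeError as e:
    print(f"old expression fails: {e}")

# Patched expression: timezone-aware expiry timestamp.
expires = datetime.now(timezone.utc) + timedelta(seconds=duration_s)
print(expires.isoformat())
```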