diff --git a/.circleci/config.yml b/.circleci/config.yml
index 08bbad6fa..35707dbff 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -198,7 +198,7 @@ jobs:
             -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
             -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
             -e AWS_REGION_NAME=$AWS_REGION_NAME \
-            -e AUTO_INFER_REGION="True" \
+            -e AUTO_INFER_REGION=True \
             -e OPENAI_API_KEY=$OPENAI_API_KEY \
             -e LANGFUSE_PROJECT1_PUBLIC=$LANGFUSE_PROJECT1_PUBLIC \
             -e LANGFUSE_PROJECT2_PUBLIC=$LANGFUSE_PROJECT2_PUBLIC \
diff --git a/litellm/router.py b/litellm/router.py
index e8b0f658f..ba777a44d 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -2560,6 +2560,7 @@ class Router:
         # set region (if azure model)
         _auto_infer_region = os.environ.get("AUTO_INFER_REGION", False)
         if _auto_infer_region == True or _auto_infer_region == "True":
+            print("Auto inferring region") # noqa
             """
             Hiding behind a feature flag
             When there is a large amount of LLM deployments this makes startup times blow up