diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1ef8c0e33..08bbad6fa 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -198,6 +198,7 @@ jobs:
           -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
           -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
           -e AWS_REGION_NAME=$AWS_REGION_NAME \
+          -e AUTO_INFER_REGION="True" \
           -e OPENAI_API_KEY=$OPENAI_API_KEY \
           -e LANGFUSE_PROJECT1_PUBLIC=$LANGFUSE_PROJECT1_PUBLIC \
           -e LANGFUSE_PROJECT2_PUBLIC=$LANGFUSE_PROJECT2_PUBLIC \
diff --git a/litellm/router.py b/litellm/router.py
index 32c2b61d1..e8b0f658f 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -2559,7 +2559,7 @@ class Router:
 
         # set region (if azure model)
         _auto_infer_region = os.environ.get("AUTO_INFER_REGION", False)
-        if _auto_infer_region == True:
+        if _auto_infer_region == True or _auto_infer_region == "True":
             """
             Hiding behind a feature flag
             When there is a large amount of LLM deployments this makes startup times blow up