diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 0180d232ec..b9f29a584a 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -47,7 +47,7 @@ litellm_settings:
   # setting callback class
   # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
 
-general_settings:
+# general_settings:
 
 environment_variables:
   # otel: True # OpenTelemetry Logger
diff --git a/litellm/router.py b/litellm/router.py
index 410d4964ea..0276f5a444 100644
--- a/litellm/router.py
+++ b/litellm/router.py
@@ -84,11 +84,11 @@ class Router:
 
         self.set_verbose = set_verbose
         self.deployment_names: List = [] # names of models under litellm_params. ex. azure/chatgpt-v-2
+        self.deployment_latency_map = {}
         if model_list:
             model_list = copy.deepcopy(model_list)
             self.set_model_list(model_list)
             self.healthy_deployments: List = self.model_list
-            self.deployment_latency_map = {}
             for m in model_list:
                 self.deployment_latency_map[m["litellm_params"]["model"]] = 0
 
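
The router.py hunk above moves the self.deployment_latency_map = {} initialization out of the
if model_list: guard, so the attribute exists even when a Router is constructed without a model
list. The following is a minimal, standalone sketch of that initialization-order pattern; it is
not the real litellm Router (set_model_list and the other constructor arguments are omitted,
and self.model_list is assigned directly for brevity):

    import copy
    from typing import List, Optional

    class Router:
        """Sketch of the init order from the diff: the latency map is created
        before the `if model_list:` guard, so it is defined for an empty Router."""

        def __init__(self, model_list: Optional[list] = None):
            self.deployment_names: List = []
            # Initialize unconditionally; if this only ran inside `if model_list:`,
            # a Router built without models would lack the attribute entirely.
            self.deployment_latency_map: dict = {}
            if model_list:
                model_list = copy.deepcopy(model_list)
                self.model_list = model_list
                self.healthy_deployments: List = self.model_list
                for m in model_list:
                    self.deployment_latency_map[m["litellm_params"]["model"]] = 0

    # With no model_list, the map is still present instead of raising AttributeError.
    router = Router()
    print(router.deployment_latency_map)  # {}
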