fix(router.py): init deployment_latency_map even if model_list is empty

This commit is contained in:
Krrish Dholakia 2023-12-18 17:50:26 -08:00
parent 34509d8dda
commit 071283c102
2 changed files with 2 additions and 2 deletions

View file

@@ -47,7 +47,7 @@ litellm_settings:
   # setting callback class
   # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
-general_settings:
+# general_settings:
 environment_variables:
   # otel: True # OpenTelemetry Logger

View file

@@ -84,11 +84,11 @@ class Router:
         self.set_verbose = set_verbose
         self.deployment_names: List = []  # names of models under litellm_params. ex. azure/chatgpt-v-2
+        self.deployment_latency_map = {}
         if model_list:
             model_list = copy.deepcopy(model_list)
             self.set_model_list(model_list)
             self.healthy_deployments: List = self.model_list
-            self.deployment_latency_map = {}
             for m in model_list:
                 self.deployment_latency_map[m["litellm_params"]["model"]] = 0