mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-25 18:54:30 +00:00
fix(router.py): init deployment_latency_map even if model_list is empty
This commit is contained in:
parent 34509d8dda
commit 071283c102
2 changed files with 2 additions and 2 deletions
@@ -47,7 +47,7 @@ litellm_settings:
   # setting callback class
   # callbacks: custom_callbacks.proxy_handler_instance # sets litellm.callbacks = [proxy_handler_instance]
 
-general_settings:
+# general_settings:
 
 environment_variables:
   # otel: True # OpenTelemetry Logger
@@ -84,11 +84,11 @@ class Router:
 
         self.set_verbose = set_verbose
         self.deployment_names: List = [] # names of models under litellm_params. ex. azure/chatgpt-v-2
+        self.deployment_latency_map = {}
         if model_list:
             model_list = copy.deepcopy(model_list)
             self.set_model_list(model_list)
             self.healthy_deployments: List = self.model_list
-            self.deployment_latency_map = {}
             for m in model_list:
                 self.deployment_latency_map[m["litellm_params"]["model"]] = 0
 
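The fix moves `self.deployment_latency_map = {}` above the `if model_list:` guard, so the attribute exists even when a `Router` is constructed without any deployments. A minimal sketch of the behaviour this placement prevents, using a stripped-down stand-in class rather than litellm's real `Router` (only the attribute names and init order mirror the diff above; everything else is illustrative):

```python
import copy
from typing import List, Optional


class RouterSketch:
    """Stripped-down stand-in for illustration only; not litellm's Router."""

    def __init__(self, model_list: Optional[list] = None):
        self.deployment_names: List = []
        # Fixed placement: the map always exists, even with no deployments.
        self.deployment_latency_map = {}
        if model_list:
            model_list = copy.deepcopy(model_list)
            self.model_list = model_list
            self.healthy_deployments: List = self.model_list
            for m in model_list:
                self.deployment_latency_map[m["litellm_params"]["model"]] = 0


# Before the fix, the attribute was only assigned inside `if model_list:`,
# so a router built with no model list raised AttributeError on first access.
empty_router = RouterSketch()
print(empty_router.deployment_latency_map)  # {}

populated = RouterSketch(
    model_list=[{"litellm_params": {"model": "azure/chatgpt-v-2"}}]
)
print(populated.deployment_latency_map)  # {'azure/chatgpt-v-2': 0}
```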