feat(proxy_server.py): support maxage cache control

This commit is contained in:
Krrish Dholakia 2023-12-26 17:50:14 +05:30
parent a5f998375c
commit 235526625d
2 changed files with 23 additions and 1 deletions

View file

@@ -1529,7 +1529,6 @@ class Router:
############ End of initializing Clients for OpenAI/Azure ###################
self.deployment_names.append(model["litellm_params"]["model"])
self.print_verbose(f"\n Initialized Model List {self.model_list}")
############ Users can either pass tpm/rpm as a litellm_param or a router param ###########
# for get_available_deployment, we use the litellm_param["rpm"]
@@ -1545,6 +1544,7 @@ class Router:
):
model["litellm_params"]["tpm"] = model.get("tpm")
self.print_verbose(f"\nInitialized Model List {self.model_list}")
self.model_names = [m["model_name"] for m in model_list]
def get_model_names(self):