feat(proxy_server.py): support maxage cache control

Krrish Dholakia 2023-12-26 17:50:14 +05:30
parent a5f998375c
commit 235526625d
2 changed files with 23 additions and 1 deletion

proxy_server.py

@@ -995,6 +995,20 @@ def get_litellm_model_info(model: dict = {}):
        return {}

def parse_cache_control(cache_control):
    cache_dict = {}
    directives = cache_control.split(", ")
    for directive in directives:
        if "=" in directive:
            key, value = directive.split("=")
            cache_dict[key] = value
        else:
            cache_dict[directive] = True
    return cache_dict

@router.on_event("startup")
async def startup_event():
    global prisma_client, master_key, use_background_health_checks
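For reference, a standalone sketch of what the new parse_cache_control helper returns for a typical header (the function body is copied from the hunk above so the snippet runs on its own; directives are assumed to be separated by a comma and a space, and values stay strings):

# Copy of parse_cache_control from the hunk above, for illustration only.
def parse_cache_control(cache_control):
    cache_dict = {}
    directives = cache_control.split(", ")  # assumes ", "-separated directives
    for directive in directives:
        if "=" in directive:
            key, value = directive.split("=")
            cache_dict[key] = value  # values are kept as strings
        else:
            cache_dict[directive] = True  # bare directives become boolean flags
    return cache_dict

print(parse_cache_control("s-maxage=600, public"))
# {'s-maxage': '600', 'public': True}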
@@ -1223,6 +1237,14 @@ async def chat_completion(
            "body": copy.copy(data),  # use copy instead of deepcopy
        }

        ## Cache Controls
        headers = request.headers
        print("Request Headers:", headers)
        cache_control_header = headers.get("Cache-Control", None)
        if cache_control_header:
            cache_dict = parse_cache_control(cache_control_header)
            data["ttl"] = cache_dict.get("s-maxage")

        print_verbose(f"receiving data: {data}")
        data["model"] = (
            general_settings.get("completion_model", None)  # server default

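A hypothetical client call showing the intended flow (URL, port, and key below are placeholders, not part of this commit): an s-maxage directive in the Cache-Control header is parsed and copied into data["ttl"], which the proxy can then hand to its caching layer. Note the value arrives as a string, and a Cache-Control header without s-maxage leaves ttl as None.

# Hypothetical request against a locally running proxy (endpoint and key are assumptions).
import requests

response = requests.post(
    "http://0.0.0.0:8000/chat/completions",
    headers={
        "Authorization": "Bearer sk-1234",  # proxy master key, if one is configured
        "Cache-Control": "s-maxage=600",    # parsed above -> data["ttl"] = "600"
    },
    json={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "what llm are you"}],
    },
)
print(response.json())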
router.py

@@ -1529,7 +1529,6 @@ class Router:
            ############ End of initializing Clients for OpenAI/Azure ###################
            self.deployment_names.append(model["litellm_params"]["model"])
        self.print_verbose(f"\n Initialized Model List {self.model_list}")

        ############ Users can either pass tpm/rpm as a litellm_param or a router param ###########
        # for get_available_deployment, we use the litellm_param["rpm"]
@@ -1545,6 +1544,7 @@ class Router:
            ):
                model["litellm_params"]["tpm"] = model.get("tpm")

        self.print_verbose(f"\nInitialized Model List {self.model_list}")
        self.model_names = [m["model_name"] for m in model_list]

    def get_model_names(self):