Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
fix(litellm_pre_call_utils.py): add support for key level caching params
commit af1ae80277 (parent c5a611ca91)
4 changed files with 42 additions and 2 deletions
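
For context: this change lets cache parameters stored in an API key's metadata apply to every request made with that key. A minimal usage sketch, assuming a key is minted through the proxy's /key/generate endpoint with a {"cache": {"ttl": 600}} metadata payload; the key-creation call itself is not part of this commit:

    # Hypothetical usage sketch (not part of this commit): mint a virtual key
    # whose metadata carries cache params. Requests authenticated with that
    # key then get data["cache"] = {"ttl": 600} injected on the proxy side.
    import requests

    resp = requests.post(
        "http://localhost:4000/key/generate",         # local proxy URL (assumed)
        headers={"Authorization": "Bearer sk-1234"},  # master key placeholder
        json={"metadata": {"cache": {"ttl": 600}}},   # key-level cache params
    )
    print(resp.json()["key"])  # the generated virtual key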
litellm/proxy/litellm_pre_call_utils.py

@@ -3,6 +3,7 @@ from fastapi import Request
 from typing import Any, Dict, Optional, TYPE_CHECKING
 from litellm.proxy._types import UserAPIKeyAuth
 from litellm._logging import verbose_proxy_logger, verbose_logger
+from litellm.types.utils import SupportedCacheControls
 
 if TYPE_CHECKING:
     from litellm.proxy.proxy_server import ProxyConfig as _ProxyConfig
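
The one added line in this hunk is the SupportedCacheControls import, which gates which cache keys may be forwarded from key metadata. A sketch of what such a string enum could look like; the member list below is an assumption for illustration (the authoritative definition lives in litellm/types/utils.py):

    import enum

    class SupportedCacheControls(str, enum.Enum):
        # Assumed members, for illustration only; see litellm/types/utils.py
        # for the real definition.
        ttl = "ttl"
        s_maxage = "s-maxage"
        no_cache = "no-cache"
        no_store = "no-store"

One Python wrinkle worth noting: testing a plain string with `in` against an Enum class is value-based only on Python 3.12+; older interpreters raise TypeError for non-member strings, so the `k in SupportedCacheControls` check in the next hunk assumes either a recent interpreter or a custom __contains__.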
@@ -68,6 +69,15 @@ async def add_litellm_data_to_request(
         cache_dict = parse_cache_control(cache_control_header)
         data["ttl"] = cache_dict.get("s-maxage")
 
+    ### KEY-LEVEL CACHING
+    key_metadata = user_api_key_dict.metadata
+    if "cache" in key_metadata:
+        data["cache"] = {}
+        if isinstance(key_metadata["cache"], dict):
+            for k, v in key_metadata["cache"].items():
+                if k in SupportedCacheControls:
+                    data["cache"][k] = v
+
     verbose_proxy_logger.debug("receiving data: %s", data)
     # users can pass in 'user' param to /chat/completions. Don't override it
     if data.get("user", None) is None and user_api_key_dict.user_id is not None:
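
A self-contained sketch of the merge this hunk performs, with a plain set standing in for the real SupportedCacheControls enum so the snippet runs on any Python version (names and values here are illustrative):

    from typing import Any, Dict

    # Stand-in for litellm.types.utils.SupportedCacheControls (assumed values).
    SUPPORTED_CACHE_CONTROLS = {"ttl", "s-maxage", "no-cache", "no-store"}

    def apply_key_level_cache(
        data: Dict[str, Any], key_metadata: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Copy recognized cache controls from key metadata into the request body."""
        if "cache" in key_metadata:
            # Mirrors the commit: data["cache"] is reset even when the metadata
            # value is not a dict, clobbering any caller-supplied cache params.
            data["cache"] = {}
            if isinstance(key_metadata["cache"], dict):
                for k, v in key_metadata["cache"].items():
                    if k in SUPPORTED_CACHE_CONTROLS:  # unrecognized keys are dropped
                        data["cache"][k] = v
        return data

    print(apply_key_level_cache({}, {"cache": {"ttl": 600, "bogus": 1}}))
    # -> {'cache': {'ttl': 600}}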