(feat) caching: Use seed, max_tokens etc in cache key

ishaan-jaff 2023-11-23 18:16:42 -08:00
parent c89c41b3dc
commit 95b0b904cf


@@ -192,17 +192,13 @@ class Cache:
         Returns:
             str: The cache key generated from the arguments, or None if no cache key could be generated.
         """
-        prompt = get_prompt(*args, **kwargs)
-        if prompt is not None:
-            cache_key = prompt
-            if "model" in kwargs:
-                cache_key += kwargs["model"]
-        elif "input" in kwargs:
-            cache_key = " ".join(kwargs["input"])
-            if "model" in kwargs:
-                cache_key += kwargs["model"]
-        else:
-            return None
+        cache_key = ""
+        for param in kwargs:
+            # ignore litellm params here
+            if param in set(["litellm_call_id", "litellm_logging_obj"]):
+                continue
+            param_value = kwargs[param]
+            cache_key += f"{str(param)}: {str(param_value)}"
         return cache_key

     def generate_streaming_content(self, content):
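
For reference, below is a minimal standalone sketch of the new key-construction logic from the hunk above. The free function get_cache_key is a stand-in for the Cache method shown in the diff, and the call arguments are hypothetical examples; after this change, sampling parameters such as seed and max_tokens participate in the key, so two otherwise-identical requests no longer collide in the cache.

    def get_cache_key(**kwargs) -> str:
        cache_key = ""
        for param in kwargs:
            # skip litellm-internal params so they don't fragment the cache
            if param in set(["litellm_call_id", "litellm_logging_obj"]):
                continue
            param_value = kwargs[param]
            # concatenate each remaining param name/value pair into the key
            cache_key += f"{str(param)}: {str(param_value)}"
        return cache_key

    # Same model and messages, different seed -> different cache keys
    key_a = get_cache_key(model="gpt-3.5-turbo",
                          messages=[{"role": "user", "content": "hi"}],
                          seed=1, max_tokens=10)
    key_b = get_cache_key(model="gpt-3.5-turbo",
                          messages=[{"role": "user", "content": "hi"}],
                          seed=2, max_tokens=10)
    assert key_a != key_b

One consequence of building the key from every non-internal kwarg is that the key depends on keyword order as passed by the caller; callers that supply the same parameters in a different order would produce different strings.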