From 3660fb1f7fb6905955604d70830c74ae762a94f0 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 23 Nov 2023 18:16:42 -0800
Subject: [PATCH] (feat) caching: Use seed, max_tokens etc in cache key

---
 litellm/caching.py | 18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff --git a/litellm/caching.py b/litellm/caching.py
index 97b02d0f5d..779080f81d 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -192,17 +192,13 @@ class Cache:
         Returns:
             str: The cache key generated from the arguments, or None if no cache key could be generated.
         """
-        prompt = get_prompt(*args, **kwargs)
-        if prompt is not None:
-            cache_key = prompt
-            if "model" in kwargs:
-                cache_key += kwargs["model"]
-        elif "input" in kwargs:
-            cache_key = " ".join(kwargs["input"])
-            if "model" in kwargs:
-                cache_key += kwargs["model"]
-        else:
-            return None
+        cache_key = ""
+        for param in kwargs:
+            # ignore litellm-internal params so they never affect the key
+            if param in set(["litellm_call_id", "litellm_logging_obj"]):
+                continue
+            param_value = kwargs[param]
+            cache_key += f"{str(param)}: {str(param_value)}"
         return cache_key

     def generate_streaming_content(self, content):
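
Note: with this change the cache key is built from every request kwarg (minus the
litellm-internal bookkeeping params), so options like seed, temperature, and
max_tokens now distinguish cache entries instead of only prompt and model.
A minimal standalone sketch of the new behavior follows; make_cache_key is a
hypothetical helper that mirrors the patched loop in Cache.get_cache_key, not
an actual litellm API:

    def make_cache_key(**kwargs) -> str:
        # Skip litellm-internal params so they never affect the key,
        # mirroring the patched loop in Cache.get_cache_key.
        ignored = {"litellm_call_id", "litellm_logging_obj"}
        cache_key = ""
        for param, value in kwargs.items():
            if param in ignored:
                continue
            cache_key += f"{param}: {value}"
        return cache_key

    # Same prompt and model, different seed -> different cache entries,
    # which the old prompt+model key could not distinguish.
    k1 = make_cache_key(model="gpt-3.5-turbo",
                        messages=[{"role": "user", "content": "hi"}], seed=1)
    k2 = make_cache_key(model="gpt-3.5-turbo",
                        messages=[{"role": "user", "content": "hi"}], seed=2)
    assert k1 != k2

Two side effects of this approach worth noting: the key concatenates kwargs in
insertion order, so the same params passed in a different order produce
different keys (sorting param names before concatenation would make the key
order-insensitive), and the docstring's "or None" clause no longer applies,
since the loop always returns a string.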