From 69c6bbd50b6366b3919a4d30ad491d1aedb6d615 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 23 Nov 2023 17:14:44 -0800
Subject: [PATCH] (chore) remove bloat: deprecated api.litellm cache

---
 litellm/caching.py | 28 ----------------------------
 1 file changed, 28 deletions(-)

diff --git a/litellm/caching.py b/litellm/caching.py
index 2a042dcbf8..97b02d0f5d 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -147,32 +147,6 @@ class DualCache(BaseCache):
             return result
         except Exception as e:
             traceback.print_exc()
-
-#### DEPRECATED ####
-class HostedCache(BaseCache):
-    def set_cache(self, key, value, **kwargs):
-        if "ttl" in kwargs:
-            logging.debug("LiteLLM Caching: TTL is not supported for hosted cache!")
-        # make a post request to api.litellm.ai/set_cache
-        import requests
-        url = f"https://api.litellm.ai/set_cache?key={key}&value={str(value)}"
-        requests.request("POST", url) # post request to set this in the hosted litellm cache
-
-    def get_cache(self, key, **kwargs):
-        import requests
-        url = f"https://api.litellm.ai/get_cache?key={key}"
-        cached_response = requests.request("GET", url)
-        cached_response = cached_response.text
-        if cached_response == "NONE": # api.litellm.ai returns "NONE" if it's not a cache hit
-            return None
-        if cached_response != None:
-            try:
-                cached_response = json.loads(cached_response) # Convert string to dictionary
-                cached_response['cache'] = True # set cache-hit flag to True
-                return cached_response
-            except:
-                return cached_response
-
 #### LiteLLM.Completion Cache ####
 class Cache:
@@ -202,8 +176,6 @@ class Cache:
             self.cache = RedisCache(host, port, password)
         if type == "local":
             self.cache = InMemoryCache()
-        if type == "hosted":
-            self.cache = HostedCache()
         if "cache" not in litellm.input_callback:
             litellm.input_callback.append("cache")
         if "cache" not in litellm.success_callback:
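
Note: with HostedCache and the "hosted" cache type removed, the backends left in Cache are "local" and "redis". Below is a minimal usage sketch of the surviving configuration; the constructor arguments are inferred from the Cache.__init__ branches visible in the second hunk, and the Redis connection values are placeholders, not real defaults.

    # Minimal sketch of configuring LiteLLM's completion cache after this
    # patch. Assumes the Cache(type=..., host=..., port=..., password=...)
    # signature implied by the hunk above; connection values are placeholders.
    import litellm
    from litellm.caching import Cache

    # In-memory cache: no external services required.
    litellm.cache = Cache(type="local")

    # Redis-backed cache: requires a reachable Redis instance.
    litellm.cache = Cache(
        type="redis",
        host="localhost",       # placeholder
        port="6379",            # placeholder
        password="my-password", # placeholder
    )

    # type="hosted" no longer matches any branch after this patch;
    # HostedCache itself is deleted from litellm/caching.py.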