fix(caching.py): add more debug statements for caching

This commit is contained in:
Krrish Dholakia 2024-02-06 15:12:57 -08:00
parent d1549cb2f3
commit b523a43ba0

View file

@@ -15,8 +15,11 @@ from openai._models import BaseModel as OpenAIObject
 from litellm._logging import verbose_logger


-def print_verbose(print_statement):
+def print_verbose(print_statement, level="DEBUG"):
     try:
+        if level == "INFO":
+            verbose_logger.info(print_statement)
+        else:
-        verbose_logger.debug(print_statement)
+            verbose_logger.debug(print_statement)
         if litellm.set_verbose:
             print(print_statement)  # noqa
@@ -124,7 +127,7 @@ class RedisCache(BaseCache):
             self.redis_client.set(name=key, value=str(value), ex=ttl)
         except Exception as e:
             # NON blocking - notify users Redis is throwing an exception
-            logging.debug("LiteLLM Caching: set() - Got exception from REDIS : ", e)
+            print_verbose("LiteLLM Caching: set() - Got exception from REDIS : ", e)

     async def async_set_cache(self, key, value, **kwargs):
         _redis_client = self.init_async_client()
@@ -135,9 +138,12 @@ class RedisCache(BaseCache):
         )
         try:
             await redis_client.set(name=key, value=json.dumps(value), ex=ttl)
+            print_verbose(
+                f"LiteLLM Caching: key={key} added to cache", level="INFO"
+            )
         except Exception as e:
             # NON blocking - notify users Redis is throwing an exception
-            logging.debug("LiteLLM Caching: set() - Got exception from REDIS : ", e)
+            print_verbose("LiteLLM Caching: set() - Got exception from REDIS : ", e)

     async def async_set_cache_pipeline(self, cache_list, ttl=None):
         """
@@ -166,14 +172,16 @@ class RedisCache(BaseCache):
         except Exception as e:
             print_verbose(f"Error occurred in pipeline write - {str(e)}")
             # NON blocking - notify users Redis is throwing an exception
-            logging.debug("LiteLLM Caching: set() - Got exception from REDIS : ", e)
+            print_verbose("LiteLLM Caching: set() - Got exception from REDIS : ", e)

     def _get_cache_logic(self, cached_response: Any):
         """
         Common 'get_cache_logic' across sync + async redis client implementations
         """
+        print_verbose(f"original cached response: {cached_response}")
         if cached_response is None:
             return cached_response
+        try:
             # cached_response is in `b{} convert it to ModelResponse
             cached_response = cached_response.decode("utf-8")  # Convert bytes to string
             try:
@@ -183,6 +191,9 @@ class RedisCache(BaseCache):
             except:
                 cached_response = ast.literal_eval(cached_response)
             return cached_response
+        except Exception as e:
+            print_verbose(f"LiteLLM Caching: get() Got exception from REDIS : {str(e)}")
+            return None

     def get_cache(self, key, **kwargs):
         try:
@@ -195,7 +206,7 @@ class RedisCache(BaseCache):
         except Exception as e:
             # NON blocking - notify users Redis is throwing an exception
             traceback.print_exc()
-            logging.debug("LiteLLM Caching: get() - Got exception from REDIS: ", e)
+            print_verbose("LiteLLM Caching: get() - Got exception from REDIS: ", e)

     async def async_get_cache(self, key, **kwargs):
         _redis_client = self.init_async_client()
@@ -211,7 +222,7 @@ class RedisCache(BaseCache):
         except Exception as e:
             # NON blocking - notify users Redis is throwing an exception
             traceback.print_exc()
-            logging.debug("LiteLLM Caching: get() - Got exception from REDIS: ", e)
+            print_verbose("LiteLLM Caching: get() - Got exception from REDIS: ", e)

     def flush_cache(self):
         self.redis_client.flushall()