fix(caching.py): add logging module support for caching

This commit is contained in:
Krrish Dholakia 2024-01-20 17:34:16 -08:00
parent d60af6fff5
commit 3e5b743b89
3 changed files with 23 additions and 4 deletions

View file

@@ -28,3 +28,4 @@ verbose_logger = logging.getLogger("LiteLLM")
# Add the handler to the logger
verbose_router_logger.addHandler(handler)
verbose_proxy_logger.addHandler(handler)
verbose_logger.addHandler(handler)

View file

@@ -12,10 +12,12 @@ import time, logging
import json, traceback, ast, hashlib
from typing import Optional, Literal, List, Union, Any
from openai._models import BaseModel as OpenAIObject
from litellm._logging import verbose_logger
def print_verbose(print_statement):
try:
verbose_logger.debug(print_statement)
if litellm.set_verbose:
print(print_statement) # noqa
except:
@@ -175,7 +177,7 @@ class S3Cache(BaseCache):
CacheControl=cache_control,
ContentType="application/json",
ContentLanguage="en",
ContentDisposition=f"inline; filename=\"{key}.json\""
ContentDisposition=f'inline; filename="{key}.json"',
)
else:
cache_control = "immutable, max-age=31536000, s-maxage=31536000"
@@ -187,7 +189,7 @@ class S3Cache(BaseCache):
CacheControl=cache_control,
ContentType="application/json",
ContentLanguage="en",
ContentDisposition=f"inline; filename=\"{key}.json\""
ContentDisposition=f'inline; filename="{key}.json"',
)
except Exception as e:
# NON blocking - notify users S3 is throwing an exception

View file

@@ -2879,11 +2879,27 @@ async def health_readiness():
Unprotected endpoint for checking if worker can receive requests
"""
global prisma_client
cache_type = None
if litellm.cache is not None:
cache_type = litellm.cache.type
if prisma_client is not None: # if db passed in, check if it's connected
if prisma_client.db.is_connected() == True:
return {"status": "healthy", "db": "connected"}
response_object = {"db": "connected"}
return {
"status": "healthy",
"db": "connected",
"cache": cache_type,
"success_callbacks": litellm.success_callback,
}
else:
return {"status": "healthy", "db": "Not connected"}
return {
"status": "healthy",
"db": "Not connected",
"cache": cache_type,
"success_callbacks": litellm.success_callback,
}
raise HTTPException(status_code=503, detail="Service Unhealthy")