fix(caching.py): add logging module support for caching
commit 3e5b743b89
parent d60af6fff5

3 changed files with 23 additions and 4 deletions
@@ -28,3 +28,4 @@ verbose_logger = logging.getLogger("LiteLLM")
 # Add the handler to the logger
 verbose_router_logger.addHandler(handler)
 verbose_proxy_logger.addHandler(handler)
+verbose_logger.addHandler(handler)
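Note: this first hunk attaches the shared log handler to the module-level "LiteLLM" logger, so it emits output just like the router and proxy loggers already do. A minimal self-contained sketch of that wiring; the handler and formatter details, and the router/proxy logger names, are assumptions for illustration, while the "LiteLLM" name and the addHandler calls come from the diff context:

    import logging

    # Shared handler; the real module configures one like this for all loggers.
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s: %(message)s"))

    verbose_router_logger = logging.getLogger("LiteLLM Router")  # assumed name
    verbose_proxy_logger = logging.getLogger("LiteLLM Proxy")    # assumed name
    verbose_logger = logging.getLogger("LiteLLM")                # from the hunk header

    # Add the handler to the logger(s)
    verbose_router_logger.addHandler(handler)
    verbose_proxy_logger.addHandler(handler)
    verbose_logger.addHandler(handler)  # the line this commit adds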
@@ -12,10 +12,12 @@ import time, logging
 import json, traceback, ast, hashlib
 from typing import Optional, Literal, List, Union, Any
 from openai._models import BaseModel as OpenAIObject
+from litellm._logging import verbose_logger


 def print_verbose(print_statement):
     try:
+        verbose_logger.debug(print_statement)
         if litellm.set_verbose:
             print(print_statement)  # noqa
     except:
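Note: with this hunk, every print_verbose call in caching.py also flows through the standard logging module, so cache debug output can be captured without flipping litellm.set_verbose. A hypothetical usage sketch, assuming the "LiteLLM" logger name from the first hunk:

    import logging

    # Surface the cache's debug messages through any standard logging setup.
    logging.basicConfig()  # or attach your own handlers
    logging.getLogger("LiteLLM").setLevel(logging.DEBUG)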
@@ -175,7 +177,7 @@ class S3Cache(BaseCache):
                 CacheControl=cache_control,
                 ContentType="application/json",
                 ContentLanguage="en",
-                ContentDisposition=f"inline; filename=\"{key}.json\""
+                ContentDisposition=f'inline; filename="{key}.json"',
             )
         else:
             cache_control = "immutable, max-age=31536000, s-maxage=31536000"
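Note: the only change in these two S3Cache hunks is stylistic; the f-string switches to single quotes so the embedded double quotes need no backslash escapes, and a trailing comma is added. Both spellings build the identical header value, as this tiny check illustrates (the key is hypothetical):

    key = "chat-completion-abc123"  # hypothetical cache key
    old = f"inline; filename=\"{key}.json\""
    new = f'inline; filename="{key}.json"'
    assert old == new  # both yield: inline; filename="chat-completion-abc123.json"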
@@ -187,7 +189,7 @@ class S3Cache(BaseCache):
                 CacheControl=cache_control,
                 ContentType="application/json",
                 ContentLanguage="en",
-                ContentDisposition=f"inline; filename=\"{key}.json\""
+                ContentDisposition=f'inline; filename="{key}.json"',
             )
         except Exception as e:
             # NON blocking - notify users S3 is throwing an exception
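Note: for context, these keyword arguments feed a boto3 put_object upload of the serialized cache entry. A self-contained sketch under assumed names; the bucket, key, and value are illustrative, while the keyword arguments mirror the diff:

    import json
    import boto3

    s3_client = boto3.client("s3")
    key = "chat-completion-abc123"    # hypothetical cache key
    value = {"response": "..."}       # hypothetical cached payload
    cache_control = "immutable, max-age=31536000, s-maxage=31536000"

    s3_client.put_object(
        Bucket="my-litellm-cache",    # hypothetical bucket name
        Key=f"{key}.json",
        Body=json.dumps(value),
        CacheControl=cache_control,
        ContentType="application/json",
        ContentLanguage="en",
        ContentDisposition=f'inline; filename="{key}.json"',
    )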
@@ -2879,11 +2879,27 @@ async def health_readiness():
     Unprotected endpoint for checking if worker can receive requests
     """
     global prisma_client
+
+    cache_type = None
+    if litellm.cache is not None:
+        cache_type = litellm.cache.type
     if prisma_client is not None:  # if db passed in, check if it's connected
         if prisma_client.db.is_connected() == True:
-            return {"status": "healthy", "db": "connected"}
+            response_object = {"db": "connected"}
+
+            return {
+                "status": "healthy",
+                "db": "connected",
+                "cache": cache_type,
+                "success_callbacks": litellm.success_callback,
+            }
     else:
-        return {"status": "healthy", "db": "Not connected"}
+        return {
+            "status": "healthy",
+            "db": "Not connected",
+            "cache": cache_type,
+            "success_callbacks": litellm.success_callback,
+        }
     raise HTTPException(status_code=503, detail="Service Unhealthy")
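Note: the readiness probe now reports the configured cache backend and the registered success callbacks alongside database status. A hypothetical smoke test, assuming the handler is mounted at /health/readiness on a locally running proxy (URL and port are assumptions):

    import requests

    resp = requests.get("http://localhost:4000/health/readiness")
    print(resp.json())
    # e.g. {"status": "healthy", "db": "connected", "cache": "s3", "success_callbacks": []}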