Merge pull request #1809 from BerriAI/litellm_embedding_caching_updates

Support caching individual items in embedding list (Async embedding only)
Commit 28df60b609 by Krish Dholakia, 2024-02-03 21:04:23 -08:00, committed by GitHub
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
13 changed files with 638 additions and 196 deletions
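The feature this PR adds checks the cache per input item rather than per request, so an async embedding call whose input list is only partially cached re-embeds just the misses. A minimal sketch of that pattern, under assumed interfaces (`cache` and `embed_fn` are placeholders, not litellm's actual API):

# Illustrative only, not litellm's implementation. The cache maps
# (model, text) keys to vectors; embed_fn performs one batched embedding call.
async def embedding_with_item_cache(inputs, cache, embed_fn, model):
    results = [None] * len(inputs)
    misses = []  # (position, text) pairs not found in the cache

    for i, text in enumerate(inputs):
        hit = await cache.get((model, text))
        if hit is not None:
            results[i] = hit
        else:
            misses.append((i, text))

    if misses:
        # A single batched call covers only the uncached items.
        vectors = await embed_fn(model=model, input=[t for _, t in misses])
        for (i, text), vector in zip(misses, vectors):
            await cache.set((model, text), vector)
            results[i] = vector

    return results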


@@ -7,6 +7,20 @@ import secrets, subprocess
import hashlib, uuid
import warnings
import importlib
import warnings


def showwarning(message, category, filename, lineno, file=None, line=None):
    traceback_info = f"{filename}:{lineno}: {category.__name__}: {message}\n"
    if file is not None:
        file.write(traceback_info)


warnings.showwarning = showwarning
warnings.filterwarnings("default", category=UserWarning)

# Your client code here
messages: list = []
sys.path.insert(
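The override in the hunk above replaces Python's default warning formatter with a compact "file:line: Category: message" line whenever a target stream is supplied. A small standalone check of that formatter (file name and message are made up, not part of the diff):

import io

def showwarning(message, category, filename, lineno, file=None, line=None):
    # Same compact "file:line: Category: message" format as the diff above.
    traceback_info = f"{filename}:{lineno}: {category.__name__}: {message}\n"
    if file is not None:
        file.write(traceback_info)

buf = io.StringIO()
showwarning("cache backend not configured", UserWarning, "proxy_server.py", 7, file=buf)
print(buf.getvalue(), end="")
# prints: proxy_server.py:7: UserWarning: cache backend not configured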
@@ -4053,9 +4067,12 @@ def _has_user_setup_sso():
async def shutdown_event():
    global prisma_client, master_key, user_custom_auth, user_custom_key_generate
    if prisma_client:
        verbose_proxy_logger.debug("Disconnecting from Prisma")
        await prisma_client.disconnect()

    if litellm.cache is not None:
        await litellm.cache.disconnect()

    ## RESET CUSTOM VARIABLES ##
    cleanup_router_config_variables()
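The added `litellm.cache.disconnect()` call mirrors the Prisma disconnect: an async cache backend holding open connections should be closed before the event loop shuts down. A rough sketch of a cache wrapper exposing that interface (illustrative names, assuming a redis.asyncio-style client with get/set/close coroutines; not litellm's RedisCache):

class AsyncCacheWrapper:
    # Illustrative wrapper around an async Redis client.
    def __init__(self, redis_client):
        self.redis_client = redis_client  # e.g. redis.asyncio.Redis(...)

    async def get(self, key):
        return await self.redis_client.get(key)

    async def set(self, key, value):
        await self.redis_client.set(key, value)

    async def disconnect(self):
        # Release the underlying connection pool so shutdown does not hang
        # on open sockets.
        await self.redis_client.close()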