"""
|
|
Add the event loop to the cache key, to prevent event loop closed errors.
|
|
"""
|
|
|
|
import asyncio
|
|
|
|
from .in_memory_cache import InMemoryCache
|
|
|
|
|
|
class LLMClientCache(InMemoryCache):
    def update_cache_key_with_event_loop(self, key):
        """
        Add the event loop to the cache key, to prevent event loop closed errors.

        If none, use the key as is.
        """
        try:
            try:
                # Prefer the currently running loop; if none is running,
                # fall back to a fresh loop so the key still gets a suffix.
                event_loop = asyncio.get_running_loop()
            except RuntimeError:
                event_loop = asyncio.new_event_loop()

            stringified_event_loop = str(id(event_loop))
            return f"{key}-{stringified_event_loop}"
        except Exception:  # handle other potential errors
            return key

    def set_cache(self, key, value, **kwargs):
        key = self.update_cache_key_with_event_loop(key)
        return super().set_cache(key, value, **kwargs)

    async def async_set_cache(self, key, value, **kwargs):
        key = self.update_cache_key_with_event_loop(key)
        return await super().async_set_cache(key, value, **kwargs)

    def get_cache(self, key, **kwargs):
        key = self.update_cache_key_with_event_loop(key)
        return super().get_cache(key, **kwargs)

    async def async_get_cache(self, key, **kwargs):
        key = self.update_cache_key_with_event_loop(key)
        return await super().async_get_cache(key, **kwargs)
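

# Minimal usage sketch (illustrative, not part of the module): shows that the
# same logical key resolves to a different cache entry per event loop, which is
# what prevents "event loop is closed" errors when cached async clients are
# reused. Assumes InMemoryCache's set/get behave like a plain in-memory dict.
if __name__ == "__main__":
    cache = LLMClientCache()

    async def _store():
        # Stored under a key suffixed with id() of the loop created by this
        # asyncio.run() call.
        await cache.async_set_cache("httpx-client", "client-A")

    async def _load():
        # A second asyncio.run() creates a different loop, so the suffixed key
        # differs and the entry written above is not found here.
        return await cache.async_get_cache("httpx-client")

    asyncio.run(_store())
    print(asyncio.run(_load()))  # typically None: the two runs use different loops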