From 974d92ff45af7a583e1ab17178ee688ab9abd27c Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Mon, 24 Jun 2024 17:03:23 -0700
Subject: [PATCH] fix use caching lib

---
 litellm/caching.py | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/litellm/caching.py b/litellm/caching.py
index e77d71dd8b..5aa41ce358 100644
--- a/litellm/caching.py
+++ b/litellm/caching.py
@@ -17,6 +17,7 @@ import traceback
 from datetime import timedelta
 from typing import Any, BinaryIO, List, Literal, Optional, Union
 
+from cachetools import Cache as CachetoolsCache
 from openai._models import BaseModel as OpenAIObject
 
 import litellm
@@ -70,7 +71,9 @@ class InMemoryCache(BaseCache):
         this is done to prevent overuse of System RAM
         """
         # if users don't provider one, use the default litellm cache
-        self.cache_dict: dict = {}
+        self.cache_dict: CachetoolsCache = CachetoolsCache(
+            maxsize=1000,
+        )
         self.ttl_dict: dict = {}
         self.default_ttl = default_ttl or 120.0
         self.last_cleaned = 0  # since this is in memory we need to periodically clean it up to not overuse the machines RAM
@@ -83,8 +86,6 @@ class InMemoryCache(BaseCache):
 
     async def async_set_cache(self, key, value, **kwargs):
         self.set_cache(key=key, value=value, **kwargs)
-        if time.time() - self.last_cleaned > self.default_ttl:
-            asyncio.create_task(self.clean_up_in_memory_cache())
 
     async def async_set_cache_pipeline(self, cache_list, ttl=None):
         for cache_key, cache_value in cache_list:
@@ -93,10 +94,6 @@ class InMemoryCache(BaseCache):
             else:
                 self.set_cache(key=cache_key, value=cache_value)
-
-        if time.time() - self.last_cleaned > self.default_ttl:
-            asyncio.create_task(self.clean_up_in_memory_cache())
-
     async def async_set_cache_sadd(self, key, value: List, ttl: Optional[float]):
         """
         Add value to set
         """
@@ -108,7 +105,6 @@ class InMemoryCache(BaseCache):
         self.set_cache(key, init_value, ttl=ttl)
         return value
-
     def get_cache(self, key, **kwargs):
         if key in self.cache_dict:
             if key in self.ttl_dict:
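
Note (reviewer sketch, not part of the patch): the change swaps the unbounded self.cache_dict dict for a cachetools.Cache capped at 1000 entries and drops the scheduling of clean_up_in_memory_cache() from the async setters. Below is a minimal sketch of the bounded-cache behavior this relies on; it assumes only the cachetools package, and the key names and insert count are illustrative, not taken from litellm.

from cachetools import Cache

# A bounded in-memory store: maxsize caps the number of entries
# (with the default getsizeof, every entry counts as size 1).
cache = Cache(maxsize=1000)

# Cache is a MutableMapping, so dict-style call sites such as
# `key in cache` and `cache[key]` keep working unchanged.
for i in range(1500):
    cache[f"key-{i}"] = {"response": i}

print(len(cache))           # 1000: inserts past maxsize evict existing entries
print("key-1499" in cache)  # True: recent inserts survive here
print(cache.get("key-0"))   # None here: an older entry was evicted
                            # (the base Cache does not promise which entries go first)

With the base Cache, eviction only happens when an insert would exceed maxsize; per-key TTL handling stays in litellm's own ttl_dict, which this patch leaves as a plain dict.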