Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 03:34:10 +00:00
Litellm ruff linting enforcement (#5992)
* ci(config.yml): add a 'check_code_quality' step. Addresses https://github.com/BerriAI/litellm/issues/5991
* ci(config.yml): check why CircleCI doesn't pick up this test
* ci(config.yml): fix to run 'check_code_quality' tests
* fix(__init__.py): fix unprotected import
* fix(__init__.py): don't remove unused imports
* build(ruff.toml): update ruff.toml to ignore unused imports
* fix: ruff + pyright - fix linting + type-checking errors
* fix: fix linting errors
* fix(lago.py): fix module init error
* fix: fix linting errors
* ci(config.yml): cd into correct dir for checks
* fix(proxy_server.py): fix linting error
* fix(utils.py): fix bare except that caused ruff linting errors
* fix: ruff - fix remaining linting errors
* fix(clickhouse.py): use standard logging object
* fix(__init__.py): fix unprotected import
* fix: ruff - fix linting errors
* fix: fix linting errors
* ci(config.yml): cleanup code qa step (formatting handled in local_testing)
* fix(_health_endpoints.py): fix ruff linting errors
* ci(config.yml): just use ruff in check_code_quality pipeline for now
* build(custom_guardrail.py): include missing file
* style(embedding_handler.py): fix ruff check
This commit is contained in:
parent 4fa8991a90
commit 94a05ca5d0

263 changed files with 1687 additions and 3320 deletions
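The bulk of this diff is mechanical lint cleanup in what appears to be litellm's caching module, judging by the class names (InMemoryCache, RedisCache, RedisSemanticCache, QdrantSemanticCache, S3Cache, DualCache, DiskCache). A handful of ruff rules account for nearly every hunk below; a minimal, self-contained sketch of the before/after patterns (names here are illustrative, not taken from the diff):

    import json

    payload = '{"a": 1}'

    # E722: a bare `except:` also traps KeyboardInterrupt and SystemExit.
    try:
        data = json.loads(payload)
    except Exception:  # was: except:
        data = None

    # E711 / E712: compare the None/True/False singletons by identity.
    flag = False
    if data is None:   # was: data == None
        data = {}
    if flag is False:  # was: flag == False
        flag = True

    # F841: drop bindings that are never read.
    cache = {"key": "value"}
    cache.pop("key", None)  # was: removed_item = cache.pop("key", None)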
@@ -33,7 +33,7 @@ def print_verbose(print_statement):
         verbose_logger.debug(print_statement)
         if litellm.set_verbose:
             print(print_statement)  # noqa
-    except:
+    except Exception:
         pass
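For context on the many `except:` to `except Exception:` changes (ruff E722): a bare `except` catches every `BaseException`, including `KeyboardInterrupt` and `SystemExit`, so best-effort blocks like the one above could swallow a Ctrl-C. A minimal sketch, with a stand-in for the logger:

    def safe_debug(msg: str) -> None:
        """Best-effort logging; never let it break the caller."""
        try:
            print(msg)  # stand-in for verbose_logger.debug
        except Exception:
            # A bare `except:` here would also trap KeyboardInterrupt and
            # SystemExit, making the process hard to interrupt; Exception does not.
            pass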
@@ -96,15 +96,13 @@ class InMemoryCache(BaseCache):
         """
         for key in list(self.ttl_dict.keys()):
             if time.time() > self.ttl_dict[key]:
-                removed_item = self.cache_dict.pop(key, None)
-                removed_ttl_item = self.ttl_dict.pop(key, None)
+                self.cache_dict.pop(key, None)
+                self.ttl_dict.pop(key, None)

                 # de-reference the removed item
                 # https://www.geeksforgeeks.org/diagnosing-and-fixing-memory-leaks-in-python/
                 # One of the most common causes of memory leaks in Python is the retention of objects that are no longer being used.
                 # This can occur when an object is referenced by another object, but the reference is never removed.
-                removed_item = None
-                removed_ttl_item = None

     def set_cache(self, key, value, **kwargs):
         print_verbose(
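The deleted `removed_item = None` lines were a manual attempt to drop references (per the de-referencing comment kept above), but since the bindings were never read, ruff flags them as F841. Discarding `dict.pop()`'s return value outright achieves the same thing: once the expression finishes, nothing references the popped object. A runnable sketch of the eviction pattern:

    import time

    cache_dict = {"k": "v"}
    ttl_dict = {"k": time.time() - 1}  # entry already expired

    for key in list(ttl_dict.keys()):  # list() lets us mutate while iterating
        if time.time() > ttl_dict[key]:
            # pop() returns the removed value; with no binding, the object
            # becomes collectable immediately, no `removed = None` needed.
            cache_dict.pop(key, None)
            ttl_dict.pop(key, None)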
@@ -150,7 +148,7 @@ class InMemoryCache(BaseCache):
             original_cached_response = self.cache_dict[key]
             try:
                 cached_response = json.loads(original_cached_response)
-            except:
+            except Exception:
                 cached_response = original_cached_response
             return cached_response
         return None
@@ -251,7 +249,7 @@ class RedisCache(BaseCache):
         self.redis_version = "Unknown"
         try:
             self.redis_version = self.redis_client.info()["redis_version"]
-        except Exception as e:
+        except Exception:
             pass

         ### ASYNC HEALTH PING ###
@@ -688,7 +686,7 @@ class RedisCache(BaseCache):
                 cached_response = json.loads(
                     cached_response
                 )  # Convert string to dictionary
-            except:
+            except Exception:
                 cached_response = ast.literal_eval(cached_response)
             return cached_response
@@ -844,7 +842,7 @@ class RedisCache(BaseCache):
         """
         Tests if the sync redis client is correctly setup.
         """
-        print_verbose(f"Pinging Sync Redis Cache")
+        print_verbose("Pinging Sync Redis Cache")
         start_time = time.time()
         try:
             response = self.redis_client.ping()
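The ping hunks are ruff F541: an f-string with no placeholders is just a plain string carrying a misleading prefix. For instance:

    name = "Async Redis"
    print("Pinging Sync Redis Cache")  # no interpolation, so no f-prefix
    print(f"Pinging {name} Cache")     # the f-prefix earns its keep here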
@@ -878,7 +876,7 @@ class RedisCache(BaseCache):
         _redis_client = self.init_async_client()
         start_time = time.time()
         async with _redis_client as redis_client:
-            print_verbose(f"Pinging Async Redis Cache")
+            print_verbose("Pinging Async Redis Cache")
             try:
                 response = await redis_client.ping()
                 ## LOGGING ##
@@ -973,7 +971,6 @@ class RedisSemanticCache(BaseCache):
             },
             "fields": {
-                "text": [{"name": "response"}],
                 "text": [{"name": "prompt"}],
                 "vector": [
                     {
                         "name": "litellm_embedding",
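This hunk is a genuine bug surfaced by linting rather than a style fix: the schema dict literal defined the key "text" twice, and in a Python dict literal the last duplicate silently wins, so the `{"name": "response"}` field list was never part of the schema. Ruff's repeated-dict-key check (F601) reports this. A demonstration:

    fields = {
        "text": [{"name": "response"}],  # silently discarded
        "text": [{"name": "prompt"}],    # last duplicate wins
    }
    assert fields["text"] == [{"name": "prompt"}]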
@@ -999,14 +996,14 @@ class RedisSemanticCache(BaseCache):

             redis_url = "redis://:" + password + "@" + host + ":" + port
         print_verbose(f"redis semantic-cache redis_url: {redis_url}")
-        if use_async == False:
+        if use_async is False:
             self.index = SearchIndex.from_dict(schema)
             self.index.connect(redis_url=redis_url)
             try:
                 self.index.create(overwrite=False)  # don't overwrite existing index
             except Exception as e:
                 print_verbose(f"Got exception creating semantic cache index: {str(e)}")
-        elif use_async == True:
+        elif use_async is True:
             schema["index"]["name"] = "litellm_semantic_cache_index_async"
             self.index = SearchIndex.from_dict(schema)
             self.index.connect(redis_url=redis_url, use_async=True)
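One caveat worth noting on the E712 rewrites (`== False` to `is False`): identity against the `False` singleton is stricter than the old equality, because `0 == False` is `True` in Python while `0 is False` is not. For bool-typed flags like `use_async` the behavior is unchanged; for a caller passing `0` or `1` it would differ:

    use_async = 0  # caller passed an int instead of a bool

    print(use_async == False)  # True: 0 == False by numeric equality
    print(use_async is False)  # False: 0 is not the False singleton
    print(not use_async)       # True: plain truthiness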
@@ -1027,7 +1024,7 @@ class RedisSemanticCache(BaseCache):
                 cached_response = json.loads(
                     cached_response
                 )  # Convert string to dictionary
-            except:
+            except Exception:
                 cached_response = ast.literal_eval(cached_response)
             return cached_response
@@ -1060,7 +1057,7 @@ class RedisSemanticCache(BaseCache):
             ]

             # Add more data
-            keys = self.index.load(new_data)
+            self.index.load(new_data)

             return
@@ -1092,7 +1089,7 @@ class RedisSemanticCache(BaseCache):
         )

         results = self.index.query(query)
-        if results == None:
+        if results is None:
             return None
         if isinstance(results, list):
             if len(results) == 0:
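The `== None` to `is None` fixes (E711) also harden the code slightly: `==` dispatches to the left operand's `__eq__`, which result and query-builder objects sometimes override, while `is` is an identity test that nothing can intercept. A sketch with a hypothetical result type:

    class FuzzyResult:
        def __eq__(self, other):
            return True  # overloaded equality, as ORM/array types often do

    r = FuzzyResult()
    print(r == None)  # True: __eq__ hijacks the comparison (and trips E711)
    print(r is None)  # False: identity cannot be overridden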
@@ -1173,7 +1170,7 @@ class RedisSemanticCache(BaseCache):
             ]

             # Add more data
-            keys = await self.index.aload(new_data)
+            await self.index.aload(new_data)
             return

     async def async_get_cache(self, key, **kwargs):
@@ -1222,7 +1219,7 @@ class RedisSemanticCache(BaseCache):
             return_fields=["response", "prompt", "vector_distance"],
         )
         results = await self.index.aquery(query)
-        if results == None:
+        if results is None:
             kwargs.setdefault("metadata", {})["semantic-similarity"] = 0.0
             return None
         if isinstance(results, list):
@@ -1396,7 +1393,7 @@ class QdrantSemanticCache(BaseCache):
                 cached_response = json.loads(
                     cached_response
                 )  # Convert string to dictionary
-            except:
+            except Exception:
                 cached_response = ast.literal_eval(cached_response)
             return cached_response
@@ -1435,7 +1432,7 @@ class QdrantSemanticCache(BaseCache):
                 },
             ]
         }
-        keys = self.sync_client.put(
+        self.sync_client.put(
             url=f"{self.qdrant_api_base}/collections/{self.collection_name}/points",
             headers=self.headers,
             json=data,
@@ -1481,7 +1478,7 @@ class QdrantSemanticCache(BaseCache):
         )
         results = search_response.json()["result"]

-        if results == None:
+        if results is None:
             return None
         if isinstance(results, list):
             if len(results) == 0:
@@ -1563,7 +1560,7 @@ class QdrantSemanticCache(BaseCache):
             ]
         }

-        keys = await self.async_client.put(
+        await self.async_client.put(
             url=f"{self.qdrant_api_base}/collections/{self.collection_name}/points",
             headers=self.headers,
             json=data,
@@ -1629,7 +1626,7 @@ class QdrantSemanticCache(BaseCache):

         results = search_response.json()["result"]

-        if results == None:
+        if results is None:
             kwargs.setdefault("metadata", {})["semantic-similarity"] = 0.0
             return None
         if isinstance(results, list):
@@ -1767,7 +1764,7 @@ class S3Cache(BaseCache):
                 cached_response = json.loads(
                     cached_response
                 )  # Convert string to dictionary
-            except Exception as e:
+            except Exception:
                 cached_response = ast.literal_eval(cached_response)
             if type(cached_response) is not dict:
                 cached_response = dict(cached_response)
@@ -1845,7 +1842,7 @@ class DualCache(BaseCache):

                 self.in_memory_cache.set_cache(key, value, **kwargs)

-            if self.redis_cache is not None and local_only == False:
+            if self.redis_cache is not None and local_only is False:
                 self.redis_cache.set_cache(key, value, **kwargs)
         except Exception as e:
             print_verbose(e)
@@ -1865,7 +1862,7 @@ class DualCache(BaseCache):
             if self.in_memory_cache is not None:
                 result = self.in_memory_cache.increment_cache(key, value, **kwargs)

-            if self.redis_cache is not None and local_only == False:
+            if self.redis_cache is not None and local_only is False:
                 result = self.redis_cache.increment_cache(key, value, **kwargs)

             return result
@@ -1887,7 +1884,7 @@ class DualCache(BaseCache):
             if (
                 (self.always_read_redis is True)
                 and self.redis_cache is not None
-                and local_only == False
+                and local_only is False
             ):
                 # If not found in in-memory cache or always_read_redis is True, try fetching from Redis
                 redis_result = self.redis_cache.get_cache(key, **kwargs)
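For readers skimming the DualCache hunks: the logic being linted is a two-tier read-through cache that serves from memory first and falls back to Redis unless `local_only` is set, with `always_read_redis` forcing the Redis read. A condensed, hypothetical sketch of that control flow (not the real litellm implementation):

    from typing import Any, Optional

    class DualCacheSketch:
        """Two-tier read path: in-memory first, then Redis."""

        def __init__(self, redis: Optional[dict] = None, always_read_redis: bool = False):
            self.in_memory: dict = {}
            self.redis = redis  # plain dict standing in for a Redis client
            self.always_read_redis = always_read_redis

        def get_cache(self, key: str, local_only: bool = False) -> Optional[Any]:
            result = self.in_memory.get(key)
            if (
                (result is None or self.always_read_redis is True)
                and self.redis is not None
                and local_only is False  # identity check, per the E712 fix
            ):
                redis_result = self.redis.get(key)
                if redis_result is not None:
                    self.in_memory[key] = redis_result  # backfill the hot tier
                    result = redis_result
            return result

    # Usage: a Redis hit backfills the in-memory tier.
    cache = DualCacheSketch(redis={"k": "v"})
    assert cache.get_cache("k") == "v"
    assert cache.in_memory["k"] == "v"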
@@ -1900,7 +1897,7 @@ class DualCache(BaseCache):

             print_verbose(f"get cache: cache result: {result}")
             return result
-        except Exception as e:
+        except Exception:
             verbose_logger.error(traceback.format_exc())

     def batch_get_cache(self, keys: list, local_only: bool = False, **kwargs):
@@ -1913,7 +1910,7 @@ class DualCache(BaseCache):
                 if in_memory_result is not None:
                     result = in_memory_result

-            if None in result and self.redis_cache is not None and local_only == False:
+            if None in result and self.redis_cache is not None and local_only is False:
                 """
                 - for the none values in the result
                 - check the redis cache
@@ -1933,7 +1930,7 @@ class DualCache(BaseCache):

             print_verbose(f"async batch get cache: cache result: {result}")
             return result
-        except Exception as e:
+        except Exception:
             verbose_logger.error(traceback.format_exc())

     async def async_get_cache(self, key, local_only: bool = False, **kwargs):
@@ -1952,7 +1949,7 @@ class DualCache(BaseCache):
                 if in_memory_result is not None:
                     result = in_memory_result

-            if result is None and self.redis_cache is not None and local_only == False:
+            if result is None and self.redis_cache is not None and local_only is False:
                 # If not found in in-memory cache, try fetching from Redis
                 redis_result = await self.redis_cache.async_get_cache(key, **kwargs)
@@ -1966,7 +1963,7 @@ class DualCache(BaseCache):

             print_verbose(f"get cache: cache result: {result}")
             return result
-        except Exception as e:
+        except Exception:
             verbose_logger.error(traceback.format_exc())

     async def async_batch_get_cache(
@@ -1981,7 +1978,7 @@ class DualCache(BaseCache):

             if in_memory_result is not None:
                 result = in_memory_result
-            if None in result and self.redis_cache is not None and local_only == False:
+            if None in result and self.redis_cache is not None and local_only is False:
                 """
                 - for the none values in the result
                 - check the redis cache
@@ -2006,7 +2003,7 @@ class DualCache(BaseCache):
                         result[index] = value

             return result
-        except Exception as e:
+        except Exception:
             verbose_logger.error(traceback.format_exc())

     async def async_set_cache(self, key, value, local_only: bool = False, **kwargs):
@@ -2017,7 +2014,7 @@ class DualCache(BaseCache):
             if self.in_memory_cache is not None:
                 await self.in_memory_cache.async_set_cache(key, value, **kwargs)

-            if self.redis_cache is not None and local_only == False:
+            if self.redis_cache is not None and local_only is False:
                 await self.redis_cache.async_set_cache(key, value, **kwargs)
         except Exception as e:
             verbose_logger.exception(
@@ -2039,7 +2036,7 @@ class DualCache(BaseCache):
                     cache_list=cache_list, **kwargs
                 )

-            if self.redis_cache is not None and local_only == False:
+            if self.redis_cache is not None and local_only is False:
                 await self.redis_cache.async_set_cache_pipeline(
                     cache_list=cache_list, ttl=kwargs.pop("ttl", None), **kwargs
                 )
@@ -2459,7 +2456,7 @@ class Cache:
                     cached_response = json.loads(
                         cached_response  # type: ignore
                     )  # Convert string to dictionary
-                except:
+                except Exception:
                     cached_response = ast.literal_eval(cached_response)  # type: ignore
                 return cached_response
             return cached_result
@@ -2492,7 +2489,7 @@ class Cache:
                     return self._get_cache_logic(
                         cached_result=cached_result, max_age=max_age
                     )
-        except Exception as e:
+        except Exception:
             print_verbose(f"An exception occurred: {traceback.format_exc()}")
             return None
@@ -2506,7 +2503,7 @@ class Cache:
         if self.should_use_cache(*args, **kwargs) is not True:
             return

-        messages = kwargs.get("messages", [])
+        kwargs.get("messages", [])
         if "cache_key" in kwargs:
             cache_key = kwargs["cache_key"]
         else:
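A nuance in this hunk: the fix keeps `kwargs.get("messages", [])` as a bare expression instead of deleting the line. F841 only flags unused bindings, so dropping the `messages =` assignment silences ruff, even though `dict.get` is side-effect-free and the remaining line is a no-op. For example:

    kwargs = {"messages": [{"role": "user", "content": "hi"}]}

    messages = kwargs.get("messages", [])  # F841 if `messages` is never read
    kwargs.get("messages", [])             # no binding, no F841; also a no-op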
@@ -2522,7 +2519,7 @@ class Cache:
                     return self._get_cache_logic(
                         cached_result=cached_result, max_age=max_age
                     )
-        except Exception as e:
+        except Exception:
             print_verbose(f"An exception occurred: {traceback.format_exc()}")
             return None
@@ -2701,7 +2698,7 @@ class DiskCache(BaseCache):
         if original_cached_response:
             try:
                 cached_response = json.loads(original_cached_response)  # type: ignore
-            except:
+            except Exception:
                 cached_response = original_cached_response
             return cached_response
         return None
@@ -2803,7 +2800,7 @@ def enable_cache(
     if "cache" not in litellm._async_success_callback:
         litellm._async_success_callback.append("cache")

-    if litellm.cache == None:
+    if litellm.cache is None:
         litellm.cache = Cache(
             type=type,
             host=host,