Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-25 18:54:30 +00:00
(code quality) run ruff rule to ban unused imports (#7313)
* remove unused imports
* fix AmazonConverseConfig
* fix test
* fix import
* ruff check fixes
* test fixes
* fix testing
* fix imports
parent 5e344497ce
commit c7f14e936a
347 changed files with 5473 additions and 7207 deletions
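The rule referred to in the commit title is ruff's unused-import check (F401), which can be applied automatically with `ruff check --select F401 --fix`. As a hedged sketch only, not code taken from this commit, the snippet below shows the pattern the cleaned-up methods follow: an import placed inside the method that actually references it, which the unused-import rule leaves alone. The index argument and field names are stand-ins for litellm internals, assumed for illustration.

# Hedged illustration (not the litellm source): F401 keeps an import only where
# the imported name is actually referenced.
def query_semantic_cache(index, embedding):
    # Kept: VectorQuery is used below, so the unused-import rule does not flag it.
    from redisvl.query import VectorQuery

    query = VectorQuery(
        vector=embedding,                       # embedding vector computed elsewhere
        vector_field_name="litellm_embedding",  # field name assumed for illustration
        return_fields=["response", "prompt", "vector_distance"],
    )
    # index is assumed to be a redisvl SearchIndex built during __init__
    return index.query(query)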
@@ -32,7 +32,6 @@ class RedisSemanticCache(BaseCache):
         **kwargs,
     ):
         from redisvl.index import SearchIndex
-        from redisvl.query import VectorQuery
 
         print_verbose(
             "redis semantic-cache initializing INDEX - litellm_semantic_cache_index"
@@ -141,7 +140,6 @@ class RedisSemanticCache(BaseCache):
 
     def get_cache(self, key, **kwargs):
         print_verbose(f"sync redis semantic-cache get_cache, kwargs: {kwargs}")
-        import numpy as np
         from redisvl.query import VectorQuery
 
         # query
@@ -253,7 +251,6 @@ class RedisSemanticCache(BaseCache):
 
     async def async_get_cache(self, key, **kwargs):
         print_verbose(f"async redis semantic-cache get_cache, kwargs: {kwargs}")
-        import numpy as np
         from redisvl.query import VectorQuery
 
         from litellm.proxy.proxy_server import llm_model_list, llm_router