forked from phoenix/litellm-mirror
with caching
commit a1b64832a7 (parent e9c2f3b55d)
2 changed files with 7 additions and 7 deletions
@@ -724,29 +724,29 @@ def add_cache(messages, model_response):
     global cache_collection
     if cache_collection == None:
         make_collection()
-    print("cache collection in add cache", cache_collection)
+    # print("cache collection in add cache", cache_collection)
     user_question = message_to_user_question(messages)
     cache_collection.add(
         documents=[user_question],
         metadatas=[{"model_response": str(model_response)}],
         ids = [ str(uuid.uuid4())]
     )
-    print("in add cache, peek()", cache_collection.peek())
+    # print("in add cache, peek()", cache_collection.peek())
     return
 
 def get_cache(messages):
-    print("in get cache")
+    # print("in get cache")
     try:
         global cache_collection
         if cache_collection == None:
             make_collection()
-        print("cache collection", cache_collection)
+        # print("cache collection", cache_collection)
         user_question = message_to_user_question(messages)
         results = cache_collection.query(
             query_texts=[user_question],
             n_results=1
         )
-        print("query cache result", results)
+        # print("query cache result", results)
         distance = results['distances'][0][0]
         sim = (1 - distance)
         if sim >= litellm.cache_similarity_threshold:
@@ -757,5 +757,5 @@ def get_cache(messages):
             # no hit
             return None
     except Exception as e:
-        print("error in get cache", e)
+        # print("error in get cache", e)
         return None
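For context on the caching path touched above: add_cache stores the latest user question and the model response in the collection, and get_cache queries the collection with the new question, converts the top result's distance into a similarity (sim = 1 - distance), and only treats it as a hit when that similarity clears litellm.cache_similarity_threshold. Below is a minimal usage sketch, not part of this commit: it assumes get_cache returns the stored model_response on a hit (the hit branch falls outside this hunk), that add_cache/get_cache from the module edited above are in scope, and completion_with_cache is a hypothetical wrapper.

# Hypothetical sketch: wire the semantic cache around a litellm.completion() call
# using the helpers from the diff above.
import litellm

litellm.cache_similarity_threshold = 0.9  # minimum similarity for a cache hit

def completion_with_cache(model, messages):
    # get_cache() queries the collection with the latest user question and
    # returns the cached response when similarity >= the threshold, else None.
    cached = get_cache(messages)  # assumes get_cache/add_cache are imported from the module edited above
    if cached is not None:
        return cached
    response = litellm.completion(model=model, messages=messages)
    # Store question -> response so an equivalent future question can hit the cache.
    add_cache(messages, response)
    return response

# Example call (hypothetical model name):
# completion_with_cache("gpt-3.5-turbo", [{"role": "user", "content": "hi"}])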
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.373"
+version = "0.1.374"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"