Mirror of https://github.com/BerriAI/litellm.git
[Optimize] Optimize the code in caching file
commit 9b77b8c90b
parent c5ca2619f9

1 changed file with 7 additions and 14 deletions
@@ -374,10 +374,11 @@ class RedisCache(BaseCache):
                             f"Set ASYNC Redis Cache PIPELINE: key: {cache_key}\nValue {cache_value}\nttl={ttl}"
                         )
                         # Set the value with a TTL if it's provided.
+                        json_cache_value = json.dumps(cache_value)
                         if ttl is not None:
-                            pipe.setex(cache_key, ttl, json.dumps(cache_value))
+                            pipe.setex(cache_key, ttl, json_cache_value)
                         else:
-                            pipe.set(cache_key, json.dumps(cache_value))
+                            pipe.set(cache_key, json_cache_value)
                     # Execute the pipeline and return the results.
                     results = await pipe.execute()
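For readers skimming the hunk above, here is a minimal, self-contained sketch of the pattern it lands on: serialize the value once per entry and reuse the serialized string in both the TTL and non-TTL branches of the pipeline. The `redis.asyncio` client wiring and the `cache_list` shape are assumptions for illustration, not litellm's exact code.

```python
import json

import redis.asyncio as redis


async def set_cache_pipeline(client: redis.Redis, cache_list, ttl=None):
    """Queue all cache writes and execute them in a single round trip."""
    async with client.pipeline(transaction=True) as pipe:
        for cache_key, cache_value in cache_list:
            # Serialize once per entry instead of once per branch.
            json_cache_value = json.dumps(cache_value)
            if ttl is not None:
                pipe.setex(cache_key, ttl, json_cache_value)
            else:
                pipe.set(cache_key, json_cache_value)
        # All queued SET/SETEX commands are sent together here.
        return await pipe.execute()
```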
@@ -810,9 +811,7 @@ class RedisSemanticCache(BaseCache):
 
         # get the prompt
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
 
         # create an embedding for prompt
         embedding_response = litellm.embedding(
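The remaining hunks all make the same change: the prompt is built with a single `str.join` instead of repeated `+=` concatenation. A small before/after sketch, with an illustrative `messages` list standing in for `kwargs["messages"]`:

```python
messages = [
    {"role": "user", "content": "What's the capital of France?"},
    {"role": "assistant", "content": "Paris."},
]

# Before: repeated concatenation copies the growing string on every iteration.
prompt = ""
for message in messages:
    prompt += message["content"]

# After: builds the same string in a single pass.
prompt = "".join(message["content"] for message in messages)
```

For short chat histories the difference is negligible; `join` mainly reads more directly and avoids quadratic copying on long conversations.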
@@ -847,9 +846,7 @@ class RedisSemanticCache(BaseCache):
 
         # get the messages
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
 
         # convert to embedding
         embedding_response = litellm.embedding(
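Both of the hunks above feed the joined prompt straight into an embedding call. A hedged sketch of that step, using litellm's public `embedding` API; the model name here is an assumption for the example (the semantic cache uses whichever embedding model it was configured with):

```python
import litellm

messages = [{"role": "user", "content": "hello world"}]
prompt = "".join(message["content"] for message in messages)

embedding_response = litellm.embedding(
    model="text-embedding-ada-002",  # assumed model, for illustration only
    input=[prompt],
)
# The vector that gets written to / queried against the Redis index.
embedding = embedding_response["data"][0]["embedding"]
```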
@@ -909,9 +906,7 @@ class RedisSemanticCache(BaseCache):
 
         # get the prompt
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
         # create an embedding for prompt
         router_model_names = (
             [m["model_name"] for m in llm_model_list]
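In this hunk and the next, the comprehension over `llm_model_list` collects the model names the proxy's router knows about, presumably so the cache can decide whether its embedding model should go through the router. A minimal sketch of that check; the `llm_model_list` entries, the `else []` fallback, and the final membership test are assumptions for illustration, not litellm's exact logic:

```python
llm_model_list = [
    {"model_name": "azure-embedding-model", "litellm_params": {"model": "azure/my-embedding"}},
    {"model_name": "gpt-4o", "litellm_params": {"model": "gpt-4o"}},
]

router_model_names = (
    [m["model_name"] for m in llm_model_list] if llm_model_list is not None else []
)

embedding_model = "azure-embedding-model"  # hypothetical configured embedding model
use_router = embedding_model in router_model_names  # route the embedding call if the router knows the model
```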
@@ -964,9 +959,7 @@ class RedisSemanticCache(BaseCache):
 
         # get the messages
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
 
         router_model_names = (
             [m["model_name"] for m in llm_model_list]