Merge pull request #3594 from rkataria1000/duplicate_code

Duplicate code
commit 901254d31e
Author: Krish Dholakia
Date: 2024-05-13 22:02:50 -07:00 (committed by GitHub)
3 changed files with 7 additions and 20 deletions


@@ -373,11 +373,12 @@ class RedisCache(BaseCache):
                     print_verbose(
                         f"Set ASYNC Redis Cache PIPELINE: key: {cache_key}\nValue {cache_value}\nttl={ttl}"
                     )
+                    json_cache_value = json.dumps(cache_value)
                     # Set the value with a TTL if it's provided.
                     if ttl is not None:
-                        pipe.setex(cache_key, ttl, json.dumps(cache_value))
+                        pipe.setex(cache_key, ttl, json_cache_value)
                     else:
-                        pipe.set(cache_key, json.dumps(cache_value))
+                        pipe.set(cache_key, json_cache_value)
                 # Execute the pipeline and return the results.
                 results = await pipe.execute()
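
For reference, the same pattern in isolation: a minimal runnable sketch that serializes each value once before queuing it, assuming redis-py's redis.asyncio module and a Redis server on localhost. The helper name and the (key, value) list shape are illustrative here, not litellm's actual method signature.

import asyncio
import json

import redis.asyncio as redis


async def set_cache_pipeline(client, cache_list, ttl=None):
    """Queue one SET/SETEX per (key, value) pair and run them in a single round trip."""
    async with client.pipeline(transaction=True) as pipe:
        for cache_key, cache_value in cache_list:
            # Serialize once and reuse the result in both branches, as the diff does.
            json_cache_value = json.dumps(cache_value)
            if ttl is not None:
                pipe.setex(cache_key, ttl, json_cache_value)
            else:
                pipe.set(cache_key, json_cache_value)
        # One result per queued command.
        return await pipe.execute()


async def main():
    client = redis.Redis()
    print(await set_cache_pipeline(client, [("k1", {"a": 1}), ("k2", {"b": 2})], ttl=60))


asyncio.run(main())
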
@@ -810,9 +811,7 @@ class RedisSemanticCache(BaseCache):
         # get the prompt
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
         # create an embedding for prompt
         embedding_response = litellm.embedding(
@@ -847,9 +846,7 @@ class RedisSemanticCache(BaseCache):
         # get the messages
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
         # convert to embedding
         embedding_response = litellm.embedding(
@@ -909,9 +906,7 @@ class RedisSemanticCache(BaseCache):
         # get the prompt
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
         # create an embedding for prompt
         router_model_names = (
             [m["model_name"] for m in llm_model_list]
@@ -964,9 +959,7 @@ class RedisSemanticCache(BaseCache):
         # get the messages
         messages = kwargs["messages"]
-        prompt = ""
-        for message in messages:
-            prompt += message["content"]
+        prompt = "".join(message["content"] for message in messages)
         router_model_names = (
             [m["model_name"] for m in llm_model_list]
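
All four RedisSemanticCache hunks make the identical change, so one sketch covers them. It assumes OpenAI-style message dicts whose content values are plain strings; both the old loop and the new join raise TypeError on non-string content, so behavior is unchanged.

messages = [
    {"role": "user", "content": "What is "},
    {"role": "user", "content": "semantic caching?"},
]

# Before: build the prompt by repeated concatenation in a loop.
prompt = ""
for message in messages:
    prompt += message["content"]

# After: a single join over a generator expression, as in all four hunks.
prompt_joined = "".join(message["content"] for message in messages)

assert prompt == prompt_joined == "What is semantic caching?"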


@@ -101,9 +101,6 @@ class LowestCostLoggingHandler(CustomLogger):
                 if precise_minute not in request_count_dict[id]:
                     request_count_dict[id][precise_minute] = {}
-                if precise_minute not in request_count_dict[id]:
-                    request_count_dict[id][precise_minute] = {}
-
                 ## TPM
                 request_count_dict[id][precise_minute]["tpm"] = (
                     request_count_dict[id][precise_minute].get("tpm", 0) + total_tokens


@@ -115,9 +115,6 @@ class LowestLatencyLoggingHandler(CustomLogger):
                 if precise_minute not in request_count_dict[id]:
                     request_count_dict[id][precise_minute] = {}
-                if precise_minute not in request_count_dict[id]:
-                    request_count_dict[id][precise_minute] = {}
-
                 ## TPM
                 request_count_dict[id][precise_minute]["tpm"] = (
                     request_count_dict[id][precise_minute].get("tpm", 0) + total_tokens
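
Both router-strategy hunks delete a verbatim copy of the guard immediately above it; the duplicate could never fire because the first guard just inserted the key. A minimal sketch of the surviving logic, with names mirroring the diff rather than the handlers' real state, plus dict.setdefault as an even tighter equivalent:

request_count_dict: dict = {}
id = "model-1"  # mirrors the diff's variable; shadows the builtin in this sketch
precise_minute = "2024-05-13-22-02"
total_tokens = 42

if id not in request_count_dict:
    request_count_dict[id] = {}
if precise_minute not in request_count_dict[id]:
    request_count_dict[id][precise_minute] = {}
# Repeating the check above verbatim could never fire: the key was just
# inserted, which is why both handlers drop the duplicate block.

## TPM
request_count_dict[id][precise_minute]["tpm"] = (
    request_count_dict[id][precise_minute].get("tpm", 0) + total_tokens
)

# dict.setdefault collapses each guard-plus-index pair into one call:
minute_bucket = request_count_dict.setdefault(id, {}).setdefault(precise_minute, {})
assert minute_bucket["tpm"] == total_tokens  # same inner dict as above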