forked from phoenix/litellm-mirror
Merge pull request #722 from karvetskiy/fix-router-caching
Fix caching for Router
Commit 9bef396d04
3 changed files with 118 additions and 98 deletions
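In short: this PR gives the cache backends a shared BaseCache interface with optional per-key TTL support, threads that TTL through Cache.add_cache via **kwargs, and has the Router store its per-deployment TPM/RPM usage counters with a default one-hour expiry so they no longer persist indefinitely.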
@@ -11,6 +11,7 @@ import litellm
 import time
 import json


 def get_prompt(*args, **kwargs):
     # make this safe checks, it should not throw any exceptions
     if len(args) > 1:
@@ -23,81 +24,98 @@ def get_prompt(*args, **kwargs):
         return prompt
     return None


-class RedisCache():
+class BaseCache:
+    def set_cache(self, key, value, **kwargs):
+        raise NotImplementedError
+
+    def get_cache(self, key, **kwargs):
+        raise NotImplementedError
+
+
+class RedisCache(BaseCache):
     def __init__(self, host, port, password):
         import redis
         # if users don't provide one, use the default litellm cache
         self.redis_client = redis.Redis(host=host, port=port, password=password)

-    def set_cache(self, key, value):
+    def set_cache(self, key, value, **kwargs):
+        ttl = kwargs.get("ttl", None)
         try:
-            self.redis_client.set(key, str(value))
+            self.redis_client.set(name=key, value=str(value), ex=ttl)
         except Exception as e:
             # NON blocking - notify users Redis is throwing an exception
             print("LiteLLM Caching: Got exception from REDIS: ", e)

-    def get_cache(self, key):
+    def get_cache(self, key, **kwargs):
         try:
             # TODO convert this to a ModelResponse object
             cached_response = self.redis_client.get(key)
-            if cached_response!=None:
+            if cached_response != None:
                 # cached_response is in `b{} convert it to ModelResponse
                 cached_response = cached_response.decode("utf-8")  # Convert bytes to string
                 cached_response = json.loads(cached_response)  # Convert string to dictionary
-                cached_response['cache'] = True # set cache-hit flag to True
+                cached_response['cache'] = True  # set cache-hit flag to True
                 return cached_response
         except Exception as e:
             # NON blocking - notify users Redis is throwing an exception
             print("LiteLLM Caching: Got exception from REDIS: ", e)


-class HostedCache():
-    def set_cache(self, key, value):
+class HostedCache(BaseCache):
+    def set_cache(self, key, value, **kwargs):
+        if "ttl" in kwargs:
+            print("LiteLLM Caching: TTL is not supported for hosted cache!")
         # make a post request to api.litellm.ai/set_cache
         import requests
         url = f"https://api.litellm.ai/set_cache?key={key}&value={str(value)}"
-        requests.request("POST", url) # post request to set this in the hosted litellm cache
+        requests.request("POST", url)  # post request to set this in the hosted litellm cache

-    def get_cache(self, key):
+    def get_cache(self, key, **kwargs):
         import requests
         url = f"https://api.litellm.ai/get_cache?key={key}"
         cached_response = requests.request("GET", url)
         cached_response = cached_response.text
-        if cached_response == "NONE": # api.litellm.ai returns "NONE" if it's not a cache hit
+        if cached_response == "NONE":  # api.litellm.ai returns "NONE" if it's not a cache hit
             return None
-        if cached_response!=None:
+        if cached_response != None:
             try:
                 cached_response = json.loads(cached_response)  # Convert string to dictionary
-                cached_response['cache'] = True # set cache-hit flag to True
+                cached_response['cache'] = True  # set cache-hit flag to True
                 return cached_response
             except:
                 return cached_response


-class InMemoryCache():
+class InMemoryCache(BaseCache):
     def __init__(self):
         # if users don't provide one, use the default litellm cache
         self.cache_dict = {}
+        self.ttl_dict = {}

-    def set_cache(self, key, value):
-        #print("in set cache for inmem")
+    def set_cache(self, key, value, **kwargs):
         self.cache_dict[key] = value
-        #print(self.cache_dict)
+        if "ttl" in kwargs:
+            self.ttl_dict[key] = time.time() + kwargs["ttl"]

-    def get_cache(self, key):
-        #print("in get cache for inmem")
+    def get_cache(self, key, **kwargs):
         if key in self.cache_dict:
-            #print("got a cache hit")
+            if key in self.ttl_dict:
+                if time.time() > self.ttl_dict[key]:
+                    self.cache_dict.pop(key, None)
+                    return None
             return self.cache_dict[key]
-        #print("got a cache miss")
         return None


-class Cache():
+class Cache:
     def __init__(
             self,
-            type = "local",
-            host = None,
-            port = None,
-            password = None
-    ):
+            type="local",
+            host=None,
+            port=None,
+            password=None
+    ):
         """
         Initializes the cache based on the given type.
@@ -151,7 +169,7 @@ class Cache():
     def generate_streaming_content(self, content):
         chunk_size = 5  # Adjust the chunk size as needed
         for i in range(0, len(content), chunk_size):
-            yield {'choices': [{'delta': {'role': 'assistant', 'content': content[i:i+chunk_size]}}]}
+            yield {'choices': [{'delta': {'role': 'assistant', 'content': content[i:i + chunk_size]}}]}
             time.sleep(0.02)

     def get_cache(self, *args, **kwargs):
@@ -174,8 +192,8 @@ class Cache():
                 cached_result = self.cache.get_cache(cache_key)
                 if cached_result != None and 'stream' in kwargs and kwargs['stream'] == True:
                     # if streaming is true and we got a cache hit, return a generator
-                    #print("cache hit and stream=True")
-                    #print(cached_result)
+                    # print("cache hit and stream=True")
+                    # print(cached_result)
                     return self.generate_streaming_content(cached_result["choices"][0]['message']['content'])
                 return cached_result
         except:
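For illustration only (not part of the diff): when stream=True and there is a cache hit, the cached message is replayed as small delta chunks so it looks like a live stream. A standalone sketch mirroring generate_streaming_content above:

import time

def generate_streaming_content(content, chunk_size=5):
    # slice the cached message into small deltas, pausing briefly between chunks
    for i in range(0, len(content), chunk_size):
        yield {'choices': [{'delta': {'role': 'assistant', 'content': content[i:i + chunk_size]}}]}
        time.sleep(0.02)

for chunk in generate_streaming_content("hello from the cache"):
    print(chunk['choices'][0]['delta']['content'], end="")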
@@ -201,12 +219,6 @@ class Cache():
             # print(cache_key)
             if cache_key is not None:
                 # print("adding to cache", cache_key, result)
-                self.cache.set_cache(cache_key, result)
+                self.cache.set_cache(cache_key, result, **kwargs)
         except:
             pass

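For illustration only (not part of the diff): because add_cache now forwards its **kwargs into set_cache, a ttl passed by the caller reaches the backing store (the Redis backend maps it to the key's expiry via ex=ttl). A condensed sketch of the usage the new tests at the bottom of this PR exercise, with placeholder Redis credentials:

from litellm.caching import Cache  # assumed import path

cache = Cache(type="redis", host="localhost", port="6379", password="hunter2")  # placeholder credentials
cache.add_cache(cache_key="test_key", result="test_value", ttl=1)  # expires after roughly 1 second
print(cache.get_cache(cache_key="test_key"))                       # "test_value" while the TTL holds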
@@ -24,6 +24,8 @@ class Router:
     """
     model_names: List = []
     cache_responses: bool = False
+    default_cache_time_seconds: int = 1 * 60 * 60  # 1 hour

     def __init__(self,
                  model_list: Optional[list] = None,
                  redis_host: Optional[str] = None,
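For orientation (not part of the diff): a hypothetical Router setup that exercises this caching path. Only model_list and redis_host appear in the hunk above; the model_list entry shape (model_name, litellm_params, tpm, rpm) is taken from the hunks further down, while redis_port, redis_password, and cache_responses as constructor keywords are assumptions:

from litellm import Router  # assumed import path

router = Router(
    model_list=[{
        "model_name": "gpt-3.5-turbo",                    # group name callers ask for
        "litellm_params": {"model": "azure/gpt35turbo"},  # the underlying deployment
        "tpm": 240000,                                    # per-deployment limits read by the router
        "rpm": 1800,
    }],
    redis_host="localhost",       # redis_port / redis_password / cache_responses are assumed kwargs
    redis_port="6379",
    redis_password="hunter2",
    cache_responses=True,
)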
@@ -133,7 +135,10 @@ class Router:
         Function LiteLLM submits a callback to after a successful
         completion. Purpose of this is to update TPM/RPM usage per model
         """
-        model_name = kwargs.get('model', None)  # i.e. azure/gpt35turbo
+        model_name = kwargs.get('model', None)  # i.e. gpt35turbo
+        custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None)  # i.e. azure
+        if custom_llm_provider:
+            model_name = f"{custom_llm_provider}/{model_name}"
         total_tokens = completion_response['usage']['total_tokens']
         self._set_deployment_usage(model_name, total_tokens)
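Illustration with hypothetical values: the callback now rebuilds the provider-prefixed deployment name itself, so usage is recorded under the same key that _get_deployment_usage reads later.

custom_llm_provider = "azure"   # hypothetical value from kwargs["litellm_params"]
model_name = "gpt35turbo"       # hypothetical value from kwargs["model"]
if custom_llm_provider:
    model_name = f"{custom_llm_provider}/{model_name}"
print(model_name)  # azure/gpt35turbo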
@@ -150,17 +155,9 @@ class Router:
             if item["model_name"] == model:
                 potential_deployments.append(item)

-        # set first model as current model
+        # set first model as current model to calculate token count
         deployment = potential_deployments[0]

-        # get model tpm, rpm limits
-        tpm = deployment["tpm"]
-        rpm = deployment["rpm"]
-
-        # get deployment current usage
-        current_tpm, current_rpm = self._get_deployment_usage(deployment_name=deployment["litellm_params"]["model"])
-
         # get encoding
         token_count = 0
         if messages is not None:
@@ -172,29 +169,27 @@ class Router:
                 input_text = input
             token_count = litellm.token_counter(model=deployment["model_name"], text=input_text)

-        # if at model limit, return lowest used
-        if current_tpm + token_count > tpm or current_rpm + 1 >= rpm:
-            # -----------------------
-            # Find lowest used model
-            # ----------------------
-            lowest_tpm = float('inf')
-            deployment = None
+        # -----------------------
+        # Find lowest used model
+        # ----------------------
+        lowest_tpm = float("inf")
+        deployment = None

-            # Go through all the models to get tpm, rpm
-            for item in potential_deployments:
-                item_tpm, item_rpm = self._get_deployment_usage(deployment_name=item["litellm_params"]["model"])
+        # Go through all the models to get tpm, rpm
+        for item in potential_deployments:
+            item_tpm, item_rpm = self._get_deployment_usage(deployment_name=item["litellm_params"]["model"])

-                if item_tpm == 0:
-                    return item
-                elif item_tpm + token_count > item["tpm"] or item_rpm + 1 >= item["rpm"]:
-                    continue
-                elif item_tpm < lowest_tpm:
-                    lowest_tpm = item_tpm
-                    deployment = item
+            if item_tpm == 0:
+                return item
+            elif item_tpm + token_count > item["tpm"] or item_rpm + 1 >= item["rpm"]:
+                continue
+            elif item_tpm < lowest_tpm:
+                lowest_tpm = item_tpm
+                deployment = item

-            # if none, raise exception
-            if deployment is None:
-                raise ValueError(f"No models available.")
+        # if none, raise exception
+        if deployment is None:
+            raise ValueError("No models available.")

         # return model
         return deployment
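The routing rule this hunk settles on, pulled out as a standalone sketch (names and data shapes mirror the diff; the usage dictionary is a hypothetical stand-in for _get_deployment_usage): an idle deployment is returned immediately, deployments that would exceed their tpm/rpm limits are skipped, and otherwise the deployment with the lowest current TPM wins.

def pick_deployment(potential_deployments, usage, token_count):
    # usage: {deployment model name: (tpm_used, rpm_used)}, a stand-in for the cached counters
    lowest_tpm = float("inf")
    deployment = None
    for item in potential_deployments:
        item_tpm, item_rpm = usage[item["litellm_params"]["model"]]
        if item_tpm == 0:
            return item  # completely idle deployment: take it immediately
        elif item_tpm + token_count > item["tpm"] or item_rpm + 1 >= item["rpm"]:
            continue  # this request would push the deployment over its limits
        elif item_tpm < lowest_tpm:
            lowest_tpm = item_tpm
            deployment = item
    if deployment is None:
        raise ValueError("No models available.")
    return deployment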
@@ -213,26 +208,21 @@ class Router:
         # ------------
         # Return usage
         # ------------
-        tpm = self.cache.get_cache(tpm_key)
-        rpm = self.cache.get_cache(rpm_key)
-
-        if tpm is None:
-            tpm = 0
-        if rpm is None:
-            rpm = 0
+        tpm = self.cache.get_cache(cache_key=tpm_key) or 0
+        rpm = self.cache.get_cache(cache_key=rpm_key) or 0

         return int(tpm), int(rpm)

     def increment(self, key: str, increment_value: int):
         # get value
-        cached_value = self.cache.get_cache(key)
+        cached_value = self.cache.get_cache(cache_key=key)
         # update value
         try:
             cached_value = cached_value + increment_value
         except:
             cached_value = increment_value
         # save updated value
-        self.cache.add_cache(result=cached_value, cache_key=key)
+        self.cache.add_cache(result=cached_value, cache_key=key, ttl=self.default_cache_time_seconds)

     def _set_deployment_usage(
         self,

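For illustration only (not part of the diff): increment() is a plain read-modify-write against the router's cache, and the new ttl=self.default_cache_time_seconds means each usage counter now expires (after one hour by default) instead of living in Redis or memory forever. A standalone sketch, assuming a cache object with the get_cache/add_cache interface shown above:

def increment(cache, key, increment_value, ttl=3600):
    # read the current counter (None on a miss), bump it, and write it back with an expiry
    cached_value = cache.get_cache(cache_key=key)
    try:
        cached_value = cached_value + increment_value
    except TypeError:
        cached_value = increment_value  # first write for this key
    cache.add_cache(result=cached_value, cache_key=key, ttl=ttl)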
@@ -1,4 +1,5 @@
 import sys, os
+import time
 import traceback
 from dotenv import load_dotenv

@@ -344,7 +345,7 @@ def test_custom_redis_cache_with_key():
         pytest.fail(f"Error occurred:")
     litellm.cache = None

-test_custom_redis_cache_with_key()
+# test_custom_redis_cache_with_key()

 def test_hosted_cache():
     litellm.cache = Cache(type="hosted")  # use api.litellm.ai for caching
@@ -364,3 +365,20 @@ def test_hosted_cache():

 # test_hosted_cache()

+
+def test_redis_cache_with_ttl():
+    cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])
+    cache.add_cache(cache_key="test_key", result="test_value", ttl=1)
+    cached_value = cache.get_cache(cache_key="test_key")
+    assert cached_value == "test_value"
+    time.sleep(2)
+    assert cache.get_cache(cache_key="test_key") is None
+
+
+def test_in_memory_cache_with_ttl():
+    cache = Cache(type="local")
+    cache.add_cache(cache_key="test_key", result="test_value", ttl=1)
+    cached_value = cache.get_cache(cache_key="test_key")
+    assert cached_value == "test_value"
+    time.sleep(2)
+    assert cache.get_cache(cache_key="test_key") is None