Merge pull request #722 from karvetskiy/fix-router-caching

Fix caching for Router
This commit is contained in:
Krish Dholakia 2023-10-31 16:39:18 -07:00 committed by GitHub
commit 9bef396d04
3 changed files with 118 additions and 98 deletions

@@ -11,6 +11,7 @@ import litellm
import time
import json
def get_prompt(*args, **kwargs):
# keep these checks safe; they should not throw any exceptions
if len(args) > 1:
@@ -23,20 +24,30 @@ def get_prompt(*args, **kwargs):
return prompt
return None
class RedisCache():
class BaseCache:
def set_cache(self, key, value, **kwargs):
raise NotImplementedError
def get_cache(self, key, **kwargs):
raise NotImplementedError
class RedisCache(BaseCache):
def __init__(self, host, port, password):
import redis
# if users don't provide one, use the default litellm cache
self.redis_client = redis.Redis(host=host, port=port, password=password)
def set_cache(self, key, value):
def set_cache(self, key, value, **kwargs):
ttl = kwargs.get("ttl", None)
try:
self.redis_client.set(key, str(value))
self.redis_client.set(name=key, value=str(value), ex=ttl)
except Exception as e:
# NON blocking - notify users Redis is throwing an exception
print("LiteLLM Caching: Got exception from REDIS: ", e)
def get_cache(self, key):
def get_cache(self, key, **kwargs):
try:
# TODO convert this to a ModelResponse object
cached_response = self.redis_client.get(key)
@@ -50,14 +61,17 @@ class RedisCache():
# NON blocking - notify users Redis is throwing an exception
print("LiteLLM Caching: Got exception from REDIS: ", e)
class HostedCache():
def set_cache(self, key, value):
class HostedCache(BaseCache):
def set_cache(self, key, value, **kwargs):
if "ttl" in kwargs:
print("LiteLLM Caching: TTL is not supported for hosted cache!")
# make a post request to api.litellm.ai/set_cache
import requests
url = f"https://api.litellm.ai/set_cache?key={key}&value={str(value)}"
requests.request("POST", url) # post request to set this in the hosted litellm cache
def get_cache(self, key):
def get_cache(self, key, **kwargs):
import requests
url = f"https://api.litellm.ai/get_cache?key={key}"
cached_response = requests.request("GET", url)
@@ -72,25 +86,29 @@ class HostedCache():
except:
return cached_response
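The hosted backend has no expiry parameter on api.litellm.ai, so a ttl kwarg is accepted for interface compatibility but only triggers the warning. A hedged illustration (hypothetical key and value, assuming the class is importable from litellm.caching):

from litellm.caching import HostedCache

hosted = HostedCache()
hosted.set_cache("some_key", "some_value", ttl=60)
# prints "LiteLLM Caching: TTL is not supported for hosted cache!"
# and the value is still written via POST to api.litellm.ai/set_cache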
class InMemoryCache():
class InMemoryCache(BaseCache):
def __init__(self):
# if users don't provide one, use the default litellm cache
self.cache_dict = {}
self.ttl_dict = {}
def set_cache(self, key, value):
#print("in set cache for inmem")
def set_cache(self, key, value, **kwargs):
self.cache_dict[key] = value
#print(self.cache_dict)
if "ttl" in kwargs:
self.ttl_dict[key] = time.time() + kwargs["ttl"]
def get_cache(self, key):
#print("in get cache for inmem")
def get_cache(self, key, **kwargs):
if key in self.cache_dict:
#print("got a cache hit")
if key in self.ttl_dict:
if time.time() > self.ttl_dict[key]:
self.cache_dict.pop(key, None)
return None
return self.cache_dict[key]
#print("got a cache miss")
return None
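The in-memory backend has no native expiry, so the change keeps a parallel ttl_dict of absolute expiry timestamps and evicts lazily on read. A self-contained sketch of that pattern (not the litellm class itself):

import time

class TTLDict:
    """Tiny in-memory cache with lazy TTL eviction (a sketch, not InMemoryCache)."""
    def __init__(self):
        self.cache_dict = {}
        self.ttl_dict = {}

    def set(self, key, value, ttl=None):
        self.cache_dict[key] = value
        if ttl is not None:
            # remember the absolute time after which the entry is stale
            self.ttl_dict[key] = time.time() + ttl

    def get(self, key):
        if key in self.cache_dict:
            expiry = self.ttl_dict.get(key)
            if expiry is not None and time.time() > expiry:
                # expired: evict on read and report a miss
                self.cache_dict.pop(key, None)
                self.ttl_dict.pop(key, None)
                return None
            return self.cache_dict[key]
        return None

c = TTLDict()
c.set("k", "v", ttl=1)
assert c.get("k") == "v"
time.sleep(1.1)
assert c.get("k") is None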
class Cache():
class Cache:
def __init__(
self,
type="local",
@@ -201,12 +219,6 @@ class Cache():
# print(cache_key)
if cache_key is not None:
# print("adding to cache", cache_key, result)
self.cache.set_cache(cache_key, result)
self.cache.set_cache(cache_key, result, **kwargs)
except:
pass
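With add_cache now forwarding its **kwargs into set_cache, a ttl passed by a caller reaches whichever backend is configured. A hedged usage sketch, mirroring the tests further down (assuming Cache is importable from litellm.caching):

from litellm.caching import Cache

cache = Cache(type="local")                                     # in-memory backend
cache.add_cache(cache_key="greeting", result="hello", ttl=30)   # ttl forwarded to InMemoryCache.set_cache
print(cache.get_cache(cache_key="greeting"))                    # "hello" until the 30s TTL lapses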

@@ -24,6 +24,8 @@ class Router:
"""
model_names: List = []
cache_responses: bool = False
default_cache_time_seconds: int = 1 * 60 * 60 # 1 hour
def __init__(self,
model_list: Optional[list] = None,
redis_host: Optional[str] = None,
@@ -133,7 +135,10 @@ class Router:
Function LiteLLM submits a callback to after a successful
completion. Purpose of this is to update TPM/RPM usage per model
"""
model_name = kwargs.get('model', None) # i.e. azure/gpt35turbo
model_name = kwargs.get('model', None) # i.e. gpt35turbo
custom_llm_provider = kwargs.get("litellm_params", {}).get('custom_llm_provider', None) # i.e. azure
if custom_llm_provider:
model_name = f"{custom_llm_provider}/{model_name}"
total_tokens = completion_response['usage']['total_tokens']
self._set_deployment_usage(model_name, total_tokens)
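With the fix, the callback rebuilds the fully qualified deployment name (provider prefix plus model) before recording usage, so the counter key matches what get_available_deployment later reads back. A minimal sketch of the prefixing logic, using the field names from the diff:

def qualified_model_name(kwargs):
    # naming fix: bare model "gpt35turbo" plus provider "azure" -> "azure/gpt35turbo"
    model_name = kwargs.get("model", None)
    custom_llm_provider = kwargs.get("litellm_params", {}).get("custom_llm_provider", None)
    if custom_llm_provider:
        model_name = f"{custom_llm_provider}/{model_name}"
    return model_name

assert qualified_model_name(
    {"model": "gpt35turbo", "litellm_params": {"custom_llm_provider": "azure"}}
) == "azure/gpt35turbo"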
@@ -150,17 +155,9 @@ class Router:
if item["model_name"] == model:
potential_deployments.append(item)
# set first model as current model
# set first model as current model to calculate token count
deployment = potential_deployments[0]
# get model tpm, rpm limits
tpm = deployment["tpm"]
rpm = deployment["rpm"]
# get deployment current usage
current_tpm, current_rpm = self._get_deployment_usage(deployment_name=deployment["litellm_params"]["model"])
# get encoding
token_count = 0
if messages is not None:
@@ -172,12 +169,10 @@ class Router:
input_text = input
token_count = litellm.token_counter(model=deployment["model_name"], text=input_text)
# if at model limit, return lowest used
if current_tpm + token_count > tpm or current_rpm + 1 >= rpm:
# -----------------------
# Find lowest used model
# ----------------------
lowest_tpm = float('inf')
lowest_tpm = float("inf")
deployment = None
# Go through all the models to get tpm, rpm
@@ -194,7 +189,7 @@ class Router:
# if none, raise exception
if deployment is None:
raise ValueError(f"No models available.")
raise ValueError("No models available.")
# return model
return deployment
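The selection hunks above boil down to: gather the deployments registered under the requested model name, count the prompt tokens, then pick the deployment with the lowest current TPM from the cached usage counters. A hedged sketch of that selection step only; the real method also applies the token count and RPM checks elided from the hunk:

def pick_lowest_tpm(potential_deployments, get_usage):
    """Sketch: choose the least-loaded deployment by current TPM.

    get_usage(model) is assumed to return (current_tpm, current_rpm)
    read back from the cache, as _get_deployment_usage does below.
    """
    lowest_tpm = float("inf")
    chosen = None
    for item in potential_deployments:
        current_tpm, _current_rpm = get_usage(item["litellm_params"]["model"])
        if current_tpm < lowest_tpm:
            lowest_tpm = current_tpm
            chosen = item
    if chosen is None:
        raise ValueError("No models available.")
    return chosen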
@@ -213,26 +208,21 @@ class Router:
# ------------
# Return usage
# ------------
tpm = self.cache.get_cache(tpm_key)
rpm = self.cache.get_cache(rpm_key)
if tpm is None:
tpm = 0
if rpm is None:
rpm = 0
tpm = self.cache.get_cache(cache_key=tpm_key) or 0
rpm = self.cache.get_cache(cache_key=rpm_key) or 0
return int(tpm), int(rpm)
def increment(self, key: str, increment_value: int):
# get value
cached_value = self.cache.get_cache(key)
cached_value = self.cache.get_cache(cache_key=key)
# update value
try:
cached_value = cached_value + increment_value
except:
cached_value = increment_value
# save updated value
self.cache.add_cache(result=cached_value, cache_key=key)
self.cache.add_cache(result=cached_value, cache_key=key, ttl=self.default_cache_time_seconds)
def _set_deployment_usage(
self,

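Because increment() now writes the counter back with ttl=default_cache_time_seconds, per-deployment TPM/RPM counters expire after an hour instead of living in the cache forever. A small sketch of the read-increment-write cycle against the Cache API from the first file (the counter key is hypothetical, and Cache is assumed importable from litellm.caching):

from litellm.caching import Cache

cache = Cache(type="local")
DEFAULT_CACHE_TIME_SECONDS = 1 * 60 * 60  # mirrors Router.default_cache_time_seconds

def increment(key, increment_value):
    cached_value = cache.get_cache(cache_key=key)
    try:
        cached_value = cached_value + increment_value
    except TypeError:
        # first write for this key (or an unusable cached value): start from the increment
        cached_value = increment_value
    # the TTL keeps stale usage counters from accumulating indefinitely
    cache.add_cache(result=cached_value, cache_key=key, ttl=DEFAULT_CACHE_TIME_SECONDS)

increment("azure/gpt35turbo_tpm", 1200)  # hypothetical counter key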
@@ -1,4 +1,5 @@
import sys, os
import time
import traceback
from dotenv import load_dotenv
@@ -344,7 +345,7 @@ def test_custom_redis_cache_with_key():
pytest.fail(f"Error occurred:")
litellm.cache = None
test_custom_redis_cache_with_key()
# test_custom_redis_cache_with_key()
def test_hosted_cache():
litellm.cache = Cache(type="hosted") # use api.litellm.ai for caching
@@ -364,3 +365,20 @@ def test_hosted_cache():
# test_hosted_cache()
def test_redis_cache_with_ttl():
cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])
cache.add_cache(cache_key="test_key", result="test_value", ttl=1)
cached_value = cache.get_cache(cache_key="test_key")
assert cached_value == "test_value"
time.sleep(2)
assert cache.get_cache(cache_key="test_key") is None
def test_in_memory_cache_with_ttl():
cache = Cache(type="local")
cache.add_cache(cache_key="test_key", result="test_value", ttl=1)
cached_value = cache.get_cache(cache_key="test_key")
assert cached_value == "test_value"
time.sleep(2)
assert cache.get_cache(cache_key="test_key") is None