Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)

Add tests for cache with TTL

This commit is contained in:
parent f0a9f8c61e
commit bac6f41eb7

1 changed file with 33 additions and 15 deletions
@@ -1,4 +1,5 @@
 import sys, os
+import time
 import traceback
 from dotenv import load_dotenv

@@ -36,7 +37,7 @@ def test_gpt_cache():
         cache_key = last_content_without_prompt_val + data["model"]
         print("cache_key", cache_key)
         return cache_key


     cache.init(pre_func=pre_cache_func)
     cache.set_openai_key()
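The cache key logic in this hunk concatenates the last message content (with the prompt stripped) and the model name, so responses from different models never collide. A minimal sketch of such a pre-cache hook is shown below; the exact shape of the gptcache data dict and the message handling are assumptions for illustration, not taken from this diff.

# Illustrative sketch only; the real pre_cache_func is defined earlier in this test file.
def pre_cache_func(data, **kwargs):
    # assumed: data carries OpenAI-style kwargs, including "messages" and "model"
    last_content_without_prompt_val = data["messages"][-1]["content"]
    # key on message content + model so different models never share cache entries
    cache_key = last_content_without_prompt_val + data["model"]
    print("cache_key", cache_key)
    return cache_key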
@@ -46,12 +47,12 @@ def test_gpt_cache():
     response2 = completion(model="gpt-3.5-turbo", messages=messages)
     response3 = completion(model="command-nightly", messages=messages)

     if response1["choices"] != response2["choices"]: # same models should cache
         print(f"response1: {response1}")
         print(f"response2: {response2}")
         pytest.fail(f"Error occurred:")

     if response3["choices"] == response2["choices"]: # different models, don't cache
         # if models are different, it should not return cached response
         print(f"response2: {response2}")
         print(f"response3: {response3}")
@@ -124,9 +125,9 @@ def test_embedding_caching():
     embedding2 = embedding(model="text-embedding-ada-002", input=text_to_embed, caching=True)
     end_time = time.time()
     print(f"Embedding 2 response time: {end_time - start_time} seconds")

     litellm.cache = None
     assert end_time - start_time <= 0.1 # ensure 2nd response comes in in under 0.1 s
     if embedding2['data'][0]['embedding'] != embedding1['data'][0]['embedding']:
         print(f"embedding1: {embedding1}")
         print(f"embedding2: {embedding2}")
@@ -178,14 +179,14 @@ def test_embedding_caching_azure():
     )
     end_time = time.time()
     print(f"Embedding 2 response time: {end_time - start_time} seconds")

     litellm.cache = None
     assert end_time - start_time <= 0.1 # ensure 2nd response comes in in under 0.1 s
     if embedding2['data'][0]['embedding'] != embedding1['data'][0]['embedding']:
         print(f"embedding1: {embedding1}")
         print(f"embedding2: {embedding2}")
         pytest.fail("Error occurred: Embedding caching failed")

     os.environ['AZURE_API_VERSION'] = api_version
     os.environ['AZURE_API_BASE'] = api_base
     os.environ['AZURE_API_KEY'] = api_key
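The three os.environ assignments at the end of this hunk put the Azure credentials back the way the test found them, presumably after the test cleared them to show the second embedding call was served from cache rather than from Azure. A minimal sketch of that capture-and-restore pattern, with the intermediate step left as a placeholder, might look like:

# Sketch of the save/clear/restore pattern (placeholder body, not the actual test):
import os

api_version = os.environ.get('AZURE_API_VERSION', '')  # capture current values
api_base = os.environ.get('AZURE_API_BASE', '')
api_key = os.environ.get('AZURE_API_KEY', '')
try:
    ...  # clear the variables and make the cached embedding() calls here
finally:
    os.environ['AZURE_API_VERSION'] = api_version  # always restore, even on failure
    os.environ['AZURE_API_BASE'] = api_base
    os.environ['AZURE_API_KEY'] = api_key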
@@ -279,11 +280,11 @@ def test_redis_cache_completion():

     def set_cache(key, value):
         local_cache[key] = value

     def get_cache(key):
         if key in local_cache:
             return local_cache[key]

     litellm.cache.cache.set_cache = set_cache
     litellm.cache.cache.get_cache = get_cache
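The set_cache/get_cache helpers above are patched onto the cache client so writes and reads go through a plain dict the test can inspect instead of a live Redis connection. A minimal end-to-end sketch of that wiring is below; the Cache import path, the local cache type, and the example prompt are assumptions, and litellm may pass extra kwargs that a real replacement would need to accept.

import litellm
from litellm import completion
from litellm.caching import Cache  # assumed import path

local_cache = {}  # stand-in store the test can inspect

def set_cache(key, value):
    local_cache[key] = value

def get_cache(key):
    if key in local_cache:
        return local_cache[key]

litellm.cache = Cache(type="local")           # enable caching
litellm.cache.cache.set_cache = set_cache     # divert writes into the dict
litellm.cache.cache.get_cache = get_cache     # and reads out of it

messages = [{"role": "user", "content": "hello"}]  # hypothetical prompt
response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
response2 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
assert local_cache  # the patched set_cache should have recorded at least one entry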
@@ -322,11 +323,11 @@ def test_custom_redis_cache_with_key():

     def set_cache(key, value):
         local_cache[key] = value

     def get_cache(key):
         if key in local_cache:
             return local_cache[key]

     litellm.cache.cache.set_cache = set_cache
     litellm.cache.cache.get_cache = get_cache
@@ -335,16 +336,16 @@ def test_custom_redis_cache_with_key():
     response1 = completion(model="gpt-3.5-turbo", messages=messages, temperature=1, caching=True)
     response2 = completion(model="gpt-3.5-turbo", messages=messages, temperature=1, caching=True)
     response3 = completion(model="gpt-3.5-turbo", messages=messages, temperature=1, caching=False)

     print(f"response1: {response1}")
     print(f"response2: {response2}")
     print(f"response3: {response3}")

     if response3['choices'][0]['message']['content'] == response2['choices'][0]['message']['content']:
         pytest.fail(f"Error occurred:")
     litellm.cache = None

-test_custom_redis_cache_with_key()
+# test_custom_redis_cache_with_key()

 def test_hosted_cache():
     litellm.cache = Cache(type="hosted") # use api.litellm.ai for caching
@@ -364,3 +365,20 @@ def test_hosted_cache():

 # test_hosted_cache()

+
+def test_redis_cache_with_ttl():
+    cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])
+    cache.add_cache(cache_key="test_key", result="test_value", ttl=1)
+    cached_value = cache.get_cache(cache_key="test_key")
+    assert cached_value == "test_value"
+    time.sleep(2)
+    assert cache.get_cache(cache_key="test_key") is None
+
+
+def test_in_memory_cache_with_ttl():
+    cache = Cache(type="local")
+    cache.add_cache(cache_key="test_key", result="test_value", ttl=1)
+    cached_value = cache.get_cache(cache_key="test_key")
+    assert cached_value == "test_value"
+    time.sleep(2)
+    assert cache.get_cache(cache_key="test_key") is None
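The two new tests pin down only the public contract: a value written with ttl=1 is readable immediately and reads back as None once the TTL has elapsed, with the 2-second sleep giving some margin over the 1-second TTL. For reference, a toy in-memory store with that contract, written from scratch and not taken from litellm's implementation, could look like:

import time

class TTLCache:
    """Toy TTL cache: keeps (value, expiry) pairs and hides entries past their expiry."""

    def __init__(self):
        self._store = {}

    def add_cache(self, cache_key, result, ttl=None):
        expiry = time.time() + ttl if ttl is not None else None
        self._store[cache_key] = (result, expiry)

    def get_cache(self, cache_key):
        entry = self._store.get(cache_key)
        if entry is None:
            return None
        value, expiry = entry
        if expiry is not None and time.time() > expiry:
            del self._store[cache_key]  # lazily evict the expired entry
            return None
        return value

Against this toy class the same assertions pass: add_cache(cache_key="test_key", result="test_value", ttl=1), an immediate get_cache returns "test_value", and after time.sleep(2) it returns None.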