From fbd2ab4c7158bcf93f4338dc100b2568171082b2 Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Thu, 23 Nov 2023 08:27:44 -0800
Subject: [PATCH] (test) caching ensure we always test add/get caching redis

---
 litellm/tests/test_caching.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py
index a482fb48d..432ed8915 100644
--- a/litellm/tests/test_caching.py
+++ b/litellm/tests/test_caching.py
@@ -13,15 +13,14 @@ import pytest
 import litellm
 from litellm import embedding, completion
 from litellm.caching import Cache
+import random
 
 # litellm.set_verbose=True
 
 messages = [{"role": "user", "content": "who is ishaan Github? "}]
 # comment
-
-####### Updated Caching as of Aug 28, 2023 ###################
 messages = [{"role": "user", "content": "who is ishaan 5222"}]
-def test_caching_v2():
+def test_caching_v2(): # test in memory cache
     try:
         litellm.cache = Cache()
         response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
@@ -153,13 +152,19 @@ def test_embedding_caching_azure():
 
 
 def test_redis_cache_completion():
-    litellm.set_verbose = True
-    messages = [{"role": "user", "content": "who is ishaan CTO of litellm from litellm 2023"}]
+    litellm.set_verbose = False
+
+    random_number = random.randint(1, 100000) # add a random number to ensure it's always adding / reading from cache
+    messages = [{"role": "user", "content": f"who is ishaan CTO of litellm, call all llm apis{random_number}"}]
     litellm.cache = Cache(type="redis", host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])
     print("test2 for caching")
     response1 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
    response2 = completion(model="gpt-3.5-turbo", messages=messages, caching=True)
     response3 = completion(model="command-nightly", messages=messages, caching=True)
+
+    print("\nresponse 1", response1)
+    print("\nresponse 2", response2)
+    print("\nresponse 3", response3)
     litellm.cache = None
     if response3['choices'][0]['message']['content'] == response2['choices'][0]['message']['content']:
         # if models are different, it should not return cached response
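
Note on the change: the patch appends a freshly generated random number to the prompt so that each test run writes a new cache entry before reading it back, instead of silently hitting a key left over from an earlier run; this is what guarantees both the add and the get paths of the Redis cache are exercised. A minimal sketch of the same cache-busting idea, outside the patch (the `fake_completion` helper and the in-memory dict are illustrative stand-ins, not litellm APIs):

```python
import random

cache = {}  # stand-in for the Redis cache, keyed by prompt content

def fake_completion(prompt):
    """Illustrative stand-in for completion(..., caching=True)."""
    if prompt in cache:
        return cache[prompt]            # cache hit: stored object returned
    result = {"content": f"answer to: {prompt}"}
    cache[prompt] = result              # cache miss: store, then return
    return result

# A fresh random suffix guarantees the first call is always a miss,
# so every run tests both adding to and reading from the cache.
random_number = random.randint(1, 100000)
prompt = f"who is ishaan CTO of litellm, call all llm apis{random_number}"

response1 = fake_completion(prompt)     # miss -> written to cache
response2 = fake_completion(prompt)     # hit  -> read back from cache
assert response1 is response2
```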