diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt
index b6894a1fa..5ef69ad7a 100644
--- a/.circleci/requirements.txt
+++ b/.circleci/requirements.txt
@@ -4,4 +4,5 @@ python-dotenv
 openai
 tiktoken
 importlib_metadata
-baseten
\ No newline at end of file
+baseten
+gptcache
\ No newline at end of file
diff --git a/litellm/tests/data_map.txt b/litellm/tests/data_map.txt
index 277c06bd8..a4cc41423 100644
Binary files a/litellm/tests/data_map.txt and b/litellm/tests/data_map.txt differ
diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py
index 3f6618c33..d5ab68e25 100644
--- a/litellm/tests/test_caching.py
+++ b/litellm/tests/test_caching.py
@@ -48,22 +48,23 @@ def test_caching_with_models():
         pytest.fail(f"Error occurred:")
 
 
-# def test_gpt_cache():
-#     # INIT GPT Cache #
-#     from gptcache import cache
-#     cache.init()
-#     cache.set_openai_key()
+def test_gpt_cache():
+    # INIT GPT Cache #
+    from gptcache import cache
+    from litellm.cache import completion
+    cache.init()
+    cache.set_openai_key()
 
-#     messages = [{"role": "user", "content": "what is litellm this morning? "}]
-#     response2 = litellm.cache.completion(model="gpt-3.5-turbo", messages=messages)
-#     response3 = litellm.cache.completion(model="command-nightly", messages=messages)
-#     print(f"response2: {response2}")
-#     print(f"response3: {response3}")
+    messages = [{"role": "user", "content": "what is litellm YC 22?"}]
+    response2 = completion(model="gpt-3.5-turbo", messages=messages)
+    response3 = completion(model="command-nightly", messages=messages)
+    print(f"response2: {response2}")
+    print(f"response3: {response3}")
+
+    if response3['choices'] != response2['choices']:
+        # with GPT Cache enabled, the same prompt should return the cached response even for a different model
+        print(f"response2: {response2}")
+        print(f"response3: {response3}")
+        pytest.fail(f"Error occurred:")
 
-#     if response3 != response2:
-#         # if models are different, it should not return cached response
-#         print(f"response2: {response2}")
-#         print(f"response3: {response3}")
-#         pytest.fail(f"Error occurred:")
-# test_gpt_cache()
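
The new test imports completion from litellm.cache, whose implementation is not part of this diff. The behavior the assertion depends on is that the GPT Cache-backed wrapper keys the cache on the message content rather than the model, so the second call with the same messages is served from the cache even though the model changed. Below is a minimal sketch of a wrapper with that behavior, using an in-memory dict as a stand-in for the GPT Cache store; the actual litellm.cache.completion may be implemented differently.

    # Illustrative sketch only -- not the actual litellm.cache implementation.
    import json
    import litellm

    _cache = {}  # serialized messages -> previously returned response

    def completion(model, messages, **kwargs):
        # Key the lookup on the messages alone, ignoring the model: this is
        # why test_gpt_cache expects identical 'choices' from gpt-3.5-turbo
        # and command-nightly for the same prompt.
        key = json.dumps(messages, sort_keys=True)
        if key in _cache:
            return _cache[key]
        response = litellm.completion(model=model, messages=messages, **kwargs)
        _cache[key] = response
        return response

With a prompt-keyed cache like this, response3 is served from the entry written by response2, so the two 'choices' fields match and the test passes.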