caching with model names

ishaan-jaff 2023-08-18 13:48:32 -07:00
parent 694a8ad90c
commit d0ba3ba2e5
4 changed files with 44 additions and 9 deletions


@@ -12,13 +12,13 @@ import pytest
import litellm
from litellm import embedding, completion

litellm.caching = True
messages = [{"role": "user", "content": "who is ishaan Github? "}]

# test if response cached
def test_caching():
    try:
        litellm.caching = True
        response1 = completion(model="gpt-3.5-turbo", messages=messages)
        response2 = completion(model="gpt-3.5-turbo", messages=messages)
        print(f"response1: {response1}")
@@ -32,3 +32,21 @@ def test_caching():
        litellm.caching = False
        print(f"error occurred: {traceback.format_exc()}")
        pytest.fail(f"Error occurred: {e}")

def test_caching_with_models():
    litellm.caching_with_models = True
    response2 = completion(model="gpt-3.5-turbo", messages=messages)
    response3 = completion(model="command-nightly", messages=messages)
    print(f"response2: {response2}")
    print(f"response3: {response3}")
    litellm.caching_with_models = False
    if response3 == response2:
        # models are different, so a cached response should not be reused
        print(f"response2: {response2}")
        print(f"response3: {response3}")
        pytest.fail("Error occurred: different models returned the same (cached) response")
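
For context, a minimal sketch of the keying behavior the new test exercises, assuming a simple in-memory cache. The make_cache_key helper and the cache dict below are hypothetical illustrations, not litellm's actual internals: the idea is that with caching_with_models enabled, the model name becomes part of the cache key, so identical messages sent to gpt-3.5-turbo and command-nightly do not share a cached response.

import json

cache = {}  # hypothetical in-memory cache keyed by serialized request

messages = [{"role": "user", "content": "who is ishaan Github? "}]

def make_cache_key(model, messages, caching_with_models):
    # Key on the messages alone, or on (model, messages) when
    # caching_with_models is enabled.
    payload = {"messages": messages}
    if caching_with_models:
        payload["model"] = model
    return json.dumps(payload, sort_keys=True)

key_a = make_cache_key("gpt-3.5-turbo", messages, caching_with_models=True)
key_b = make_cache_key("command-nightly", messages, caching_with_models=True)
assert key_a != key_b  # different models -> distinct cache entries, as the new test expects

With plain litellm.caching (no model in the key), key_a and key_b would collide, which is exactly the false-positive cache hit test_caching_with_models guards against.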