fix caching

ishaan-jaff 2023-08-26 21:05:34 -07:00
parent 28b32dbe8b
commit d4fd0b2010
2 changed files with 16 additions and 10 deletions


@@ -14,7 +14,6 @@ from litellm import embedding, completion
messages = [{"role": "user", "content": "who is ishaan Github? "}]
# test if response cached
def test_caching():
try:
@@ -36,6 +35,7 @@ def test_caching():
def test_caching_with_models():
litellm.caching_with_models = True
response1 = completion(model="gpt-3.5-turbo", messages=messages)
response2 = completion(model="gpt-3.5-turbo", messages=messages)
response3 = completion(model="command-nightly", messages=messages)
print(f"response2: {response2}")
@@ -46,6 +46,12 @@ def test_caching_with_models():
print(f"response2: {response2}")
print(f"response3: {response3}")
pytest.fail(f"Error occurred:")
if response1 != response2:
print(f"response1: {response1}")
print(f"response2: {response2}")
pytest.fail(f"Error occurred:")
# test_caching_with_models()
def test_gpt_cache():
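
For context, a minimal, self-contained sketch of the check this commit appears to tighten in test_caching_with_models: with litellm.caching_with_models enabled, repeating the same model and messages should return the cached response, while a different model should not reuse it. This is an illustrative assumption based on the diff above, not the exact test file; the model names and messages are taken from the hunks shown, and valid API keys would need to be set in the environment for the calls to run.

import litellm
from litellm import completion

messages = [{"role": "user", "content": "who is ishaan Github? "}]

def check_caching_with_models():
    # Enable per-model caching for the duration of the check.
    litellm.caching_with_models = True
    response1 = completion(model="gpt-3.5-turbo", messages=messages)
    response2 = completion(model="gpt-3.5-turbo", messages=messages)   # expected cache hit
    response3 = completion(model="command-nightly", messages=messages)  # different model, no hit
    litellm.caching_with_models = False  # reset the flag so other tests are unaffected

    assert response1 == response2, "same model + messages should be served from cache"
    assert response2 != response3, "a different model must not reuse the cached response"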