formatting improvements

This commit is contained in:
ishaan-jaff 2023-08-28 09:20:50 -07:00
parent 3e0a16acf4
commit a69b7ffcfa
17 changed files with 464 additions and 323 deletions

View file

@@ -14,6 +14,7 @@ from litellm import embedding, completion
messages = [{"role": "user", "content": "who is ishaan Github? "}]
# test if response cached
def test_caching():
try:
@@ -50,14 +51,16 @@ def test_caching_with_models():
print(f"response1: {response1}")
print(f"response2: {response2}")
pytest.fail(f"Error occurred:")
# test_caching_with_models()
# test_caching_with_models()
def test_gpt_cache():
# INIT GPT Cache #
from gptcache import cache
from litellm.cache import completion
cache.init()
cache.set_openai_key()
@@ -67,10 +70,11 @@ def test_gpt_cache():
print(f"response2: {response2}")
print(f"response3: {response3}")
if response3['choices'] != response2['choices']:
if response3["choices"] != response2["choices"]:
# if models are different, it should not return cached response
print(f"response2: {response2}")
print(f"response3: {response3}")
pytest.fail(f"Error occurred:")
# test_gpt_cache()
# test_gpt_cache()