Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 03:34:10 +00:00
Commit a69b7ffcfa (parent 3e0a16acf4): formatting improvements
17 changed files with 464 additions and 323 deletions
Excerpt from one of the changed files (the caching tests):

@@ -14,6 +14,7 @@ from litellm import embedding, completion
 
 messages = [{"role": "user", "content": "who is ishaan Github? "}]
 
+
 # test if response cached
 def test_caching():
     try:
@@ -50,14 +51,16 @@ def test_caching_with_models():
         print(f"response1: {response1}")
         print(f"response2: {response2}")
         pytest.fail(f"Error occurred:")
+
+
 # test_caching_with_models()
 
 
 def test_gpt_cache():
     # INIT GPT Cache #
     from gptcache import cache
     from litellm.cache import completion
 
     cache.init()
     cache.set_openai_key()
 
@@ -67,10 +70,11 @@ def test_gpt_cache():
     print(f"response2: {response2}")
     print(f"response3: {response3}")
 
-    if response3['choices'] != response2['choices']:
+    if response3["choices"] != response2["choices"]:
         # if models are different, it should not return cached response
         print(f"response2: {response2}")
         print(f"response3: {response3}")
         pytest.fail(f"Error occurred:")
+
 # test_gpt_cache()
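Only fragments of test_gpt_cache appear in the hunks above. Below is a minimal sketch of the flow they exercise, assembled from the calls that are visible in the diff; the model name, the try/except placement, and the equality assertion on repeated identical calls are illustrative assumptions, not lines from the actual test.

import pytest
from gptcache import cache
from litellm.cache import completion  # the diff imports completion from litellm.cache for this test

messages = [{"role": "user", "content": "who is ishaan Github? "}]


def test_gpt_cache_sketch():
    cache.init()            # start gptcache with its default exact-match store
    cache.set_openai_key()  # picks up OPENAI_API_KEY from the environment
    try:
        response1 = completion(model="gpt-3.5-turbo", messages=messages)
        response2 = completion(model="gpt-3.5-turbo", messages=messages)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
    # an identical model + prompt pair should be answered from the cache,
    # so both responses should carry the same choices
    assert response1["choices"] == response2["choices"]

The real test goes a step further and compares a response3 against response2 across models, per the comment in the last hunk. Note also that the f-strings passed to pytest.fail in the diff contain no placeholders, so those failure messages carry no exception detail.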