diff --git a/litellm/tests/test_async_fn.py b/litellm/tests/test_async_fn.py
index d0f4426b64..72e6fc3a7e 100644
--- a/litellm/tests/test_async_fn.py
+++ b/litellm/tests/test_async_fn.py
@@ -30,6 +30,8 @@ def test_get_response_streaming():
     user_message = "Hello, how are you?"
     messages = [{"content": user_message, "role": "user"}]
     try:
+        import litellm
+        litellm.set_verbose = True
         response = await acompletion(model="gpt-3.5-turbo", messages=messages, stream=True)
         print(type(response))
diff --git a/litellm/tests/test_caching.py b/litellm/tests/test_caching.py
index 706ea9a7e1..d24c6e4668 100644
--- a/litellm/tests/test_caching.py
+++ b/litellm/tests/test_caching.py
@@ -311,7 +311,6 @@ def test_redis_cache_completion():
 def custom_get_cache_key(*args, **kwargs):
     # return key to use for your cache:
     key = kwargs.get("model", "") + str(kwargs.get("messages", "")) + str(kwargs.get("temperature", "")) + str(kwargs.get("logit_bias", ""))
-    print("key for cache", key)
     return key
 
 def test_custom_redis_cache_with_key():
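
For context, a minimal sketch of how the two pieces touched by this diff fit together: the `litellm.set_verbose` debug flag the first hunk enables, and the `custom_get_cache_key` function the second hunk cleans up, which is registered on the cache via `litellm.cache.get_cache_key` following the pattern in litellm's caching docs. The Redis connection values below are placeholders, and the exact wiring is an assumption, not part of this diff:

```python
# Sketch (assumed wiring, not part of this diff): verbose logging plus a
# custom cache-key function on a Redis-backed litellm cache.
import litellm
from litellm.caching import Cache

litellm.set_verbose = True  # same debug flag the first hunk turns on in the test

def custom_get_cache_key(*args, **kwargs):
    # Build the key from the parameters that affect the completion result,
    # mirroring the function shown in the second hunk.
    key = (
        kwargs.get("model", "")
        + str(kwargs.get("messages", ""))
        + str(kwargs.get("temperature", ""))
        + str(kwargs.get("logit_bias", ""))
    )
    return key

# "localhost"/"6379" are placeholder connection details.
litellm.cache = Cache(type="redis", host="localhost", port="6379", password="")
litellm.cache.get_cache_key = custom_get_cache_key  # override the default key function
```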