From 3743893e76c2fa2c6cc8d7d624a572007bd9dded Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 30 Oct 2023 20:37:12 -0700
Subject: [PATCH] fix(main.py): removing print_verbose

---
 litellm/main.py                            |  3 --
 litellm/tests/test_helicone_integration.py | 44 +++++++++++-----------
 litellm/tests/test_sentry.py               | 40 --------------------
 3 files changed, 22 insertions(+), 65 deletions(-)
 delete mode 100644 litellm/tests/test_sentry.py

diff --git a/litellm/main.py b/litellm/main.py
index aaf8d790fa..d1c35ef1ee 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -416,9 +416,6 @@ def completion(
         or None # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
     )
     # set API KEY
-    print_verbose(
-        f"api_key: {api_key}; dynamic_api_key: {dynamic_api_key}; litellm.api_key: {litellm.api_key}; litellm.openai_key: {litellm.openai_key}; os.environ['OPENAI_API_KEY']: {os.environ['OPENAI_API_KEY']}"
-    )
     api_key = (
         api_key or
         dynamic_api_key or # allows us to read env variables for compatible openai api's like perplexity
diff --git a/litellm/tests/test_helicone_integration.py b/litellm/tests/test_helicone_integration.py
index 66e375d170..82669d0920 100644
--- a/litellm/tests/test_helicone_integration.py
+++ b/litellm/tests/test_helicone_integration.py
@@ -1,30 +1,30 @@
-#### What this tests ####
-# This tests if logging to the helicone integration actually works
+# #### What this tests ####
+# # This tests if logging to the helicone integration actually works
 
-import sys, os
-import traceback
-import pytest
+# import sys, os
+# import traceback
+# import pytest
 
-sys.path.insert(
-    0, os.path.abspath("../..")
-)  # Adds the parent directory to the system path
-import litellm
-from litellm import embedding, completion
+# sys.path.insert(
+#     0, os.path.abspath("../..")
+# )  # Adds the parent directory to the system path
+# import litellm
+# from litellm import embedding, completion
 
-litellm.success_callback = ["helicone"]
+# litellm.success_callback = ["helicone"]
 
-litellm.set_verbose = True
+# litellm.set_verbose = True
 
-user_message = "Hello, how are you?"
-messages = [{"content": user_message, "role": "user"}]
+# user_message = "Hello, how are you?"
+# messages = [{"content": user_message, "role": "user"}]
 
-# openai call
-response = completion(
-    model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}]
-)
+# # openai call
+# response = completion(
+#     model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}]
+# )
 
-# cohere call
-response = completion(
-    model="command-nightly", messages=[{"role": "user", "content": "Hi 👋 - i'm cohere"}]
-)
+# # cohere call
+# response = completion(
+#     model="command-nightly", messages=[{"role": "user", "content": "Hi 👋 - i'm cohere"}]
+# )
 
diff --git a/litellm/tests/test_sentry.py b/litellm/tests/test_sentry.py
deleted file mode 100644
index be53a9ca4c..0000000000
--- a/litellm/tests/test_sentry.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import sys
-import os
-import io
-
-sys.path.insert(0, os.path.abspath('../..'))
-
-from litellm import completion
-import litellm
-
-litellm.failure_callback = ["sentry"]
-
-import time
-
-def test_exception_tracking():
-    print('expect this to fail and log to sentry')
-    litellm.set_verbose=True
-    old_api_key = os.environ["OPENAI_API_KEY"]
-    os.environ["OPENAI_API_KEY"] = "ss"
-    try:
-        response = completion(model="gpt-3.5-turbo",
-                            messages=[{
-                                "role": "user",
-                                "content": "Hi 👋 - i'm claude"
-                            }],
-                            max_tokens=10,
-                            temperature=0.2
-                            )
-        print(response)
-        os.environ["OPENAI_API_KEY"] = old_api_key
-    except Exception as e:
-        print("got_exception")
-        print(e)
-        os.environ["OPENAI_API_KEY"] = old_api_key
-
-test_exception_tracking()
-
-
-
-
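Note on the main.py hunk: the removed print_verbose call interpolated every candidate credential (api_key, dynamic_api_key, litellm.api_key, litellm.openai_key, and os.environ['OPENAI_API_KEY']) into a debug string, so running with verbose logging enabled wrote raw API keys to output; the direct os.environ['OPENAI_API_KEY'] lookup also raised KeyError on any run where that variable was unset. If the key-resolution chain ever needs debug logging again, a minimal sketch along these lines would keep the same information visible without the secrets — _mask and debug_key_resolution are hypothetical helpers for illustration, not part of litellm:

import os

def _mask(secret):
    """Render a secret as '****' plus its last 4 characters."""
    if not secret:
        return str(secret)  # keep None / "" readable in logs
    return "****" + str(secret)[-4:]

def debug_key_resolution(api_key, dynamic_api_key, litellm_api_key, litellm_openai_key):
    # Same fields as the removed f-string, masked, with os.environ.get()
    # so a missing OPENAI_API_KEY logs as None instead of raising KeyError.
    print(
        f"api_key: {_mask(api_key)}; "
        f"dynamic_api_key: {_mask(dynamic_api_key)}; "
        f"litellm.api_key: {_mask(litellm_api_key)}; "
        f"litellm.openai_key: {_mask(litellm_openai_key)}; "
        f"OPENAI_API_KEY: {_mask(os.environ.get('OPENAI_API_KEY'))}"
    )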
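Note on the deleted test_sentry.py: the old test read os.environ["OPENAI_API_KEY"] up front (a KeyError if the variable was unset), restored the key by hand in both the try and the except branch, and called test_exception_tracking() at module level so it ran on import. If the test is ever restored, a sketch like the following leans on pytest's standard monkeypatch fixture for setup and automatic restoration instead — a hypothetical rewrite, not part of this patch:

import litellm
from litellm import completion

def test_exception_tracking(monkeypatch):
    litellm.failure_callback = ["sentry"]
    litellm.set_verbose = True
    # Deliberately invalid key; monkeypatch restores (or unsets) the
    # original value after the test, whatever happens inside.
    monkeypatch.setenv("OPENAI_API_KEY", "ss")
    try:
        response = completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hi 👋 - i'm claude"}],
            max_tokens=10,
            temperature=0.2,
        )
        print(response)
    except Exception as e:
        print("got_exception")
        print(e)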