diff --git a/litellm/integrations/llmonitor.py b/litellm/integrations/llmonitor.py
index b154eefea..d166e1888 100644
--- a/litellm/integrations/llmonitor.py
+++ b/litellm/integrations/llmonitor.py
@@ -74,7 +74,6 @@ class LLMonitorLogger:
                 f"LLMonitor Logging - Logging request for model {model}")
 
             if response_obj:
-                print(response_obj)
                 usage = parse_usage(
                     response_obj['usage']) if 'usage' in response_obj else None
                 output = response_obj[
@@ -110,8 +109,6 @@ class LLMonitorLogger:
                 "tokensUsage": usage,
             }]
 
-            print(data)
-
             # print_verbose(f"LLMonitor Logging - final data object: {data}")
 
             response = requests.post(
diff --git a/litellm/tests/test_llmonitor_integration.py b/litellm/tests/test_llmonitor_integration.py
index d0abb7d6b..359701958 100644
--- a/litellm/tests/test_llmonitor_integration.py
+++ b/litellm/tests/test_llmonitor_integration.py
@@ -14,14 +14,19 @@ litellm.failure_callback = ["llmonitor"]
 
 litellm.set_verbose = True
 
-# openai call
-# first_success_test = completion(model="gpt-3.5-turbo",
-#                                 messages=[{
-#                                     "role": "user",
-#                                     "content": "Hi 👋 - i'm openai"
-#                                 }])
-# print(first_success_test)
-
+
+def test_chat_openai():
+    try:
+        response = completion(model="gpt-3.5-turbo",
+                              messages=[{
+                                  "role": "user",
+                                  "content": "Hi 👋 - i'm openai"
+                              }])
+
+        print(response)
+
+    except Exception as e:
+        print(e)
 
 
 def test_embedding_openai():
@@ -31,7 +36,7 @@ def test_embedding_openai():
         print(f"response: {str(response)[:50]}")
     except Exception as e:
         print(e)
-        # pytest.fail(f"Error occurred: {e}")
 
 
+test_chat_openai()
 test_embedding_openai()
\ No newline at end of file