diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index 3aecb5f43..9ec9a4d7f 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -34,7 +34,6 @@ class LangFuseLogger:
             # print(response_obj['usage']['prompt_tokens'])
             # print(response_obj['usage']['completion_tokens'])

-            generationStartTime = datetime.now()
             self.Langfuse.generation(InitialGeneration(
                 name="litellm-completion",
                 startTime=start_time,
@@ -48,7 +47,7 @@ class LangFuseLogger:
                     completion_tokens=response_obj['usage']['completion_tokens']
                 ),
             ))
-
+            self.Langfuse.flush()
             print_verbose(
                 f"Langfuse Layer Logging - final response object: {response_obj}"
             )
diff --git a/litellm/tests/test_promptlayer_integration.py b/litellm/tests/test_promptlayer_integration.py
index 06cb58d18..9cb2ccdf1 100644
--- a/litellm/tests/test_promptlayer_integration.py
+++ b/litellm/tests/test_promptlayer_integration.py
@@ -1,6 +1,6 @@
-# #### What this tests ####
-# # This tests if logging to the llmonitor integration actually works
-# # Adds the parent directory to the system path
+#### What this tests ####
+# This tests if logging to the llmonitor integration actually works
+# Adds the parent directory to the system path

 # import sys
 # import os
@@ -33,14 +33,13 @@

 # def test_chat_openai():
 #     litellm.success_callback = ["langfuse"]
 #     try:
-#         response = completion(model="gpt-3.5-turbo",
+#         response = completion(model="replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1",
 #                               messages=[{
 #                                   "role": "user",
 #                                   "content": "Hi 👋 - i'm openai"
 #                               }])
 #         print(response)
-
 #     except Exception as e:
 #         print(e)

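Note on the functional change in langfuse.py: the langfuse SDK queues events and delivers them asynchronously, so the added self.Langfuse.flush() forces delivery before the caller moves on; without it, a short-lived process such as a test run can exit before the generation event is sent. The removed generationStartTime = datetime.now() was dead code, since the generation is already timestamped with the start_time passed in by the caller. Below is a minimal standalone sketch of the same logging pattern; the import paths, constructor arguments, and placeholder values (model name, token counts, env var names) are assumptions, not taken from this PR.

import os
from datetime import datetime

from langfuse import Langfuse
from langfuse.model import InitialGeneration, Usage

langfuse = Langfuse(
    public_key=os.environ["LANGFUSE_PUBLIC_KEY"],
    secret_key=os.environ["LANGFUSE_SECRET_KEY"],
)

start_time = datetime.now()
# ... make the LLM call being logged here ...
end_time = datetime.now()

# Log one generation event, reusing the caller's timestamps instead of
# recomputing them (which is why the unused generationStartTime was removed).
langfuse.generation(InitialGeneration(
    name="litellm-completion",
    startTime=start_time,
    endTime=end_time,
    model="gpt-3.5-turbo",  # placeholder model name
    usage=Usage(
        prompt_tokens=10,       # placeholder counts; litellm reads these
        completion_tokens=20,   # from response_obj['usage']
    ),
))

# The SDK queues events and sends them in the background; flush() blocks
# until the queue is drained, so a short-lived process (e.g. a pytest run)
# does not exit before the event reaches Langfuse.
langfuse.flush()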