From 4cccad35f496b58a3b69f0ec84878883cf52e17d Mon Sep 17 00:00:00 2001
From: ishaan-jaff
Date: Tue, 17 Oct 2023 11:51:01 -0700
Subject: [PATCH] (feat) bump langfuse logger, track function responses,
 completion() metadata like temp, max_tokens, etc.

---
 litellm/integrations/langfuse.py | 16 +++++++--
 litellm/tests/test_langfuse.py   | 62 ++++++++++++++++----------------
 2 files changed, 45 insertions(+), 33 deletions(-)

diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index a98bf02fd..a7dda442a 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -37,14 +37,24 @@ class LangFuseLogger:
             # print(response_obj['usage']['prompt_tokens'])
             # print(response_obj['usage']['completion_tokens'])
             metadata = kwargs.get("metadata", {})
+            prompt = [kwargs['messages']]
+
+            # langfuse does not accept raw JSON objects when logging metadata, so pop them from kwargs #
+            kwargs.pop("litellm_logging_obj", None)
+            kwargs.pop("messages", None)
+            kwargs.pop("functions", None)  # pop with default=None so a missing key doesn't raise
+            kwargs.pop("function_call", None)
+            kwargs.pop("metadata", None)
+            # end of langfuse pre-processing ########################
+
             self.Langfuse.generation(InitialGeneration(
                 name=metadata.get("generation_name", "litellm-completion"),
                 startTime=start_time,
                 endTime=end_time,
                 model=kwargs['model'],
-                # modelParameters= kwargs,
-                prompt=[kwargs['messages']],
-                completion=response_obj['choices'][0]['message']['content'],
+                modelParameters=kwargs,
+                prompt=prompt,
+                completion=response_obj['choices'][0]['message'],
                 usage=Usage(
                     prompt_tokens=response_obj['usage']['prompt_tokens'],
                     completion_tokens=response_obj['usage']['completion_tokens']
diff --git a/litellm/tests/test_langfuse.py b/litellm/tests/test_langfuse.py
index 14bd0a336..60aaf006d 100644
--- a/litellm/tests/test_langfuse.py
+++ b/litellm/tests/test_langfuse.py
@@ -21,6 +21,7 @@ def test_langfuse_logging():
                 "content": "Hi 👋 - i'm claude"
             }],
             max_tokens=10,
+            temperature=0.2
         )
         print(response)
     except Exception as e:
@@ -48,37 +49,38 @@ def test_langfuse_logging_custom_generation_name():
 
 test_langfuse_logging_custom_generation_name()
 
-# def test_langfuse_logging_function_calling():
-#     function1 = [
-#         {
-#             "name": "get_current_weather",
-#             "description": "Get the current weather in a given location",
-#             "parameters": {
-#                 "type": "object",
-#                 "properties": {
-#                     "location": {
-#                         "type": "string",
-#                         "description": "The city and state, e.g. San Francisco, CA",
-#                     },
-#                     "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
-#                 },
-#                 "required": ["location"],
-#             },
-#         }
-#     ]
-#     try:
-#         response = completion(model="gpt-3.5-turbo",
-#             messages=[{
-#                 "role": "user",
-#                 "content": "what's the weather outside"
-#             }],
-#             functions=function1,
-#         )
-#         print(response)
-#     except Exception as e:
-#         print(e)
+def test_langfuse_logging_function_calling():
+    function1 = [
+        {
+            "name": "get_current_weather",
+            "description": "Get the current weather in a given location",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "location": {
+                        "type": "string",
+                        "description": "The city and state, e.g. San Francisco, CA",
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + ] + try: + response = completion(model="gpt-3.5-turbo", + messages=[{ + "role": "user", + "content": "what's the weather in boston" + }], + temperature=0.1, + functions=function1, + ) + print(response) + except Exception as e: + print(e) -# test_langfuse_logging_function_calling() +test_langfuse_logging_function_calling()