(feat) bump langfuse logger, track function responses, completion() metadata like temp, max_tokens etc

This commit is contained in:
ishaan-jaff 2023-10-17 11:51:01 -07:00
parent dbd1d702e4
commit 4cccad35f4
2 changed files with 45 additions and 33 deletions

View file

@@ -37,14 +37,24 @@ class LangFuseLogger:
# print(response_obj['usage']['prompt_tokens']) # print(response_obj['usage']['prompt_tokens'])
# print(response_obj['usage']['completion_tokens']) # print(response_obj['usage']['completion_tokens'])
metadata = kwargs.get("metadata", {}) metadata = kwargs.get("metadata", {})
prompt = [kwargs['messages']]
# langfuse does not accept jsons for logging metadata #
kwargs.pop("litellm_logging_obj", None)
kwargs.pop("messages", None)
kwargs.pop("functions", None) # ensure it's a safe pop
kwargs.pop("function_call", None) # ensure it's a safe pop
kwargs.pop("metadata", None) # ensure it's a safe pop
# end of processing langfuse ########################
self.Langfuse.generation(InitialGeneration( self.Langfuse.generation(InitialGeneration(
name=metadata.get("generation_name", "litellm-completion"), name=metadata.get("generation_name", "litellm-completion"),
startTime=start_time, startTime=start_time,
endTime=end_time, endTime=end_time,
model=kwargs['model'], model=kwargs['model'],
# modelParameters= kwargs, modelParameters= kwargs,
prompt=[kwargs['messages']], prompt=prompt,
completion=response_obj['choices'][0]['message']['content'], completion=response_obj['choices'][0]['message'],
usage=Usage( usage=Usage(
prompt_tokens=response_obj['usage']['prompt_tokens'], prompt_tokens=response_obj['usage']['prompt_tokens'],
completion_tokens=response_obj['usage']['completion_tokens'] completion_tokens=response_obj['usage']['completion_tokens']

View file

@@ -21,6 +21,7 @@ def test_langfuse_logging():
"content": "Hi 👋 - i'm claude" "content": "Hi 👋 - i'm claude"
}], }],
max_tokens=10, max_tokens=10,
temperature=0.2
) )
print(response) print(response)
except Exception as e: except Exception as e:
@@ -48,37 +49,38 @@ def test_langfuse_logging_custom_generation_name():
test_langfuse_logging_custom_generation_name() test_langfuse_logging_custom_generation_name()
# def test_langfuse_logging_function_calling(): def test_langfuse_logging_function_calling():
# function1 = [ function1 = [
# { {
# "name": "get_current_weather", "name": "get_current_weather",
# "description": "Get the current weather in a given location", "description": "Get the current weather in a given location",
# "parameters": { "parameters": {
# "type": "object", "type": "object",
# "properties": { "properties": {
# "location": { "location": {
# "type": "string", "type": "string",
# "description": "The city and state, e.g. San Francisco, CA", "description": "The city and state, e.g. San Francisco, CA",
# }, },
# "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
# }, },
# "required": ["location"], "required": ["location"],
# }, },
# } }
# ] ]
# try: try:
# response = completion(model="gpt-3.5-turbo", response = completion(model="gpt-3.5-turbo",
# messages=[{ messages=[{
# "role": "user", "role": "user",
# "content": "what's the weather outside" "content": "what's the weather in boston"
# }], }],
# functions=function1, temperature=0.1,
# ) functions=function1,
# print(response) )
# except Exception as e: print(response)
# print(e) except Exception as e:
print(e)
# test_langfuse_logging_function_calling() test_langfuse_logging_function_calling()