add langfuse integration

This commit is contained in:
ishaan-jaff 2023-08-31 11:04:17 -07:00
parent beac62b8fe
commit f466c443fe
3 changed files with 90 additions and 2 deletions

View file

@@ -0,0 +1,58 @@
#### What this does ####
# On success, logs events to Langfuse
import dotenv, os
import requests
import requests
from datetime import datetime
dotenv.load_dotenv() # Loading env variables using dotenv
import traceback
class LangFuseLogger:
    """Success-callback logger that records completions to Langfuse.

    Reads LANGFUSE_SECRET_KEY / LANGFUSE_PUBLIC_KEY from the environment
    and sends one Langfuse "generation" event per completion via
    ``log_event``.
    """

    def __init__(self):
        # Imported lazily so the langfuse package is only required when
        # this callback is actually enabled.
        from langfuse import Langfuse

        self.secret_key = os.getenv("LANGFUSE_SECRET_KEY")
        self.public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
        self.Langfuse = Langfuse(
            public_key=self.public_key,
            secret_key=self.secret_key,
            host="https://cloud.langfuse.com",
            # debug=True
        )

    def log_event(self, kwargs, response_obj, start_time, end_time, print_verbose):
        """Log a single completion call as a Langfuse generation.

        Args:
            kwargs: the completion call's kwargs; must contain ``'model'``
                and ``'messages'``.
            response_obj: OpenAI-format response dict with ``'choices'``
                and ``'usage'`` keys.
            start_time: datetime when the completion call started.
            end_time: datetime when the completion call finished.
            print_verbose: caller-supplied logging function.

        Logging is best-effort: any failure is reported through
        ``print_verbose`` and swallowed so it never breaks the user's
        completion call.
        """
        from langfuse.model import InitialGeneration, Usage

        try:
            print_verbose(
                f"Langfuse Logging - Enters logging function for model {kwargs}"
            )
            self.Langfuse.generation(InitialGeneration(
                name="litellm-completion",
                startTime=start_time,
                endTime=end_time,
                model=kwargs['model'],
                # modelParameters= kwargs,
                prompt=[kwargs['messages']],
                completion=response_obj['choices'][0]['message']['content'],
                usage=Usage(
                    prompt_tokens=response_obj['usage']['prompt_tokens'],
                    completion_tokens=response_obj['usage']['completion_tokens'],
                ),
            ))
            print_verbose(
                f"Langfuse Layer Logging - final response object: {response_obj}"
            )
        except Exception:
            # Catch Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit still propagate; everything
            # else is reported and suppressed (best-effort logging).
            print_verbose(f"Langfuse Layer Error - {traceback.format_exc()}")

View file

@@ -28,4 +28,20 @@
# except Exception as e:
# print(e)
# # test_chat_openai()
# def test_chat_openai():
# litellm.success_callback = ["langfuse"]
# try:
# response = completion(model="gpt-3.5-turbo",
# messages=[{
# "role": "user",
# "content": "Hi 👋 - i'm openai"
# }])
# print(response)
# except Exception as e:
# print(e)
# test_chat_openai()

View file

@@ -16,6 +16,7 @@ from .integrations.berrispend import BerriSpendLogger
from .integrations.supabase import Supabase
from .integrations.llmonitor import LLMonitorLogger
from .integrations.prompt_layer import PromptLayerLogger
from .integrations.langfuse import LangFuseLogger
from .integrations.litedebugger import LiteDebugger
from openai.error import OpenAIError as OriginalError
from openai.openai_object import OpenAIObject
@@ -41,6 +42,7 @@ slack_app = None
alerts_channel = None
heliconeLogger = None
promptLayerLogger = None
langFuseLogger = None
llmonitorLogger = None
aispendLogger = None
berrispendLogger = None
@@ -837,7 +839,7 @@ def load_test_model(
def set_callbacks(callback_list):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger
try:
for callback in callback_list:
print_verbose(f"callback: {callback}")
@@ -898,6 +900,8 @@ def set_callbacks(callback_list):
llmonitorLogger = LLMonitorLogger()
elif callback == "promptlayer":
promptLayerLogger = PromptLayerLogger()
elif callback == "langfuse":
langFuseLogger = LangFuseLogger()
elif callback == "aispend":
aispendLogger = AISpendLogger()
elif callback == "berrispend":
@@ -1165,6 +1169,16 @@ def handle_success(args, kwargs, result, start_time, end_time):
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "langfuse":
print_verbose("reaches langfuse for logging!")
langFuseLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
print_verbose=print_verbose,
)
elif callback == "aispend":
print_verbose("reaches aispend for logging!")
model = args[0] if len(args) > 0 else kwargs["model"]