Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 11:43:54 +00:00)

fix(utils.py): fix langfuse integration

commit 04ce14e404 (parent 7d3b653a99)
5 changed files with 44 additions and 35 deletions

@@ -37,7 +37,7 @@ class LangFuseLogger:
             f"Langfuse Logging - Enters logging function for model {kwargs}"
         )
         metadata = kwargs.get("metadata", {})
-        prompt = [kwargs['messages']]
+        prompt = [kwargs.get('messages')]

         # langfuse does not accept jsons for logging metadata #
         kwargs.pop("litellm_logging_obj", None)
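
A note on the one-line change above: the old subscript form raises KeyError whenever the logged call carries no "messages" key (an embedding request, for instance), and that exception escapes into the logging path. A minimal sketch of the difference, with assumed kwargs shapes:

    # Sketch (not part of the commit): kwargs shapes are assumed.
    kwargs_chat = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}
    kwargs_embedding = {"model": "text-embedding-ada-002", "input": ["hi"]}

    for kw in (kwargs_chat, kwargs_embedding):
        # old: prompt = [kw['messages']] -> KeyError on the embedding call
        prompt = [kw.get('messages')]  # new: [None] instead of an exception
        print(prompt)
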
@@ -52,7 +52,7 @@ class LangFuseLogger:
             startTime=start_time,
             endTime=end_time,
             model=kwargs['model'],
-            modelParameters= kwargs,
+            modelParameters= kwargs["optional_params"],
             prompt=prompt,
             completion=response_obj['choices'][0]['message'],
             usage=Usage(
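
The comment in the first hunk ("langfuse does not accept jsons for logging metadata") explains this second change as well: the full kwargs dict mixes nested structures and non-serializable objects in with the actual sampling parameters, so only the flat optional_params sub-dict is forwarded as modelParameters. An illustrative sketch, with assumed contents:

    # Sketch (assumed contents, not from the commit).
    kwargs = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "hi"}],  # nested structure
        "litellm_logging_obj": object(),                  # not JSON-serializable
        "optional_params": {"temperature": 0.7, "max_tokens": 256},
    }

    model_parameters = kwargs["optional_params"]  # the flat dict the fix now sends
    print(model_parameters)
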
@@ -11,6 +11,7 @@ sys.path.insert(
 ) # Adds the parent directory to the system path
 import litellm
 from litellm import completion, acompletion, acreate
+litellm.num_retries = 3

 def test_sync_response():
     litellm.set_verbose = True

@@ -9,6 +9,7 @@ sys.path.insert(
 ) # Adds the parent directory to the system path
 from openai import APITimeoutError as Timeout
 import litellm
+litellm.num_retries = 3
 from litellm import batch_completion, batch_completion_models, completion, batch_completion_models_all_responses
 # litellm.set_verbose=True

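
Both test hunks above set litellm.num_retries = 3 at module level so transient provider failures do not fail the suite. litellm's actual retry internals are not shown in this diff; the sketch below is only a hypothetical illustration of the behavior that setting opts into:

    import time

    def call_with_retries(fn, num_retries=3, backoff_seconds=1.0):
        # Hypothetical helper (not litellm's implementation): retry a flaky
        # provider call a fixed number of times with linear backoff.
        last_exc = None
        for attempt in range(num_retries + 1):
            try:
                return fn()
            except Exception as exc:  # real code would retry only transient errors
                last_exc = exc
                time.sleep(backoff_seconds * (attempt + 1))
        raise last_exc

    # usage: call_with_retries(lambda: completion(model="gpt-3.5-turbo", messages=msgs))
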
@@ -1,8 +1,8 @@
 import sys
 import os
 import io, asyncio
-import logging
-logging.basicConfig(level=logging.DEBUG)
+# import logging
+# logging.basicConfig(level=logging.DEBUG)
 sys.path.insert(0, os.path.abspath('../..'))

 from litellm import completion

@@ -13,6 +13,7 @@ litellm.success_callback = ["langfuse"]
 import time

 def test_langfuse_logging_async():
+    litellm.set_verbose = True
     async def _test_langfuse():
         return await litellm.acompletion(
             model="gpt-3.5-turbo",
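
The hunk above adds litellm.set_verbose = True inside the synchronous test wrapper, which then drives the inner coroutine to completion. The full test body is truncated in this diff, so the sketch below fills it in with assumed message content, purely for illustration:

    import asyncio
    import litellm

    def test_langfuse_logging_async():
        litellm.set_verbose = True

        async def _test_langfuse():
            # message content is assumed; the diff truncates the call here
            return await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "hi"}],
            )

        response = asyncio.run(_test_langfuse())
        print(response)
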
@@ -816,6 +816,43 @@ class Logging:
                         run_id=self.litellm_call_id,
                         print_verbose=print_verbose,
                     )
+                if callback == "helicone":
+                    print_verbose("reaches helicone for logging!")
+                    model = self.model
+                    messages = kwargs["messages"]
+                    heliconeLogger.log_success(
+                        model=model,
+                        messages=messages,
+                        response_obj=result,
+                        start_time=start_time,
+                        end_time=end_time,
+                        print_verbose=print_verbose,
+                    )
+                if callback == "langfuse":
+                    print_verbose("reaches langfuse for logging!")
+                    deep_copy = {}
+                    for k, v in self.model_call_details.items():
+                        if k != "original_response": # copy.deepcopy raises errors as this could be a coroutine
+                            deep_copy[k] = v
+                    langFuseLogger.log_event(
+                        kwargs=deep_copy,
+                        response_obj=result,
+                        start_time=start_time,
+                        end_time=end_time,
+                        print_verbose=print_verbose,
+                    )
+                if callback == "traceloop":
+                    deep_copy = {}
+                    for k, v in self.model_call_details.items():
+                        if k != "original_response":
+                            deep_copy[k] = v
+                    traceloopLogger.log_event(
+                        kwargs=deep_copy,
+                        response_obj=result,
+                        start_time=start_time,
+                        end_time=end_time,
+                        print_verbose=print_verbose,
+                    )
                 if isinstance(callback, CustomLogger): # custom logger class
                     if self.stream and complete_streaming_response is None:
                         callback.log_stream_event(
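
The inline comment in this hunk states why deep_copy exists: model_call_details["original_response"] can still be a coroutine, and copy.deepcopy cannot handle coroutine objects. A small standalone sketch of the failure mode and the workaround the commit uses:

    import copy

    async def _pending_provider_call():
        return "ok"

    model_call_details = {
        "model": "gpt-3.5-turbo",
        "original_response": _pending_provider_call(),  # a live coroutine object
    }

    try:
        copy.deepcopy(model_call_details)
    except TypeError as exc:
        print(f"deepcopy failed: {exc}")  # coroutines cannot be pickled/copied

    # The commit's workaround: shallow-copy everything except the coroutine key.
    deep_copy = {k: v for k, v in model_call_details.items() if k != "original_response"}
    print(deep_copy)

    model_call_details["original_response"].close()  # silence the never-awaited warning
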
@@ -2955,37 +2992,6 @@ def handle_success(args, kwargs, result, start_time, end_time):
             slack_app.client.chat_postMessage(
                 channel=alerts_channel, text=slack_msg
             )
-        elif callback == "helicone":
-            print_verbose("reaches helicone for logging!")
-            model = args[0] if len(args) > 0 else kwargs["model"]
-            messages = args[1] if len(args) > 1 else kwargs["messages"]
-            heliconeLogger.log_success(
-                model=model,
-                messages=messages,
-                response_obj=result,
-                start_time=start_time,
-                end_time=end_time,
-                print_verbose=print_verbose,
-            )
-        elif callback == "langfuse":
-            print_verbose("reaches langfuse for logging!")
-            langFuseLogger.log_event(
-                kwargs=kwargs,
-                response_obj=result,
-                start_time=start_time,
-                end_time=end_time,
-                print_verbose=print_verbose,
-            )
-
-        elif callback == "traceloop":
-            traceloopLogger.log_event(
-                kwargs=kwargs,
-                response_obj=result,
-                start_time=start_time,
-                end_time=end_time,
-                print_verbose=print_verbose,
-            )
-
         elif callback == "aispend":
             print_verbose("reaches aispend for logging!")
             model = args[0] if len(args) > 0 else kwargs["model"]