forked from phoenix/litellm-mirror
(feat) use same id to log on s3, langfuse
parent c9b3a03092
commit d69edac11b
3 changed files with 21 additions and 38 deletions
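The diff below centralizes the per-request log ID in a single helper, litellm.utils.get_logging_id, so the Langfuse generation id and the S3 object key are built from the same string. A minimal sketch of that ID format, inferred from the hunks below; the timestamp and response id here are made-up values:

# Sketch only: illustrates the "time-<HH-MM-SS-microseconds>_<response id>" format
# that the helper in this commit produces. All values are hypothetical.
from datetime import datetime

start_time = datetime(2024, 1, 6, 12, 30, 45, 123456)  # hypothetical request start time
response_id = "chatcmpl-abc123"                         # hypothetical response id

logging_id = "time-" + start_time.strftime("%H-%M-%S-%f") + "_" + response_id
print(logging_id)  # -> time-12-30-45-123456_chatcmpl-abc123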
@@ -207,14 +207,6 @@ class LangFuseLogger:
         if generation_name is None:
             # just log `litellm-{call_type}` as the generation name
             generation_name = f"litellm-{kwargs.get('call_type', 'completion')}"
-        response_id = None
-        if response_obj.get("id", None) is not None:
-            response_id = (
-                "time-"
-                + start_time.strftime("%H-%M-%S-%f")
-                + "_"
-                + response_obj.get("id")
-            )
 
         trace_params = {
             "name": generation_name,
@@ -233,9 +225,14 @@ class LangFuseLogger:
             trace_params.update({"tags": tags})
 
         trace = self.Langfuse.trace(**trace_params)
+
+        # get generation_id
+        generation_id = None
+        if response_obj.get("id", None) is not None:
+            generation_id = litellm.utils.get_logging_id(start_time, response_obj)
         trace.generation(
             name=generation_name,
-            id=metadata.get("generation_id", response_id),
+            id=metadata.get("generation_id", generation_id),
            startTime=start_time,
            endTime=end_time,
            model=kwargs["model"],
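With this hunk the generation id passed to Langfuse prefers a caller-supplied metadata["generation_id"] and otherwise falls back to the shared logging id. A rough sketch of that precedence; the function name and objects here are hypothetical stand-ins, not litellm APIs:

# Sketch of the id-precedence logic shown in the hunk above.
def pick_generation_id(metadata, response_obj, start_time):
    generation_id = None
    if response_obj.get("id", None) is not None:
        # same format the new litellm.utils.get_logging_id helper returns
        generation_id = "time-" + start_time.strftime("%H-%M-%S-%f") + "_" + response_obj["id"]
    # an explicit metadata["generation_id"] still wins over the derived one
    return metadata.get("generation_id", generation_id)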
@@ -249,26 +246,3 @@ class LangFuseLogger:
             },
             metadata=metadata,
         )
-
-        if self.upstream_langfuse:
-            # user wants to log RAW LLM API call in 2nd langfuse project
-            # key change - model=response_obj["model"], instead of input model used
-            # this is useful for litellm proxy, where users need to see analytics on their LLM Endpoints
-
-            trace = self.upstream_langfuse.trace(**trace_params)
-
-            trace.generation(
-                name=generation_name,
-                id=metadata.get("generation_id", None),
-                startTime=start_time,
-                endTime=end_time,
-                model=response_obj["model"],
-                modelParameters=optional_params,
-                input=input,
-                output=output,
-                usage={
-                    "prompt_tokens": response_obj["usage"]["prompt_tokens"],
-                    "completion_tokens": response_obj["usage"]["completion_tokens"],
-                },
-                metadata=metadata,
-            )
@@ -128,19 +128,18 @@ class S3Logger:
                 # non blocking if it can't cast to a str
                 pass
 
+            s3_file_name = litellm.utils.get_logging_id(start_time, payload) or ""
             s3_object_key = (
                 (self.s3_path.rstrip("/") + "/" if self.s3_path else "")
-                + start_time.strftime('%Y-%m-%d') + "/"
-                + "time-"
-                + start_time.strftime('%H-%M-%S-%f')
-                + "_"
-                + payload["id"]
+                + start_time.strftime("%Y-%m-%d")
+                + "/"
+                + s3_file_name
             )  # we need the s3 key to include the time, so we log cache hits too
             s3_object_key += ".json"
 
             s3_object_download_filename = (
                 "time-"
-                + start_time.strftime('%Y-%m-%dT%H-%M-%S-%f')
+                + start_time.strftime("%Y-%m-%dT%H-%M-%S-%f")
                 + "_"
                 + payload["id"]
                 + ".json"
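Assuming the payload's "id" matches the response id, the S3 object key now embeds the same logging id as the Langfuse generation. A sketch of the resulting key shape; the s3_path, date, and id values are made up for illustration:

# Hypothetical values, shown only to illustrate the key produced after this change.
from datetime import datetime

s3_path = "litellm-logs"                               # stands in for self.s3_path
start_time = datetime(2024, 1, 6, 12, 30, 45, 123456)
s3_file_name = "time-12-30-45-123456_chatcmpl-abc123"  # what get_logging_id would return

s3_object_key = (
    (s3_path.rstrip("/") + "/" if s3_path else "")
    + start_time.strftime("%Y-%m-%d")
    + "/"
    + s3_file_name
)
s3_object_key += ".json"
print(s3_object_key)  # -> litellm-logs/2024-01-06/time-12-30-45-123456_chatcmpl-abc123.json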
@@ -8859,3 +8859,13 @@ def print_args_passed_to_litellm(original_function, args, kwargs):
     except:
         # This should always be non blocking
         pass
+
+
+def get_logging_id(start_time, response_obj):
+    try:
+        response_id = (
+            "time-" + start_time.strftime("%H-%M-%S-%f") + "_" + response_obj.get("id")
+        )
+        return response_id
+    except:
+        return None
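The helper swallows any error and returns None, which is why the S3 hunk guards it with `or ""`. A quick usage sketch with hypothetical inputs, assuming the get_logging_id definition from the hunk above is in scope:

# Hypothetical usage; the dicts stand in for a litellm response/payload object.
from datetime import datetime

start_time = datetime(2024, 1, 6, 12, 30, 45, 123456)

print(get_logging_id(start_time, {"id": "chatcmpl-abc123"}))
# -> time-12-30-45-123456_chatcmpl-abc123

print(get_logging_id(start_time, {}))
# -> None (a missing "id" makes the concatenation raise, and the except returns None)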