Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 11:14:04 +00:00)
fix(langfuse.py): support passing input params for langfuse errors
parent 9be5e2f7e3
commit bc23a9266e

1 changed file with 23 additions and 32 deletions
@@ -266,22 +266,15 @@ class LangFuseLogger:
             trace = self.Langfuse.trace(**trace_params)
 
-            if level == "ERROR":
-                trace.generation(
-                    level="ERROR",  # can be any of DEBUG, DEFAULT, WARNING or ERROR
-                    status_message=output,  # can be any string (e.g. stringified stack trace or error body)
-                )
-                print(f"SUCCESSFULLY LOGGED ERROR")
-            else:
-                # get generation_id
-                generation_id = None
-                if (
-                    response_obj is not None
-                    and response_obj.get("id", None) is not None
-                ):
-                    generation_id = litellm.utils.get_logging_id(
-                        start_time, response_obj
-                    )
+            generation_id = None
+            usage = None
+            if response_obj is not None and response_obj.get("id", None) is not None:
+                generation_id = litellm.utils.get_logging_id(start_time, response_obj)
+                usage = {
+                    "prompt_tokens": response_obj["usage"]["prompt_tokens"],
+                    "completion_tokens": response_obj["usage"]["completion_tokens"],
+                    "total_cost": cost if supports_costs else None,
+                }
             trace.generation(
                 name=generation_name,
                 id=metadata.get("generation_id", generation_id),
@@ -291,12 +284,10 @@ class LangFuseLogger:
                 modelParameters=optional_params,
                 input=input,
                 output=output,
-                usage={
-                    "prompt_tokens": response_obj["usage"]["prompt_tokens"],
-                    "completion_tokens": response_obj["usage"]["completion_tokens"],
-                    "total_cost": cost if supports_costs else None,
-                },
+                usage=usage,
                 metadata=metadata,
+                level=level,
+                status_message=output,
             )
         except Exception as e:
             print(f"Langfuse Layer Error - {traceback.format_exc()}")
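For readers skimming the change: the commit deletes the separate ERROR branch (which logged only level and status_message) and routes every event, error or success, through the single trace.generation(...) call, so error events now carry the same input/output parameters as successful calls. Below is a minimal, self-contained sketch of the resulting control flow, not code from the repository: FakeTrace and log_generation are hypothetical stand-ins for the Langfuse trace object and the logger method, and the usage handling mirrors the diff above.

# Minimal sketch of the post-commit control flow. FakeTrace and
# log_generation are hypothetical stand-ins, not litellm or Langfuse APIs.

class FakeTrace:
    def generation(self, **kwargs):
        # A real Langfuse trace would ship this event to the Langfuse
        # backend; here we just print what would be logged.
        print("generation:", kwargs)


def log_generation(trace, input, output, level, response_obj=None, cost=None):
    # As in the diff: generation_id/usage are only populated when a
    # response id exists, so error events (response_obj=None) still log
    # input/output but carry usage=None instead of raising.
    generation_id = None
    usage = None
    if response_obj is not None and response_obj.get("id") is not None:
        generation_id = response_obj["id"]
        usage = {
            "prompt_tokens": response_obj["usage"]["prompt_tokens"],
            "completion_tokens": response_obj["usage"]["completion_tokens"],
            "total_cost": cost,
        }
    # Single call path for both ERROR and DEFAULT levels.
    trace.generation(
        id=generation_id,
        input=input,
        output=output,
        usage=usage,
        level=level,
        status_message=output,
    )


# Error case: input params are now logged even though there is no response.
log_generation(
    FakeTrace(),
    input={"messages": [{"role": "user", "content": "hi"}]},
    output="Rate limit exceeded",
    level="ERROR",
)

The deleted branch passed only level and status_message to trace.generation, which is why error traces in Langfuse previously lacked prompt/input context; the unified call path is what the commit message refers to as "passing input params for langfuse errors".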