forked from phoenix/litellm-mirror

(fix) Langfuse v2 renamed a few things.

parent 3b6b497672
commit 8682573187

2 changed files with 13 additions and 5 deletions
@@ -306,6 +306,8 @@ class LangFuseLogger:
                tags.append(f"cache_hit:{kwargs['cache_hit']}")
            trace_params.update({"tags": tags})
 
+           print_verbose(f"trace_params: {trace_params}")
+
            trace = self.Langfuse.trace(**trace_params)
 
            generation_id = None
@@ -324,13 +326,17 @@ class LangFuseLogger:
                # just log `litellm-{call_type}` as the generation name
                generation_name = f"litellm-{kwargs.get('call_type', 'completion')}"
 
+           system_fingerprint = response_obj.get("system_fingerprint", None)
+           if system_fingerprint is not None:
+               optional_params["system_fingerprint"] = system_fingerprint
+
            generation_params = {
                "name": generation_name,
                "id": metadata.get("generation_id", generation_id),
-               "startTime": start_time,
-               "endTime": end_time,
+               "start_time": start_time,
+               "end_time": end_time,
                "model": kwargs["model"],
-               "modelParameters": optional_params,
+               "model_parameters": optional_params,
                "input": input,
                "output": output,
                "usage": usage,
@@ -342,13 +348,15 @@ class LangFuseLogger:
                generation_params["prompt"] = metadata.get("prompt", None)
 
            if output is not None and isinstance(output, str) and level == "ERROR":
-               generation_params["statusMessage"] = output
+               generation_params["status_message"] = output
 
            if supports_completion_start_time:
                generation_params["completion_start_time"] = kwargs.get(
                    "completion_start_time", None
                )
 
+           print_verbose(f"generation_params: {generation_params}")
+
            trace.generation(**generation_params)
        except Exception as e:
            print(f"Langfuse Layer Error - {traceback.format_exc()}")
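
For reference, a minimal sketch (not part of this commit) of the Langfuse v2 snake_case call that the renamed keys target; the credentials, model name, and parameter values below are placeholders:

    from datetime import datetime

    from langfuse import Langfuse

    # Placeholder credentials; real values come from your Langfuse project.
    langfuse = Langfuse(public_key="pk-lf-...", secret_key="sk-lf-...")

    trace = langfuse.trace(name="litellm-completion", tags=["cache_hit:False"])

    # Langfuse v2 expects snake_case kwargs; the earlier camelCase names
    # (startTime, endTime, modelParameters, statusMessage) were renamed.
    trace.generation(
        name="litellm-completion",
        start_time=datetime.now(),
        end_time=datetime.now(),
        model="gpt-3.5-turbo",
        model_parameters={"temperature": 0.7, "max_tokens": 256},
        input=[{"role": "user", "content": "Hello!"}],
        output="Hi there!",
    )

    langfuse.flush()  # send buffered events before the process exits
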
@@ -18,7 +18,7 @@ google-cloud-aiplatform==1.43.0 # for vertex ai calls
 anthropic[vertex]==0.21.3
 google-generativeai==0.3.2 # for vertex ai calls
 async_generator==1.10.0 # for async ollama calls
-langfuse>=2.6.3 # for langfuse self-hosted logging
+langfuse>=2.7.3 # for langfuse self-hosted logging
 datadog-api-client==2.23.0 # for datadog logging
 prometheus_client==0.20.0 # for /metrics endpoint on proxy
 orjson==3.9.15 # fast /embedding responses
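
A quick, standard-library-only way to confirm the installed SDK satisfies the new pin:

    from importlib.metadata import version

    # The snake_case generation parameters require the renamed v2 API.
    print(version("langfuse"))  # expect >= 2.7.3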