forked from phoenix/litellm-mirror
fix arize handle optional params (#6243)
This commit is contained in:
parent 1994100028 · commit 6909d8e11b
3 changed files with 106 additions and 74 deletions
@@ -4,8 +4,11 @@ arize AI is OTEL compatible
 this file has Arize ai specific helper functions
 """

+import json
 from typing import TYPE_CHECKING, Any, Optional, Union

+from litellm._logging import verbose_proxy_logger
+
 if TYPE_CHECKING:
     from opentelemetry.trace import Span as _Span

@@ -14,6 +17,21 @@ else:
     Span = Any


+def make_json_serializable(payload: dict) -> dict:
+    for key, value in payload.items():
+        try:
+            if isinstance(value, dict):
+                # recursively sanitize dicts
+                payload[key] = make_json_serializable(value.copy())
+            elif not isinstance(value, (str, int, float, bool, type(None))):
+                # everything else becomes a string
+                payload[key] = str(value)
+        except Exception:
+            # non blocking if it can't cast to a str
+            pass
+    return payload
+
+
 def set_arize_ai_attributes(span: Span, kwargs, response_obj):
     from litellm.integrations._types.open_inference import (
         MessageAttributes,
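The new make_json_serializable helper casts every value that is not a JSON primitive to a string, recursing into nested dicts, so the payload can later go through json.dumps without raising. A minimal usage sketch (the payload values below are invented for illustration):

```python
import json
from datetime import datetime

# hypothetical params whose values json.dumps would reject as-is
payload = {
    "temperature": 0.1,
    "created_at": datetime(2024, 10, 15),   # datetime object -> str
    "nested": {"tags": {"prod", "llm"}},    # set in a nested dict -> str
}

sanitized = make_json_serializable(payload)
print(json.dumps(sanitized))
# {"temperature": 0.1, "created_at": "2024-10-15 00:00:00",
#  "nested": {"tags": "{'prod', 'llm'}"}}   (set ordering may vary)
```

Note the helper mutates the dict it is given in place (nested dicts are shallow-copied first) and also returns it, which is why the caller further down can simply reassign the result.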
@@ -22,6 +40,8 @@ def set_arize_ai_attributes(span: Span, kwargs, response_obj):
         SpanAttributes,
     )

+    try:
+        optional_params = kwargs.get("optional_params", {})
+        # litellm_params = kwargs.get("litellm_params", {}) or {}

@@ -41,7 +61,8 @@ def set_arize_ai_attributes(span: Span, kwargs, response_obj):
         span.set_attribute(SpanAttributes.LLM_MODEL_NAME, kwargs.get("model"))

         span.set_attribute(
-            SpanAttributes.OPENINFERENCE_SPAN_KIND, OpenInferenceSpanKindValues.LLM.value
+            SpanAttributes.OPENINFERENCE_SPAN_KIND,
+            OpenInferenceSpanKindValues.LLM.value,
         )
         messages = kwargs.get("messages")

@@ -67,7 +88,11 @@ def set_arize_ai_attributes(span: Span, kwargs, response_obj):
         )

         # The Generative AI Provider: Azure, OpenAI, etc.
-        span.set_attribute(SpanAttributes.LLM_INVOCATION_PARAMETERS, str(optional_params))
+        _optional_params = make_json_serializable(optional_params)
+        _json_optional_params = json.dumps(_optional_params)
+        span.set_attribute(
+            SpanAttributes.LLM_INVOCATION_PARAMETERS, _json_optional_params
+        )

         if optional_params.get("user"):
             span.set_attribute(SpanAttributes.USER_ID, optional_params.get("user"))
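This is the substance of the fix: optional_params can carry values that json.dumps rejects, and the old str(optional_params) emitted Python repr syntax rather than valid JSON, so consumers of the LLM_INVOCATION_PARAMETERS attribute could not reliably parse it. A quick comparison, using made-up parameter values:

```python
import json

optional_params = {"temperature": 0.1, "user": "OTEL_USER", "stream": False}

# before: Python repr, not valid JSON (single quotes, capitalized False)
str(optional_params)
# "{'temperature': 0.1, 'user': 'OTEL_USER', 'stream': False}"

# after: sanitize non-serializable values, then emit real JSON
json.dumps(make_json_serializable(optional_params))
# '{"temperature": 0.1, "user": "OTEL_USER", "stream": false}'
```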
@@ -112,3 +137,5 @@ def set_arize_ai_attributes(span: Span, kwargs, response_obj):
                 usage.get("prompt_tokens"),
             )
         pass
+    except Exception as e:
+        verbose_proxy_logger.error(f"Error setting arize attributes: {e}")
@@ -6,5 +6,5 @@ model_list:
       api_base: https://exampleopenaiendpoint-production.up.railway.app/

 litellm_settings:
-  callbacks: ["otel"]
+  callbacks: ["arize"]
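The proxy config swaps the generic "otel" callback for "arize" so the new code path is exercised end to end. The equivalent switch in the Python SDK, mirroring the test below, might look like this (the env-var names follow litellm's Arize docs and the credential values are placeholders, so treat both as assumptions):

```python
import os
import litellm

# Arize credentials; variable names per litellm's Arize integration docs
os.environ["ARIZE_SPACE_KEY"] = "<your-space-key>"
os.environ["ARIZE_API_KEY"] = "<your-api-key>"

litellm.success_callback = ["arize"]  # same switch as the config change above

response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
    temperature=0.1,
    user="OTEL_USER",
)
```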
@@ -8,7 +8,7 @@ from dotenv import load_dotenv
 from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

 import litellm
-from litellm._logging import verbose_logger
+from litellm._logging import verbose_logger, verbose_proxy_logger
 from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig

 load_dotenv()
@@ -18,6 +18,9 @@ import logging
 @pytest.mark.asyncio()
 async def test_async_otel_callback():
     litellm.set_verbose = True
+
+    verbose_proxy_logger.setLevel(logging.DEBUG)
+    verbose_logger.setLevel(logging.DEBUG)
     litellm.success_callback = ["arize"]

     await litellm.acompletion(
@@ -27,3 +30,5 @@ async def test_async_otel_callback():
         temperature=0.1,
         user="OTEL_USER",
     )
+
+    await asyncio.sleep(2)
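The trailing await asyncio.sleep(2) gives the background OTEL exporter time to ship spans before the test returns. A sketch of an alternative, assuming the global provider is the OpenTelemetry SDK's TracerProvider, is an explicit flush:

```python
from opentelemetry import trace

provider = trace.get_tracer_provider()
if hasattr(provider, "force_flush"):
    # blocks until queued spans are exported (or the timeout elapses)
    provider.force_flush()
```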