fix simplify - pass litellm_parent_otel_span

This commit is contained in:
Ishaan Jaff 2024-06-07 13:48:21 -07:00
parent 42df97db3c
commit 7ef7bc8a9a
6 changed files with 5 additions and 9 deletions

View file

@@ -162,7 +162,8 @@ class OpenTelemetry(CustomLogger):
proxy_server_request = litellm_params.get("proxy_server_request", {}) or {} proxy_server_request = litellm_params.get("proxy_server_request", {}) or {}
headers = proxy_server_request.get("headers", {}) or {} headers = proxy_server_request.get("headers", {}) or {}
traceparent = headers.get("traceparent", None) traceparent = headers.get("traceparent", None)
parent_otel_span = litellm_params.get("litellm_parent_otel_span", None) _metadata = litellm_params.get("metadata", {})
parent_otel_span = _metadata.get("litellm_parent_otel_span", None)
""" """
Two way to use parents in opentelemetry Two way to use parents in opentelemetry

View file

@@ -600,7 +600,6 @@ def completion(
client = kwargs.get("client", None) client = kwargs.get("client", None)
### Admin Controls ### ### Admin Controls ###
no_log = kwargs.get("no-log", False) no_log = kwargs.get("no-log", False)
litellm_parent_otel_span = kwargs.get("litellm_parent_otel_span", None)
######## end of unpacking kwargs ########### ######## end of unpacking kwargs ###########
openai_params = [ openai_params = [
"functions", "functions",
@@ -690,7 +689,6 @@ def completion(
"allowed_model_region", "allowed_model_region",
"model_config", "model_config",
"fastest_response", "fastest_response",
"litellm_parent_otel_span",
] ]
default_params = openai_params + litellm_params default_params = openai_params + litellm_params
@@ -875,7 +873,6 @@ def completion(
input_cost_per_token=input_cost_per_token, input_cost_per_token=input_cost_per_token,
output_cost_per_second=output_cost_per_second, output_cost_per_second=output_cost_per_second,
output_cost_per_token=output_cost_per_token, output_cost_per_token=output_cost_per_token,
litellm_parent_otel_span=litellm_parent_otel_span,
) )
logging.update_environment_variables( logging.update_environment_variables(
model=model, model=model,

View file

@@ -106,7 +106,7 @@ async def add_litellm_data_to_request(
data["metadata"]["headers"] = _headers data["metadata"]["headers"] = _headers
data["metadata"]["endpoint"] = str(request.url) data["metadata"]["endpoint"] = str(request.url)
# Add the OTEL Parent Trace before sending it LiteLLM # Add the OTEL Parent Trace before sending it LiteLLM
data["litellm_parent_otel_span"] = user_api_key_dict.parent_otel_span data["metadata"]["litellm_parent_otel_span"] = user_api_key_dict.parent_otel_span
### END-USER SPECIFIC PARAMS ### ### END-USER SPECIFIC PARAMS ###
if user_api_key_dict.allowed_model_region is not None: if user_api_key_dict.allowed_model_region is not None:

View file

@@ -73,7 +73,8 @@ def print_verbose(print_statement):
def safe_deep_copy(data): def safe_deep_copy(data):
if isinstance(data, dict): if isinstance(data, dict):
# remove litellm_parent_otel_span since this is not picklable # remove litellm_parent_otel_span since this is not picklable
data.pop("litellm_parent_otel_span", None) if "metadata" in data and "litellm_parent_otel_span" in data["metadata"]:
data["metadata"].pop("litellm_parent_otel_span")
new_data = copy.deepcopy(data) new_data = copy.deepcopy(data)
return new_data return new_data

View file

@@ -152,7 +152,6 @@ def test_chat_completion(mock_acompletion, client_no_auth):
specific_deployment=True, specific_deployment=True,
metadata=mock.ANY, metadata=mock.ANY,
proxy_server_request=mock.ANY, proxy_server_request=mock.ANY,
litellm_parent_otel_span=mock.ANY,
) )
print(f"response - {response.text}") print(f"response - {response.text}")
assert response.status_code == 200 assert response.status_code == 200

View file

@@ -4918,7 +4918,6 @@ def get_litellm_params(
input_cost_per_token=None, input_cost_per_token=None,
output_cost_per_token=None, output_cost_per_token=None,
output_cost_per_second=None, output_cost_per_second=None,
litellm_parent_otel_span=None,
): ):
litellm_params = { litellm_params = {
"acompletion": acompletion, "acompletion": acompletion,
@@ -4941,7 +4940,6 @@ def get_litellm_params(
"input_cost_per_second": input_cost_per_second, "input_cost_per_second": input_cost_per_second,
"output_cost_per_token": output_cost_per_token, "output_cost_per_token": output_cost_per_token,
"output_cost_per_second": output_cost_per_second, "output_cost_per_second": output_cost_per_second,
"litellm_parent_otel_span": litellm_parent_otel_span,
} }
return litellm_params return litellm_params