diff --git a/litellm/main.py b/litellm/main.py
index f76d6c5213..596f85f334 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -600,6 +600,7 @@ def completion(
     client = kwargs.get("client", None)
     ### Admin Controls ###
     no_log = kwargs.get("no-log", False)
+    litellm_parent_otel_span = kwargs.get("litellm_parent_otel_span", None)
     ######## end of unpacking kwargs ###########
     openai_params = [
         "functions",
@@ -689,6 +690,7 @@ def completion(
         "allowed_model_region",
         "model_config",
         "fastest_response",
+        "litellm_parent_otel_span",
     ]
     default_params = openai_params + litellm_params
 
@@ -873,6 +875,7 @@ def completion(
             input_cost_per_token=input_cost_per_token,
             output_cost_per_second=output_cost_per_second,
             output_cost_per_token=output_cost_per_token,
+            litellm_parent_otel_span=litellm_parent_otel_span,
         )
         logging.update_environment_variables(
             model=model,
diff --git a/litellm/utils.py b/litellm/utils.py
index ba6a374674..be7728dfef 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -4918,6 +4918,7 @@ def get_litellm_params(
     input_cost_per_token=None,
     output_cost_per_token=None,
     output_cost_per_second=None,
+    litellm_parent_otel_span=None,
 ):
     litellm_params = {
         "acompletion": acompletion,
@@ -4940,6 +4941,7 @@ def get_litellm_params(
         "input_cost_per_second": input_cost_per_second,
         "output_cost_per_token": output_cost_per_token,
         "output_cost_per_second": output_cost_per_second,
+        "litellm_parent_otel_span": litellm_parent_otel_span,
     }
     return litellm_params
 
@@ -7351,10 +7353,10 @@ def get_provider_fields(custom_llm_provider: str) -> List[ProviderField]:
 
     if custom_llm_provider == "databricks":
         return litellm.DatabricksConfig().get_required_params()
-    
+
     elif custom_llm_provider == "ollama":
         return litellm.OllamaConfig().get_required_params()
-    
+
     else:
         return []
 
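For context on how the new kwarg is meant to be consumed, here is a minimal, hypothetical usage sketch (not part of this patch): a caller creates an OpenTelemetry span and passes it to `litellm.completion()` via the `litellm_parent_otel_span` kwarg added above, which the diff forwards into `litellm_params` through `get_litellm_params()`. The tracer name `"my-app"` and span name `"chat-request"` are illustrative assumptions, as is the availability of the `opentelemetry-api` package.

```python
# Hypothetical usage sketch (not part of the patch): threading an
# OpenTelemetry parent span through litellm.completion().
from opentelemetry import trace

import litellm

tracer = trace.get_tracer("my-app")  # "my-app" is an assumed tracer name

with tracer.start_as_current_span("chat-request") as parent_span:
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello"}],
        # New kwarg from this diff: unpacked in completion() and forwarded
        # into litellm_params via get_litellm_params(), so downstream
        # logging/callbacks can attach child spans to this parent.
        litellm_parent_otel_span=parent_span,
    )
```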