diff --git a/litellm/integrations/braintrust_logging.py b/litellm/integrations/braintrust_logging.py
index 0f27bb1022..4a8c948d12 100644
--- a/litellm/integrations/braintrust_logging.py
+++ b/litellm/integrations/braintrust_logging.py
@@ -11,6 +11,7 @@ from typing import Literal, Optional
 
 import dotenv
 import httpx
+from pydantic import BaseModel
 
 import litellm
 from litellm import verbose_logger
@@ -280,22 +281,20 @@ class BraintrustLogger(CustomLogger):
             )  # if litellm_params['metadata'] == None
             metadata = self.add_metadata_from_header(litellm_params, metadata)
             clean_metadata = {}
-            try:
-                metadata = copy.deepcopy(
-                    metadata
-                )  # Avoid modifying the original metadata
-            except:
-                new_metadata = {}
-                for key, value in metadata.items():
-                    if (
-                        isinstance(value, list)
-                        or isinstance(value, dict)
-                        or isinstance(value, str)
-                        or isinstance(value, int)
-                        or isinstance(value, float)
-                    ):
-                        new_metadata[key] = copy.deepcopy(value)
-                metadata = new_metadata
+            new_metadata = {}
+            for key, value in metadata.items():
+                if (
+                    isinstance(value, list)
+                    or isinstance(value, dict)
+                    or isinstance(value, str)
+                    or isinstance(value, int)
+                    or isinstance(value, float)
+                ):
+                    new_metadata[key] = value
+                elif isinstance(value, BaseModel):
+                    new_metadata[key] = value.model_dump_json()
+
+            metadata = new_metadata
 
             tags = []
             if isinstance(metadata, dict):
diff --git a/litellm/llms/predibase.py b/litellm/llms/predibase.py
index 8055e06945..d7a10c2f52 100644
--- a/litellm/llms/predibase.py
+++ b/litellm/llms/predibase.py
@@ -371,6 +371,7 @@ class PredibaseChatCompletion(BaseLLM):
                 response_headers["llm_provider-{}".format(k)] = v
 
         model_response._hidden_params["additional_headers"] = response_headers
+
         return model_response
 
     def completion(
diff --git a/litellm/main.py b/litellm/main.py
index fd1adc15ba..1209306c8b 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -128,6 +128,7 @@ from .types.llms.openai import HttpxBinaryResponseContent
 from .types.utils import (
     AdapterCompletionStreamWrapper,
     ChatCompletionMessageToolCall,
+    HiddenParams,
     all_litellm_params,
 )
 
@@ -3709,6 +3710,9 @@
             text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
             text_completion_response["choices"] = [text_choices]
             text_completion_response["usage"] = response.get("usage", None)
+            text_completion_response._hidden_params = HiddenParams(
+                **response._hidden_params
+            )
             return text_completion_response
     except Exception as e:
         custom_llm_provider = custom_llm_provider or "openai"
@@ -3980,6 +3984,7 @@
     text_choices["finish_reason"] = response["choices"][0]["finish_reason"]
     text_completion_response["choices"] = [text_choices]
     text_completion_response["usage"] = response.get("usage", None)
+    text_completion_response._hidden_params = HiddenParams(**response._hidden_params)
 
     return text_completion_response
 
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index 47b93ccd2f..eff98ae672 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -1,10 +1,4 @@
 model_list:
-  - model_name: "gpt-4"
+  - model_name: "*"
     litellm_params:
-      model: "gpt-4"
-  - model_name: "gpt-4"
-    litellm_params:
-      model: "gpt-4o"
-  - model_name: "gpt-4o-mini"
-    litellm_params:
-      model: "gpt-4o-mini"
\ No newline at end of file
+      model: "*"
\ No newline at end of file