diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py index f0e3faec7e..b93933a81d 100644 --- a/litellm/integrations/langfuse.py +++ b/litellm/integrations/langfuse.py @@ -5,6 +5,7 @@ import os import traceback from packaging.version import Version +from pydantic import BaseModel import litellm from litellm._logging import verbose_logger @@ -43,8 +44,8 @@ class LangFuseLogger: self.langfuse_debug = os.getenv("LANGFUSE_DEBUG") parameters = { - "public_key": self.public_key, - "secret_key": self.secret_key, + "public_key": self.public_key, + "secret_key": self.secret_key, "host": self.langfuse_host, "release": self.langfuse_release, "debug": self.langfuse_debug, @@ -331,7 +332,7 @@ class LangFuseLogger: metadata = copy.deepcopy( metadata ) # Avoid modifying the original metadata - except: + except Exception: new_metadata = {} for key, value in metadata.items(): if ( @@ -342,6 +343,8 @@ class LangFuseLogger: or isinstance(value, float) ): new_metadata[key] = copy.deepcopy(value) + elif isinstance(value, BaseModel): + new_metadata[key] = value.model_dump() metadata = new_metadata supports_tags = Version(langfuse.version.__version__) >= Version("2.6.3") diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py index 0f1378f4b9..cf983bbd5e 100644 --- a/litellm/litellm_core_utils/litellm_logging.py +++ b/litellm/litellm_core_utils/litellm_logging.py @@ -1543,6 +1543,14 @@ class Logging: self.model_call_details["end_time"] = end_time self.model_call_details.setdefault("original_response", None) self.model_call_details["response_cost"] = 0 + + if hasattr(exception, "headers") and isinstance(exception.headers, dict): + self.model_call_details.setdefault("litellm_params", {}) + metadata = ( + self.model_call_details["litellm_params"].get("metadata", {}) or {} + ) + metadata.update(exception.headers) + self.model_call_details["litellm_params"]["metadata"] = metadata return start_time, end_time def 
failure_handler( diff --git a/litellm/proxy/_experimental/out/404.html b/litellm/proxy/_experimental/out/404.html deleted file mode 100644 index 2beadd6d15..0000000000 --- a/litellm/proxy/_experimental/out/404.html +++ /dev/null @@ -1 +0,0 @@ -404: This page could not be found.LiteLLM Dashboard

404

This page could not be found.

\ No newline at end of file diff --git a/litellm/proxy/_experimental/out/model_hub.html b/litellm/proxy/_experimental/out/model_hub.html deleted file mode 100644 index b417efc8f2..0000000000 --- a/litellm/proxy/_experimental/out/model_hub.html +++ /dev/null @@ -1 +0,0 @@ -LiteLLM Dashboard \ No newline at end of file diff --git a/litellm/proxy/_experimental/out/onboarding.html b/litellm/proxy/_experimental/out/onboarding.html deleted file mode 100644 index d719a1e757..0000000000 --- a/litellm/proxy/_experimental/out/onboarding.html +++ /dev/null @@ -1 +0,0 @@ -LiteLLM Dashboard \ No newline at end of file diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml index 8b10c7d9f9..08fa2d9546 100644 --- a/litellm/proxy/_new_secret_config.yaml +++ b/litellm/proxy/_new_secret_config.yaml @@ -1,11 +1,8 @@ model_list: - - model_name: "*" + - model_name: "predibase-llama" litellm_params: - model: "*" - - model_name: "azure-gpt-4o-mini" - litellm_params: - model: azure/my-gpt-4o-mini - api_key: os.environ/AZURE_API_KEY - api_base: os.environ/AZURE_API_BASE - model_info: - base_model: azure/global-standard/gpt-4o-mini \ No newline at end of file + model: "predibase/llama-3-8b-instruct" + request_timeout: 1 + +litellm_settings: + failure_callback: ["langfuse"]