diff --git a/litellm/integrations/langfuse.py b/litellm/integrations/langfuse.py
index f4a581eb9f..9b06ec17f2 100644
--- a/litellm/integrations/langfuse.py
+++ b/litellm/integrations/langfuse.py
@@ -93,6 +93,7 @@ class LangFuseLogger:
             )
         litellm_params = kwargs.get("litellm_params", {})
+        litellm_call_id = kwargs.get("litellm_call_id", None)
         metadata = (
             litellm_params.get("metadata", {}) or {}
         )  # if litellm_params['metadata'] == None
 
@@ -161,6 +162,7 @@ class LangFuseLogger:
                     response_obj,
                     level,
                     print_verbose,
+                    litellm_call_id,
                 )
             elif response_obj is not None:
                 self._log_langfuse_v1(
@@ -255,6 +257,7 @@ class LangFuseLogger:
         response_obj,
         level,
         print_verbose,
+        litellm_call_id,
     ) -> tuple:
         import langfuse
 
@@ -318,7 +321,7 @@ class LangFuseLogger:
             session_id = clean_metadata.pop("session_id", None)
             trace_name = clean_metadata.pop("trace_name", None)
-            trace_id = clean_metadata.pop("trace_id", None)
+            trace_id = clean_metadata.pop("trace_id", litellm_call_id)
             existing_trace_id = clean_metadata.pop("existing_trace_id", None)
             update_trace_keys = clean_metadata.pop("update_trace_keys", [])
             debug = clean_metadata.pop("debug_langfuse", None)
@@ -351,9 +354,13 @@ class LangFuseLogger:
 
                 # Special keys that are found in the function arguments and not the metadata
                 if "input" in update_trace_keys:
-                    trace_params["input"] = input if not mask_input else "redacted-by-litellm"
+                    trace_params["input"] = (
+                        input if not mask_input else "redacted-by-litellm"
+                    )
                 if "output" in update_trace_keys:
-                    trace_params["output"] = output if not mask_output else "redacted-by-litellm"
+                    trace_params["output"] = (
+                        output if not mask_output else "redacted-by-litellm"
+                    )
             else:  # don't overwrite an existing trace
                 trace_params = {
                     "id": trace_id,
@@ -375,7 +382,9 @@ class LangFuseLogger:
                 if level == "ERROR":
                     trace_params["status_message"] = output
                 else:
-                    trace_params["output"] = output if not mask_output else "redacted-by-litellm"
+                    trace_params["output"] = (
+                        output if not mask_output else "redacted-by-litellm"
+                    )
 
             if debug == True or (isinstance(debug, str) and debug.lower() == "true"):
                 if "metadata" in trace_params:
diff --git a/litellm/integrations/slack_alerting.py b/litellm/integrations/slack_alerting.py
index 015278c557..00aa0046de 100644
--- a/litellm/integrations/slack_alerting.py
+++ b/litellm/integrations/slack_alerting.py
@@ -164,13 +164,28 @@ class SlackAlerting(CustomLogger):
     ) -> Optional[str]:
         """
         Returns langfuse trace url
+
+        - check:
+        -> existing_trace_id
+        -> trace_id
+        -> litellm_call_id
         """
         # do nothing for now
-        if (
-            request_data is not None
-            and request_data.get("metadata", {}).get("trace_id", None) is not None
-        ):
-            trace_id = request_data["metadata"]["trace_id"]
+        if request_data is not None:
+            trace_id = None
+            if (
+                request_data.get("metadata", {}).get("existing_trace_id", None)
+                is not None
+            ):
+                trace_id = request_data["metadata"]["existing_trace_id"]
+            elif request_data.get("metadata", {}).get("trace_id", None) is not None:
+                trace_id = request_data["metadata"]["trace_id"]
+            elif request_data.get("litellm_logging_obj", None) is not None and hasattr(
+                request_data["litellm_logging_obj"], "model_call_details"
+            ):
+                trace_id = request_data["litellm_logging_obj"].model_call_details[
+                    "litellm_call_id"
+                ]
             if litellm.utils.langFuseLogger is not None:
                 base_url = litellm.utils.langFuseLogger.Langfuse.base_url
                 return f"{base_url}/trace/{trace_id}"
diff --git a/litellm/proxy/_super_secret_config.yaml b/litellm/proxy/_super_secret_config.yaml
index f73c36e49a..7bbf9d3cd0 100644
--- a/litellm/proxy/_super_secret_config.yaml
+++ b/litellm/proxy/_super_secret_config.yaml
@@ -1,4 +1,9 @@
 model_list:
+- model_name: gpt-3.5-turbo-fake-model
+  litellm_params:
+    model: openai/my-fake-model
+    api_base: http://0.0.0.0:8080
+    api_key: ""
 - model_name: gpt-3.5-turbo
   litellm_params:
     model: azure/gpt-35-turbo
@@ -14,5 +19,10 @@ model_list:
 router_settings:
   enable_pre_call_checks: true
 
-general_settings:
-  master_key: sk-1234 # [OPTIONAL] Use to enforce auth on proxy. See - https://docs.litellm.ai/docs/proxy/virtual_keys
+litellm_settings:
+  success_callback: ["langfuse"]
+  failure_callback: ["langfuse"]
+
+general_settings:
+  alerting: ["slack"]
+ 
\ No newline at end of file
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 972af40120..9d86bf7c29 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -671,15 +671,21 @@ async def user_api_key_auth(
     _end_user_object = None
     end_user_params = {}
     if "user" in request_data:
-        _end_user_object = await get_end_user_object(
-            end_user_id=request_data["user"],
-            prisma_client=prisma_client,
-            user_api_key_cache=user_api_key_cache,
-        )
-        if _end_user_object is not None:
-            end_user_params["allowed_model_region"] = (
-                _end_user_object.allowed_model_region
+        try:
+            _end_user_object = await get_end_user_object(
+                end_user_id=request_data["user"],
+                prisma_client=prisma_client,
+                user_api_key_cache=user_api_key_cache,
             )
+            if _end_user_object is not None:
+                end_user_params["allowed_model_region"] = (
+                    _end_user_object.allowed_model_region
+                )
+        except Exception as e:
+            verbose_proxy_logger.debug(
+                "Unable to find user in db. Error - {}".format(str(e))
+            )
+            pass
 
     try:
         is_master_key_valid = secrets.compare_digest(api_key, master_key)  # type: ignore
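
For reviewers, a minimal standalone sketch (not part of the diff) of the trace-id lookup order the slack_alerting.py change implements: existing_trace_id first, then the metadata trace_id, then the litellm_call_id taken from the logging object. The function name resolve_langfuse_trace_id and the example base URL are illustrative only, not names from the codebase.

```python
from typing import Optional


def resolve_langfuse_trace_id(request_data: Optional[dict]) -> Optional[str]:
    """Mirror the lookup order added in slack_alerting.py:
    existing_trace_id -> trace_id -> litellm_call_id."""
    if request_data is None:
        return None
    metadata = request_data.get("metadata", {}) or {}
    if metadata.get("existing_trace_id") is not None:
        return metadata["existing_trace_id"]
    if metadata.get("trace_id") is not None:
        return metadata["trace_id"]
    logging_obj = request_data.get("litellm_logging_obj")
    if logging_obj is not None and hasattr(logging_obj, "model_call_details"):
        return logging_obj.model_call_details.get("litellm_call_id")
    return None


if __name__ == "__main__":
    # With no trace ids in metadata, the call id becomes the trace id,
    # so a slack alert can still link to a Langfuse trace.
    class _FakeLoggingObj:
        model_call_details = {"litellm_call_id": "abc-123"}

    trace_id = resolve_langfuse_trace_id(
        {"metadata": {}, "litellm_logging_obj": _FakeLoggingObj()}
    )
    print(f"https://cloud.langfuse.com/trace/{trace_id}")  # example base URL only
```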
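Separately, a hedged usage sketch of what the langfuse.py default change means for SDK callers: clean_metadata.pop("trace_id", litellm_call_id) only falls back to the call id when no trace_id is supplied, so passing an explicit trace id via metadata should still pin the trace. The model name and trace id below are placeholders, and the snippet assumes LANGFUSE_* keys and a provider API key are set in the environment.

```python
import litellm

# Log successful calls to Langfuse (requires LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY).
litellm.success_callback = ["langfuse"]

response = litellm.completion(
    model="gpt-3.5-turbo",  # placeholder model; any configured provider works
    messages=[{"role": "user", "content": "hi"}],
    # An explicit trace id overrides the new litellm_call_id default.
    metadata={"trace_id": "my-explicit-trace-id"},
)
```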