fix(proxy/utils.py): fix add langfuse trace id to alert

Fix the import after the refactor
This commit is contained in:
Krrish Dholakia 2024-06-21 14:55:09 -07:00
parent 5912decd19
commit 941574a921
3 changed files with 46 additions and 35 deletions

View file

@@ -1,28 +1,35 @@
#### What this does #### #### What this does ####
# Class for sending Slack Alerts # # Class for sending Slack Alerts #
import dotenv, os, traceback import asyncio
from litellm.proxy._types import UserAPIKeyAuth, CallInfo, AlertType
from litellm._logging import verbose_logger, verbose_proxy_logger
import litellm, threading
from typing import List, Literal, Any, Union, Optional, Dict, Set
from litellm.caching import DualCache
import asyncio, time
import aiohttp
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
import datetime import datetime
from pydantic import BaseModel, Field import os
from enum import Enum
from datetime import datetime as dt, timedelta, timezone
from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import WebhookEvent
import random import random
from typing import TypedDict import threading
from openai import APIError import time
from .email_templates.templates import * import traceback
from datetime import datetime as dt
from datetime import timedelta, timezone
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Set, TypedDict, Union
import aiohttp
import dotenv
from openai import APIError
from pydantic import BaseModel, Field
import litellm
import litellm.litellm_core_utils
import litellm.litellm_core_utils.litellm_logging
import litellm.types import litellm.types
from litellm._logging import verbose_logger, verbose_proxy_logger
from litellm.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
from litellm.proxy._types import AlertType, CallInfo, UserAPIKeyAuth, WebhookEvent
from litellm.types.router import LiteLLM_Params from litellm.types.router import LiteLLM_Params
from .email_templates.templates import *
class BaseOutageModel(TypedDict): class BaseOutageModel(TypedDict):
alerts: List[int] alerts: List[int]
@@ -250,8 +257,10 @@ class SlackAlerting(CustomLogger):
trace_id = request_data["litellm_logging_obj"].model_call_details[ trace_id = request_data["litellm_logging_obj"].model_call_details[
"litellm_call_id" "litellm_call_id"
] ]
if litellm.utils.langFuseLogger is not None: if litellm.litellm_core_utils.litellm_logging.langFuseLogger is not None:
base_url = litellm.utils.langFuseLogger.Langfuse.base_url base_url = (
litellm.litellm_core_utils.litellm_logging.langFuseLogger.Langfuse.base_url
)
return f"{base_url}/trace/{trace_id}" return f"{base_url}/trace/{trace_id}"
return None return None
@@ -1231,8 +1240,7 @@ Model Info:
email_logo_url: Optional[str] = None, email_logo_url: Optional[str] = None,
email_support_contact: Optional[str] = None, email_support_contact: Optional[str] = None,
): ):
from litellm.proxy.proxy_server import premium_user from litellm.proxy.proxy_server import CommonProxyErrors, premium_user
from litellm.proxy.proxy_server import CommonProxyErrors
if premium_user is not True: if premium_user is not True:
if email_logo_url is not None or email_support_contact is not None: if email_logo_url is not None or email_support_contact is not None:
@@ -1352,8 +1360,8 @@ Model Info:
Returns -> True if sent, False if not. Returns -> True if sent, False if not.
""" """
from litellm.proxy.utils import send_email
from litellm.proxy.proxy_server import premium_user, prisma_client from litellm.proxy.proxy_server import premium_user, prisma_client
from litellm.proxy.utils import send_email
email_logo_url = os.getenv( email_logo_url = os.getenv(
"SMTP_SENDER_LOGO", os.getenv("EMAIL_LOGO_URL", None) "SMTP_SENDER_LOGO", os.getenv("EMAIL_LOGO_URL", None)
@@ -1462,8 +1470,8 @@ Model Info:
if alert_type not in self.alert_types: if alert_type not in self.alert_types:
return return
from datetime import datetime
import json import json
from datetime import datetime
# Get the current timestamp # Get the current timestamp
current_time = datetime.now().strftime("%H:%M:%S") current_time = datetime.now().strftime("%H:%M:%S")

View file

@@ -67,18 +67,19 @@ model_list:
max_input_tokens: 80920 max_input_tokens: 80920
litellm_settings: litellm_settings:
json_logs: true success_callback: ["langfuse"]
default_team_settings: failure_callback: ["langfuse"]
- team_id: proj1 # default_team_settings:
success_callback: ["langfuse"] # - team_id: proj1
langfuse_public_key: pk-lf-a65841e9-5192-4397-a679-cfff029fd5b0 # success_callback: ["langfuse"]
langfuse_secret: sk-lf-d58c2891-3717-4f98-89dd-df44826215fd # langfuse_public_key: pk-lf-a65841e9-5192-4397-a679-cfff029fd5b0
langfuse_host: https://us.cloud.langfuse.com # langfuse_secret: sk-lf-d58c2891-3717-4f98-89dd-df44826215fd
- team_id: proj2 # langfuse_host: https://us.cloud.langfuse.com
success_callback: ["langfuse"] # - team_id: proj2
langfuse_public_key: pk-lf-3d789fd1-f49f-4e73-a7d9-1b4e11acbf9a # success_callback: ["langfuse"]
langfuse_secret: sk-lf-11b13aca-b0d4-4cde-9d54-721479dace6d # langfuse_public_key: pk-lf-3d789fd1-f49f-4e73-a7d9-1b4e11acbf9a
langfuse_host: https://us.cloud.langfuse.com # langfuse_secret: sk-lf-11b13aca-b0d4-4cde-9d54-721479dace6d
# langfuse_host: https://us.cloud.langfuse.com
assistant_settings: assistant_settings:
custom_llm_provider: openai custom_llm_provider: openai

View file

@@ -462,9 +462,11 @@ class ProxyLogging:
extra_kwargs = {} extra_kwargs = {}
alerting_metadata = {} alerting_metadata = {}
if request_data is not None: if request_data is not None:
_url = self.slack_alerting_instance._add_langfuse_trace_id_to_alert( _url = self.slack_alerting_instance._add_langfuse_trace_id_to_alert(
request_data=request_data request_data=request_data
) )
if _url is not None: if _url is not None:
extra_kwargs["🪢 Langfuse Trace"] = _url extra_kwargs["🪢 Langfuse Trace"] = _url
formatted_message += "\n\n🪢 Langfuse Trace: {}".format(_url) formatted_message += "\n\n🪢 Langfuse Trace: {}".format(_url)