fix(proxy/utils.py): fix add langfuse trace id to alert

Fixing the import after refactor
This commit is contained in:
Krrish Dholakia 2024-06-21 14:55:09 -07:00
parent fb98dd70ce
commit 174b345766
3 changed files with 46 additions and 35 deletions

View file

@@ -1,28 +1,35 @@
#### What this does ####
# Class for sending Slack Alerts #
import dotenv, os, traceback
from litellm.proxy._types import UserAPIKeyAuth, CallInfo, AlertType
from litellm._logging import verbose_logger, verbose_proxy_logger
import litellm, threading
from typing import List, Literal, Any, Union, Optional, Dict, Set
from litellm.caching import DualCache
import asyncio, time
import aiohttp
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
import asyncio
import datetime
from pydantic import BaseModel, Field
from enum import Enum
from datetime import datetime as dt, timedelta, timezone
from litellm.integrations.custom_logger import CustomLogger
from litellm.proxy._types import WebhookEvent
import os
import random
from typing import TypedDict
from openai import APIError
from .email_templates.templates import *
import threading
import time
import traceback
from datetime import datetime as dt
from datetime import timedelta, timezone
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Set, TypedDict, Union
import aiohttp
import dotenv
from openai import APIError
from pydantic import BaseModel, Field
import litellm
import litellm.litellm_core_utils
import litellm.litellm_core_utils.litellm_logging
import litellm.types
from litellm._logging import verbose_logger, verbose_proxy_logger
from litellm.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
from litellm.proxy._types import AlertType, CallInfo, UserAPIKeyAuth, WebhookEvent
from litellm.types.router import LiteLLM_Params
from .email_templates.templates import *
class BaseOutageModel(TypedDict):
    """Base TypedDict shape for outage-tracking state in the Slack alerting module."""

    # NOTE(review): element semantics not visible from this hunk — presumably
    # timestamps or status codes of recent failed requests; TODO confirm
    # against the subclasses/usages later in slack_alerting.py.
    alerts: List[int]
@@ -250,8 +257,10 @@ class SlackAlerting(CustomLogger):
trace_id = request_data["litellm_logging_obj"].model_call_details[
"litellm_call_id"
]
if litellm.utils.langFuseLogger is not None:
base_url = litellm.utils.langFuseLogger.Langfuse.base_url
if litellm.litellm_core_utils.litellm_logging.langFuseLogger is not None:
base_url = (
litellm.litellm_core_utils.litellm_logging.langFuseLogger.Langfuse.base_url
)
return f"{base_url}/trace/{trace_id}"
return None
@@ -1231,8 +1240,7 @@ Model Info:
email_logo_url: Optional[str] = None,
email_support_contact: Optional[str] = None,
):
from litellm.proxy.proxy_server import premium_user
from litellm.proxy.proxy_server import CommonProxyErrors
from litellm.proxy.proxy_server import CommonProxyErrors, premium_user
if premium_user is not True:
if email_logo_url is not None or email_support_contact is not None:
@@ -1352,8 +1360,8 @@ Model Info:
Returns -> True if sent, False if not.
"""
from litellm.proxy.utils import send_email
from litellm.proxy.proxy_server import premium_user, prisma_client
from litellm.proxy.utils import send_email
email_logo_url = os.getenv(
"SMTP_SENDER_LOGO", os.getenv("EMAIL_LOGO_URL", None)
@@ -1462,8 +1470,8 @@ Model Info:
if alert_type not in self.alert_types:
return
from datetime import datetime
import json
from datetime import datetime
# Get the current timestamp
current_time = datetime.now().strftime("%H:%M:%S")

View file

@@ -67,18 +67,19 @@ model_list:
max_input_tokens: 80920
litellm_settings:
json_logs: true
default_team_settings:
- team_id: proj1
success_callback: ["langfuse"]
langfuse_public_key: pk-lf-a65841e9-5192-4397-a679-cfff029fd5b0
langfuse_secret: sk-lf-d58c2891-3717-4f98-89dd-df44826215fd
langfuse_host: https://us.cloud.langfuse.com
- team_id: proj2
success_callback: ["langfuse"]
langfuse_public_key: pk-lf-3d789fd1-f49f-4e73-a7d9-1b4e11acbf9a
langfuse_secret: sk-lf-11b13aca-b0d4-4cde-9d54-721479dace6d
langfuse_host: https://us.cloud.langfuse.com
success_callback: ["langfuse"]
failure_callback: ["langfuse"]
# default_team_settings:
# - team_id: proj1
# success_callback: ["langfuse"]
# langfuse_public_key: pk-lf-a65841e9-5192-4397-a679-cfff029fd5b0
# langfuse_secret: sk-lf-d58c2891-3717-4f98-89dd-df44826215fd
# langfuse_host: https://us.cloud.langfuse.com
# - team_id: proj2
# success_callback: ["langfuse"]
# langfuse_public_key: pk-lf-3d789fd1-f49f-4e73-a7d9-1b4e11acbf9a
# langfuse_secret: sk-lf-11b13aca-b0d4-4cde-9d54-721479dace6d
# langfuse_host: https://us.cloud.langfuse.com
assistant_settings:
custom_llm_provider: openai

View file

@@ -462,9 +462,11 @@ class ProxyLogging:
extra_kwargs = {}
alerting_metadata = {}
if request_data is not None:
_url = self.slack_alerting_instance._add_langfuse_trace_id_to_alert(
request_data=request_data
)
if _url is not None:
extra_kwargs["🪢 Langfuse Trace"] = _url
formatted_message += "\n\n🪢 Langfuse Trace: {}".format(_url)