(fix) add /metrics to utils.py

This commit is contained in:
Ishaan Jaff 2024-03-19 17:28:33 -07:00
parent aa1c480452
commit c196186190
2 changed files with 37 additions and 4 deletions

View file

@@ -12,7 +12,7 @@ import litellm, uuid
from litellm._logging import print_verbose, verbose_logger from litellm._logging import print_verbose, verbose_logger
class prometheusLogger: class PrometheusLogger:
# Class variables or attributes # Class variables or attributes
def __init__( def __init__(
self, self,
@@ -42,7 +42,7 @@ class prometheusLogger:
labelnames=["user", "key", "model"], labelnames=["user", "key", "model"],
) )
except Exception as e: except Exception as e:
print_verbose(f"Got exception on init s3 client {str(e)}") print_verbose(f"Got exception on init prometheus client {str(e)}")
raise e raise e
async def _async_log_event( async def _async_log_event(
@@ -65,7 +65,7 @@ class prometheusLogger:
litellm_params = kwargs.get("litellm_params", {}) or {} litellm_params = kwargs.get("litellm_params", {}) or {}
proxy_server_request = litellm_params.get("proxy_server_request") or {} proxy_server_request = litellm_params.get("proxy_server_request") or {}
end_user_id = proxy_server_request.get("body", {}).get("user", None) end_user_id = proxy_server_request.get("body", {}).get("user", None)
user_api_key = litellm_params.get("metadata", {}).get("api_key", None) user_api_key = litellm_params.get("metadata", {}).get("user_api_key", None)
tokens_used = response_obj.get("usage", {}).get("total_tokens", 0) tokens_used = response_obj.get("usage", {}).get("total_tokens", 0)
print_verbose( print_verbose(

View file

@@ -66,6 +66,7 @@ from .integrations.weights_biases import WeightsBiasesLogger
from .integrations.custom_logger import CustomLogger from .integrations.custom_logger import CustomLogger
from .integrations.langfuse import LangFuseLogger from .integrations.langfuse import LangFuseLogger
from .integrations.datadog import DataDogLogger from .integrations.datadog import DataDogLogger
from .integrations.prometheus import PrometheusLogger
from .integrations.dynamodb import DyanmoDBLogger from .integrations.dynamodb import DyanmoDBLogger
from .integrations.s3 import S3Logger from .integrations.s3 import S3Logger
from .integrations.clickhouse import ClickhouseLogger from .integrations.clickhouse import ClickhouseLogger
@@ -123,6 +124,7 @@ weightsBiasesLogger = None
customLogger = None customLogger = None
langFuseLogger = None langFuseLogger = None
dataDogLogger = None dataDogLogger = None
prometheusLogger = None
dynamoLogger = None dynamoLogger = None
s3Logger = None s3Logger = None
genericAPILogger = None genericAPILogger = None
@@ -1502,6 +1504,35 @@ class Logging:
user_id=kwargs.get("user", None), user_id=kwargs.get("user", None),
print_verbose=print_verbose, print_verbose=print_verbose,
) )
if callback == "prometheus":
global prometheusLogger
verbose_logger.debug("reaches prometheus for success logging!")
kwargs = {}
for k, v in self.model_call_details.items():
if (
k != "original_response"
): # copy.deepcopy raises errors as this could be a coroutine
kwargs[k] = v
# this only logs streaming once, complete_streaming_response exists i.e when stream ends
if self.stream:
verbose_logger.debug(
f"prometheus: is complete_streaming_response in kwargs: {kwargs.get('complete_streaming_response', None)}"
)
if complete_streaming_response is None:
continue
else:
print_verbose(
"reaches prometheus for streaming logging!"
)
result = kwargs["complete_streaming_response"]
prometheusLogger.log_event(
kwargs=kwargs,
response_obj=result,
start_time=start_time,
end_time=end_time,
user_id=kwargs.get("user", None),
print_verbose=print_verbose,
)
if callback == "generic": if callback == "generic":
global genericAPILogger global genericAPILogger
verbose_logger.debug("reaches langfuse for success logging!") verbose_logger.debug("reaches langfuse for success logging!")
@@ -6111,7 +6142,7 @@ def validate_environment(model: Optional[str] = None) -> dict:
def set_callbacks(callback_list, function_id=None): def set_callbacks(callback_list, function_id=None):
global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, athinaLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger, dynamoLogger, s3Logger, dataDogLogger global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, athinaLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger, customLogger, weightsBiasesLogger, langsmithLogger, dynamoLogger, s3Logger, dataDogLogger, prometheusLogger
try: try:
for callback in callback_list: for callback in callback_list:
print_verbose(f"callback: {callback}") print_verbose(f"callback: {callback}")
@@ -6179,6 +6210,8 @@ def set_callbacks(callback_list, function_id=None):
langFuseLogger = LangFuseLogger() langFuseLogger = LangFuseLogger()
elif callback == "datadog": elif callback == "datadog":
dataDogLogger = DataDogLogger() dataDogLogger = DataDogLogger()
elif callback == "prometheus":
prometheusLogger = PrometheusLogger()
elif callback == "dynamodb": elif callback == "dynamodb":
dynamoLogger = DyanmoDBLogger() dynamoLogger = DyanmoDBLogger()
elif callback == "s3": elif callback == "s3":