Mirror of https://github.com/BerriAI/litellm.git, last synced 2025-04-26 19:24:27 +00:00.
Litellm dev 01 30 2025 p2 (#8134)
* feat(lowest_tpm_rpm_v2.py): fix redis cache check to use >= instead of > makes it consistent * test(test_custom_guardrails.py): add more unit testing on default on guardrails ensure it runs if user sent guardrail list is empty * docs(quick_start.md): clarify default on guardrails run even if user guardrails list contains other guardrails * refactor(litellm_logging.py): refactor no-log to helper util allows for more consistent behavior * feat(litellm_logging.py): add event hook to verbose logs * fix(litellm_logging.py): add unit testing to ensure `litellm.disable_no_log_param` is respected * docs(logging.md): document how to disable 'no-log' param * test: fix test to handle feb * test: cleanup old bedrock model * fix: fix router check
This commit is contained in:
parent
78a21b66a2
commit
2eee7f978f
10 changed files with 103 additions and 30 deletions
|
@ -867,6 +867,26 @@ class Logging(LiteLLMLoggingBaseClass):
|
|||
|
||||
return None
|
||||
|
||||
def should_run_callback(
    self, callback: litellm.CALLBACK_TYPES, litellm_params: dict, event_hook: str
) -> bool:
    """Decide whether `callback` should fire for this request.

    Returns False only when the request carries the "no-log" param AND the
    callback is not one of the proxy's internal loggers (class name contains
    "_PROXY_") — proxy cost-tracking callbacks must run even for no-log
    requests. The global `litellm.global_disable_no_log_param` switch makes
    every callback run regardless of the per-request flag.
    """
    # Global kill-switch: ignore per-request "no-log" entirely.
    if litellm.global_disable_no_log_param:
        return True

    # Without an explicit no-log request, every callback runs.
    no_log_requested = litellm_params.get("no-log", False) is True
    if not no_log_requested:
        return True

    # Proxy cost-tracking callbacks should run even when logging is disabled.
    is_proxy_callback = isinstance(callback, CustomLogger) and (
        "_PROXY_" in callback.__class__.__name__
    )
    if is_proxy_callback:
        return True

    verbose_logger.debug(
        f"no-log request, skipping logging for {event_hook} event"
    )
    return False
|
||||
|
||||
def _success_handler_helper_fn(
|
||||
self,
|
||||
result=None,
|
||||
|
@ -1072,14 +1092,13 @@ class Logging(LiteLLMLoggingBaseClass):
|
|||
for callback in callbacks:
|
||||
try:
|
||||
litellm_params = self.model_call_details.get("litellm_params", {})
|
||||
if litellm_params.get("no-log", False) is True:
|
||||
# proxy cost tracking callbacks should run
|
||||
if not (
|
||||
isinstance(callback, CustomLogger)
|
||||
and "_PROXY_" in callback.__class__.__name__
|
||||
):
|
||||
verbose_logger.info("no-log request, skipping logging")
|
||||
continue
|
||||
should_run = self.should_run_callback(
|
||||
callback=callback,
|
||||
litellm_params=litellm_params,
|
||||
event_hook="success_handler",
|
||||
)
|
||||
if not should_run:
|
||||
continue
|
||||
if callback == "promptlayer" and promptLayerLogger is not None:
|
||||
print_verbose("reaches promptlayer for logging!")
|
||||
promptLayerLogger.log_event(
|
||||
|
@ -1626,18 +1645,14 @@ class Logging(LiteLLMLoggingBaseClass):
|
|||
for callback in callbacks:
|
||||
# check if callback can run for this request
|
||||
litellm_params = self.model_call_details.get("litellm_params", {})
|
||||
if litellm_params.get("no-log", False) is True:
|
||||
# proxy cost tracking callbacks should run
|
||||
if not (
|
||||
isinstance(callback, CustomLogger)
|
||||
and "_PROXY_" in callback.__class__.__name__
|
||||
):
|
||||
print_verbose("no-log request, skipping logging")
|
||||
continue
|
||||
should_run = self.should_run_callback(
|
||||
callback=callback,
|
||||
litellm_params=litellm_params,
|
||||
event_hook="async_success_handler",
|
||||
)
|
||||
if not should_run:
|
||||
continue
|
||||
try:
|
||||
if kwargs.get("no-log", False) is True:
|
||||
print_verbose("no-log request, skipping logging")
|
||||
continue
|
||||
if callback == "openmeter" and openMeterLogger is not None:
|
||||
if self.stream is True:
|
||||
if (
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue