fix(test_custom_callbacks_input.py): unit tests for 'turn_off_message_logging'

ensure no raw request is logged either
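For illustration only: a minimal sketch of how such a unit test might look, not the actual contents of test_custom_callbacks_input.py. It assumes litellm's CustomLogger base class, the litellm.callbacks registration list, and the mock_response parameter of litellm.completion; the handler name and assertions are hypothetical.

import time

import litellm
from litellm.integrations.custom_logger import CustomLogger


class RawRequestCheckHandler(CustomLogger):  # hypothetical handler name
    def __init__(self):
        self.raw_request_seen = None

    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        # raw_request is attached to the request metadata by the logging code changed below
        _metadata = kwargs.get("litellm_params", {}).get("metadata", {}) or {}
        self.raw_request_seen = _metadata.get("raw_request")


handler = RawRequestCheckHandler()
litellm.turn_off_message_logging = True
litellm.callbacks = [handler]

litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "hi"}],
    mock_response="ok",  # avoid a real API call
)

time.sleep(1)  # success callbacks run on a background thread
assert "curl" not in str(handler.raw_request_seen)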
Krrish Dholakia 2024-06-07 15:39:15 -07:00
parent 51fb199329
commit f73b6033fd
4 changed files with 72 additions and 9 deletions


@@ -1308,14 +1308,28 @@ class Logging:
)
else:
verbose_logger.debug(f"\033[92m{curl_command}\033[0m\n")
# check if user wants the raw request logged to their logging provider (like LangFuse)
# log raw request to provider (like LangFuse)
try:
# [Non-blocking Extra Debug Information in metadata]
_litellm_params = self.model_call_details.get("litellm_params", {})
_metadata = _litellm_params.get("metadata", {}) or {}
_metadata["raw_request"] = str(curl_command)
except:
pass
if (
litellm.turn_off_message_logging is not None
and litellm.turn_off_message_logging is True
):
_metadata["raw_request"] = (
"redacted by litellm. \
'litellm.turn_off_message_logging=True'"
)
else:
_metadata["raw_request"] = str(curl_command)
except Exception as e:
_metadata["raw_request"] = (
"Unable to Log \
raw request: {}".format(
str(e)
)
)
if self.logger_fn and callable(self.logger_fn):
try:
self.logger_fn(
@@ -2684,7 +2698,9 @@ class Logging:
# check if user opted out of logging message/response to callbacks
if litellm.turn_off_message_logging == True:
# remove messages, prompts, input, response from logging
self.model_call_details["messages"] = "redacted-by-litellm"
self.model_call_details["messages"] = [
{"role": "user", "content": "redacted-by-litellm"}
]
self.model_call_details["prompt"] = ""
self.model_call_details["input"] = ""
@@ -4064,7 +4080,9 @@ def openai_token_counter(
for c in value:
if c["type"] == "text":
text += c["text"]
num_tokens += len(encoding.encode(c["text"], disallowed_special=()))
num_tokens += len(
encoding.encode(c["text"], disallowed_special=())
)
elif c["type"] == "image_url":
if isinstance(c["image_url"], dict):
image_url_dict = c["image_url"]