forked from phoenix/litellm-mirror
fix use redact_message_input_output_from_logging
parent 743dfdb950
commit b2111a97e2

1 changed file with 43 additions and 47 deletions
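
The behavior being centralized here is driven by `litellm.turn_off_message_logging`, the flag that appears throughout the hunks below. A minimal usage sketch, assuming an `OPENAI_API_KEY` is exported; the model name and message are illustrative only:

```python
import litellm

# Opt out of logging message/response content: with this flag set, the Logging
# object calls redact_message_input_output_from_logging() so callbacks and
# integrations receive "redacted-by-litellm" in place of the real content.
litellm.turn_off_message_logging = True

response = litellm.completion(
    model="gpt-3.5-turbo",  # illustrative model name
    messages=[{"role": "user", "content": "hello"}],  # illustrative message
)
```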
@@ -1212,7 +1212,6 @@ class Logging:
             print_verbose(
                 f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
             )
-
         # Input Integration Logging -> If you want to log the fact that an attempt to call the model was made
         callbacks = litellm.input_callback + self.dynamic_input_callbacks
         for callback in callbacks:
@@ -1229,29 +1228,17 @@ class Logging:
                         litellm_call_id=self.litellm_params["litellm_call_id"],
                         print_verbose=print_verbose,
                     )
-
-                elif callback == "lite_debugger":
-                    print_verbose(
-                        f"reaches litedebugger for logging! - model_call_details {self.model_call_details}"
-                    )
-                    model = self.model_call_details["model"]
-                    messages = self.model_call_details["input"]
-                    print_verbose(f"liteDebuggerClient: {liteDebuggerClient}")
-                    liteDebuggerClient.input_log_event(
-                        model=model,
-                        messages=messages,
-                        end_user=self.model_call_details.get("user", "default"),
-                        litellm_call_id=self.litellm_params["litellm_call_id"],
-                        litellm_params=self.model_call_details["litellm_params"],
-                        optional_params=self.model_call_details["optional_params"],
-                        print_verbose=print_verbose,
-                        call_type=self.call_type,
-                    )
                 elif callback == "sentry" and add_breadcrumb:
-                    print_verbose("reaches sentry breadcrumbing")
+                    details_to_log = copy.deepcopy(self.model_call_details)
+                    if litellm.turn_off_message_logging:
+                        # make a copy of the _model_Call_details and log it
+                        details_to_log.pop("messages", None)
+                        details_to_log.pop("input", None)
+                        details_to_log.pop("prompt", None)
+
                     add_breadcrumb(
                         category="litellm.llm_call",
-                        message=f"Model Call Details pre-call: {self.model_call_details}",
+                        message=f"Model Call Details pre-call: {details_to_log}",
                         level="info",
                     )
                 elif isinstance(callback, CustomLogger):  # custom logger class
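
For reference, the sentry branch above redacts a deep copy rather than `self.model_call_details` itself, so later callbacks still see the full data. A standalone sketch of that copy-and-pop pattern; the function name and sample dict are illustrative, not part of litellm:

```python
import copy


def redacted_copy(model_call_details: dict, turn_off_message_logging: bool) -> dict:
    # Deep-copy first so the original dict stays intact for other callbacks.
    details_to_log = copy.deepcopy(model_call_details)
    if turn_off_message_logging:
        details_to_log.pop("messages", None)
        details_to_log.pop("input", None)
        details_to_log.pop("prompt", None)
    return details_to_log


details = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]}
print(redacted_copy(details, turn_off_message_logging=True))  # {'model': 'gpt-3.5-turbo'}
print("messages" in details)  # True -- the original dict is untouched
```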
@@ -1315,7 +1302,7 @@ class Logging:
             print_verbose(
                 f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while logging {traceback.format_exc()}"
             )
-
+        self.redact_message_input_output_from_logging(result=original_response)
         # Input Integration Logging -> If you want to log the fact that an attempt to call the model was made

         callbacks = litellm.input_callback + self.dynamic_input_callbacks
@@ -1527,31 +1514,7 @@ class Logging:
         else:
             callbacks = litellm.success_callback

-        # check if user opted out of logging message/response to callbacks
-        if litellm.turn_off_message_logging == True:
-            # remove messages, prompts, input, response from logging
-            self.model_call_details["messages"] = "redacted-by-litellm"
-            self.model_call_details["prompt"] = ""
-            self.model_call_details["input"] = ""
-
-            # response cleaning
-            # ChatCompletion Responses
-            if (
-                self.stream
-                and "complete_streaming_response" in self.model_call_details
-            ):
-                _streaming_response = self.model_call_details[
-                    "complete_streaming_response"
-                ]
-                for choice in _streaming_response.choices:
-                    choice.message.content = "redacted-by-litellm"
-            else:
-                for choice in result.choices:
-                    choice.message.content = "redacted-by-litellm"
-
-            # Embedding Responses
-
-            # Text Completion Responses
+        self.redact_message_input_output_from_logging(result=result)

         for callback in callbacks:
             try:
@@ -2097,6 +2060,9 @@ class Logging:
                     callbacks.append(callback)
         else:
             callbacks = litellm._async_success_callback
+
+        self.redact_message_input_output_from_logging(result=result)
+
         print_verbose(f"Async success callbacks: {callbacks}")
         for callback in callbacks:
             # check if callback can run for this request
@@ -2258,7 +2224,10 @@ class Logging:
                 start_time=start_time,
                 end_time=end_time,
             )
+
         result = None  # result sent to all loggers, init this to None incase it's not created
+
+        self.redact_message_input_output_from_logging(result=result)
         for callback in litellm.failure_callback:
             try:
                 if callback == "lite_debugger":
@@ -2443,6 +2412,33 @@ class Logging:
                     f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while success logging {traceback.format_exc()}"
                 )

+    def redact_message_input_output_from_logging(self, result):
+        """
+        Removes messages, prompts, input, response from logging. This modifies the data in-place
+        only redacts when litellm.turn_off_message_logging == True
+        """
+        # check if user opted out of logging message/response to callbacks
+        if litellm.turn_off_message_logging == True:
+            # remove messages, prompts, input, response from logging
+            self.model_call_details["messages"] = "redacted-by-litellm"
+            self.model_call_details["prompt"] = ""
+            self.model_call_details["input"] = ""
+
+            # response cleaning
+            # ChatCompletion Responses
+            if self.stream and "complete_streaming_response" in self.model_call_details:
+                _streaming_response = self.model_call_details[
+                    "complete_streaming_response"
+                ]
+                for choice in _streaming_response.choices:
+                    choice.message.content = "redacted-by-litellm"
+            else:
+                if result is not None:
+                    if isinstance(result, litellm.ModelResponse):
+                        if hasattr(result, "choices"):
+                            for choice in result.choices:
+                                choice.message.content = "redacted-by-litellm"
+

 def exception_logging(
     additional_args={},
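
The non-streaming branch of the new helper mutates each choice's message content in place. A hedged, self-contained sketch of that behavior, using stand-in classes instead of `litellm.ModelResponse` so it runs without a `Logging` object; all names below are illustrative:

```python
from dataclasses import dataclass, field


@dataclass
class Message:
    content: str


@dataclass
class Choice:
    message: Message


@dataclass
class FakeModelResponse:  # stand-in for litellm.ModelResponse
    choices: list = field(default_factory=list)


def redact_response_in_place(result) -> None:
    # Mirrors the non-streaming branch: blank out every choice's message content.
    if result is not None and hasattr(result, "choices"):
        for choice in result.choices:
            choice.message.content = "redacted-by-litellm"


resp = FakeModelResponse(choices=[Choice(message=Message(content="secret output"))])
redact_response_in_place(resp)
print(resp.choices[0].message.content)  # redacted-by-litellm
```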