mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 03:04:13 +00:00
test test_redact_msgs_from_logs
This commit is contained in:
parent
b154a4a8ad
commit
d274cfeb3f
2 changed files with 51 additions and 1 deletions
|
@ -41,7 +41,7 @@ def redact_message_input_output_from_logging(
|
||||||
# response cleaning
|
# response cleaning
|
||||||
# ChatCompletion Responses
|
# ChatCompletion Responses
|
||||||
if (
|
if (
|
||||||
litellm_logging_obj.stream
|
litellm_logging_obj.stream is True
|
||||||
and "complete_streaming_response" in litellm_logging_obj.model_call_details
|
and "complete_streaming_response" in litellm_logging_obj.model_call_details
|
||||||
):
|
):
|
||||||
_streaming_response = litellm_logging_obj.model_call_details[
|
_streaming_response = litellm_logging_obj.model_call_details[
|
||||||
|
|
|
@ -3,6 +3,7 @@ from unittest import mock
|
||||||
|
|
||||||
from dotenv import load_dotenv
|
from dotenv import load_dotenv
|
||||||
import copy
|
import copy
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
load_dotenv()
|
load_dotenv()
|
||||||
import os
|
import os
|
||||||
|
@ -395,3 +396,52 @@ def test_get_supported_openai_params() -> None:
|
||||||
|
|
||||||
# Unmapped provider
|
# Unmapped provider
|
||||||
assert get_supported_openai_params("nonexistent") is None
|
assert get_supported_openai_params("nonexistent") is None
|
||||||
|
|
||||||
|
|
||||||
|
def test_redact_msgs_from_logs():
    """
    Tests that turn_off_message_logging does not modify the response_obj.

    On the proxy some users were seeing the redaction impact client side
    responses: redaction must only affect what is logged, never the object
    returned to the caller.
    """
    from litellm.litellm_core_utils.redact_messages import (
        redact_message_input_output_from_logging,
    )
    from litellm.utils import Logging

    # Enable redaction globally for the duration of this test only.
    litellm.turn_off_message_logging = True

    expected_content = "I'm LLaMA, an AI assistant developed by Meta AI that can understand and respond to human input in a conversational manner."

    response_obj = litellm.ModelResponse(
        choices=[
            {
                "finish_reason": "stop",
                "index": 0,
                "message": {
                    "content": expected_content,
                    "role": "assistant",
                },
            }
        ]
    )

    try:
        _redacted_response_obj = redact_message_input_output_from_logging(
            result=response_obj,
            litellm_logging_obj=Logging(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": "hi"}],
                stream=False,
                call_type="acompletion",
                litellm_call_id="1234",
                start_time=datetime.now(),
                function_id="1234",
            ),
        )

        # Assert the response_obj content is NOT modified by the redaction.
        assert response_obj.choices[0].message.content == expected_content
    finally:
        # Always restore the global flag, even if the assertion (or the
        # redaction call) fails — otherwise the leaked True value would
        # silently change the behavior of every subsequent test.
        litellm.turn_off_message_logging = False

    print("Test passed")
Loading…
Add table
Add a link
Reference in a new issue