test: add TestResponseAPILoggingUtils coverage for Responses API usage detection and transformation

This commit is contained in:
Ishaan Jaff 2025-03-12 12:45:03 -07:00
parent f6f5420f0a
commit f88380cfdf

View file

@ -14,6 +14,7 @@ from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfi
from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.responses.utils import ResponseAPILoggingUtils, ResponsesAPIRequestUtils
from litellm.types.llms.openai import ResponsesAPIOptionalRequestParams
from litellm.types.utils import Usage
class TestResponsesAPIRequestUtils:
@ -82,3 +83,68 @@ class TestResponsesAPIRequestUtils:
assert "model" not in result
assert result["temperature"] == 0.7
assert result["max_output_tokens"] == 100
class TestResponseAPILoggingUtils:
    """Unit tests for ResponseAPILoggingUtils: detecting the Responses API
    usage format and converting it to the Chat Completions `Usage` model."""

    def test_is_response_api_usage_true(self):
        """A dict carrying Responses API token keys is recognized as such."""
        responses_payload = {"input_tokens": 10, "output_tokens": 20}

        assert (
            ResponseAPILoggingUtils._is_response_api_usage(responses_payload)
            is True
        )

    def test_is_response_api_usage_false(self):
        """A dict carrying Chat Completions token keys is NOT Responses API usage."""
        chat_payload = {
            "prompt_tokens": 10,
            "completion_tokens": 20,
            "total_tokens": 30,
        }

        assert ResponseAPILoggingUtils._is_response_api_usage(chat_payload) is False

    def test_transform_response_api_usage_to_chat_usage(self):
        """Responses API token counts map onto the `Usage` fields one-to-one."""
        responses_usage = {
            "input_tokens": 10,
            "output_tokens": 20,
            "total_tokens": 30,
            "output_tokens_details": {"reasoning_tokens": 5},
        }

        chat_usage = (
            ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
                responses_usage
            )
        )

        assert isinstance(chat_usage, Usage)
        assert chat_usage.prompt_tokens == 10
        assert chat_usage.completion_tokens == 20
        assert chat_usage.total_tokens == 30

    def test_transform_response_api_usage_with_none_values(self):
        """Transformation tolerates a zero input_tokens count (the fixture uses
        0 as a stand-in for a previously-None value)."""
        responses_usage = {
            "input_tokens": 0,  # zero stands in for an absent/None count
            "output_tokens": 20,
            "total_tokens": 20,
            "output_tokens_details": {"reasoning_tokens": 5},
        }

        chat_usage = (
            ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
                responses_usage
            )
        )

        assert chat_usage.prompt_tokens == 0
        assert chat_usage.completion_tokens == 20
        assert chat_usage.total_tokens == 20