Add datadog health check support + fix bedrock converse cost tracking w/ region name specified (#7958)

* fix(bedrock/converse_handler.py): fix bedrock region name on async calls

* fix(utils.py): fix split model handling

Fixes bedrock cost calculation when region name is given

* feat(_health_endpoints.py): support health checking datadog integration

Closes https://github.com/BerriAI/litellm/issues/7921
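
For context, a sketch of the scenario the two bedrock fixes target, assuming the standard `litellm.acompletion` / `litellm.completion_cost` APIs; the model id, region, and credentials below are placeholders:

import asyncio
import litellm

async def main():
    # Async Bedrock Converse call with an explicit region name; the fix ensures
    # the region is threaded through on the async path.
    response = await litellm.acompletion(
        model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",  # placeholder model id
        messages=[{"role": "user", "content": "Hello"}],
        aws_region_name="us-west-2",  # placeholder region
    )
    # With the split-model fix, cost calculation resolves the correct pricing
    # even though a region name was specified on the request.
    print(litellm.completion_cost(completion_response=response))

asyncio.run(main())

The Datadog check is added to the proxy's health endpoints (`_health_endpoints.py`); a request along these lines should exercise it. The route and query parameter are assumptions modeled on the existing `/health/services` pattern, and the key is a placeholder:

import httpx

resp = httpx.get(
    "http://localhost:4000/health/services",
    params={"service": "datadog"},
    headers={"Authorization": "Bearer sk-1234"},  # placeholder proxy master key
)
print(resp.status_code, resp.json())
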
Krish Dholakia 2025-01-23 22:17:09 -08:00 committed by GitHub
parent a835baacfc
commit c6e9240405
13 changed files with 254 additions and 33 deletions

@@ -3341,3 +3341,85 @@ def _get_traceback_str_for_error(error_str: str) -> str:
function wrapped with lru_cache to limit the number of times `traceback.format_exc()` is called
"""
return traceback.format_exc()
from decimal import Decimal
# used for unit testing
from typing import Any, Dict, List, Optional, Union
def create_dummy_standard_logging_payload() -> StandardLoggingPayload:
# First create the nested objects with proper typing
model_info = StandardLoggingModelInformation(
model_map_key="gpt-3.5-turbo", model_map_value=None
)
metadata = StandardLoggingMetadata( # type: ignore
user_api_key_hash=str("test_hash"),
user_api_key_alias=str("test_alias"),
user_api_key_team_id=str("test_team"),
user_api_key_user_id=str("test_user"),
user_api_key_team_alias=str("test_team_alias"),
user_api_key_org_id=None,
spend_logs_metadata=None,
requester_ip_address=str("127.0.0.1"),
requester_metadata=None,
user_api_key_end_user_id=str("test_end_user"),
)
hidden_params = StandardLoggingHiddenParams(
model_id=None,
cache_key=None,
api_base=None,
response_cost=None,
additional_headers=None,
litellm_overhead_time_ms=None,
)
# Convert numeric values to appropriate types
response_cost = Decimal("0.1")
start_time = Decimal("1234567890.0")
end_time = Decimal("1234567891.0")
completion_start_time = Decimal("1234567890.5")
saved_cache_cost = Decimal("0.0")
# Create messages and response with proper typing
messages: List[Dict[str, str]] = [{"role": "user", "content": "Hello, world!"}]
response: Dict[str, List[Dict[str, Dict[str, str]]]] = {
"choices": [{"message": {"content": "Hi there!"}}]
}
# Main payload initialization
return StandardLoggingPayload( # type: ignore
id=str("test_id"),
call_type=str("completion"),
stream=bool(False),
response_cost=response_cost,
response_cost_failure_debug_info=None,
status=str("success"),
total_tokens=int(30),
prompt_tokens=int(20),
completion_tokens=int(10),
startTime=start_time,
endTime=end_time,
completionStartTime=completion_start_time,
model_map_information=model_info,
model=str("gpt-3.5-turbo"),
model_id=str("model-123"),
model_group=str("openai-gpt"),
custom_llm_provider=str("openai"),
api_base=str("https://api.openai.com"),
metadata=metadata,
cache_hit=bool(False),
cache_key=None,
saved_cache_cost=saved_cache_cost,
request_tags=[],
end_user=None,
requester_ip_address=str("127.0.0.1"),
messages=messages,
response=response,
error_str=None,
model_parameters={"stream": True},
hidden_params=hidden_params,
)
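
The `create_dummy_standard_logging_payload` helper above is a unit-test fixture; a minimal sketch of how a test might consume it, assuming `StandardLoggingPayload` is the dict-like TypedDict defined in litellm's types (the test name and assertions are illustrative, not taken from the diff):

def test_dummy_standard_logging_payload_shape():
    payload = create_dummy_standard_logging_payload()
    assert payload["model"] == "gpt-3.5-turbo"
    assert payload["status"] == "success"
    assert payload["response_cost"] == Decimal("0.1")
    assert payload["metadata"]["user_api_key_hash"] == "test_hash"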

@@ -110,7 +110,7 @@ def _set_duration_in_model_call_details(
if logging_obj and hasattr(logging_obj, "model_call_details"):
logging_obj.model_call_details["llm_api_duration_ms"] = duration_ms
else:
- verbose_logger.warning(
+ verbose_logger.debug(
"`logging_obj` not found - unable to track `llm_api_duration_ms"
)
except Exception as e:
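
For reference, the `llm_api_duration_ms` value recorded above lands in the call's `model_call_details`; a sketch of reading it from a custom success callback, assuming those details are passed through as the callback's `kwargs` (standard litellm custom-callback signature):

import litellm

def log_duration(kwargs, completion_response, start_time, end_time):
    # Assumption: llm_api_duration_ms is surfaced in the kwargs handed to
    # success callbacks once the duration has been recorded.
    duration_ms = kwargs.get("llm_api_duration_ms")
    print(f"LLM API duration: {duration_ms} ms")

litellm.success_callback = [log_duration]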