log MCP tool call metadata in SLP

This commit is contained in:
Ishaan Jaff 2025-03-29 15:50:13 -07:00
parent b7b9f9d9da
commit 08a52f4389
5 changed files with 79 additions and 1 deletions

View file

@@ -67,6 +67,7 @@ from litellm.types.utils import (
StandardCallbackDynamicParams, StandardCallbackDynamicParams,
StandardLoggingAdditionalHeaders, StandardLoggingAdditionalHeaders,
StandardLoggingHiddenParams, StandardLoggingHiddenParams,
StandardLoggingMCPToolCall,
StandardLoggingMetadata, StandardLoggingMetadata,
StandardLoggingModelCostFailureDebugInformation, StandardLoggingModelCostFailureDebugInformation,
StandardLoggingModelInformation, StandardLoggingModelInformation,
@@ -3114,6 +3115,7 @@ class StandardLoggingPayloadSetup:
litellm_params: Optional[dict] = None, litellm_params: Optional[dict] = None,
prompt_integration: Optional[str] = None, prompt_integration: Optional[str] = None,
applied_guardrails: Optional[List[str]] = None, applied_guardrails: Optional[List[str]] = None,
mcp_tool_call_metadata: Optional[StandardLoggingMCPToolCall] = None,
) -> StandardLoggingMetadata: ) -> StandardLoggingMetadata:
""" """
Clean and filter the metadata dictionary to include only the specified keys in StandardLoggingMetadata. Clean and filter the metadata dictionary to include only the specified keys in StandardLoggingMetadata.
@@ -3160,6 +3162,7 @@
user_api_key_end_user_id=None, user_api_key_end_user_id=None,
prompt_management_metadata=prompt_management_metadata, prompt_management_metadata=prompt_management_metadata,
applied_guardrails=applied_guardrails, applied_guardrails=applied_guardrails,
mcp_tool_call_metadata=mcp_tool_call_metadata,
) )
if isinstance(metadata, dict): if isinstance(metadata, dict):
# Filter the metadata dictionary to include only the specified keys # Filter the metadata dictionary to include only the specified keys
@@ -3486,6 +3489,7 @@ def get_standard_logging_object_payload(
litellm_params=litellm_params, litellm_params=litellm_params,
prompt_integration=kwargs.get("prompt_integration", None), prompt_integration=kwargs.get("prompt_integration", None),
applied_guardrails=kwargs.get("applied_guardrails", None), applied_guardrails=kwargs.get("applied_guardrails", None),
mcp_tool_call_metadata=kwargs.get("mcp_tool_call_metadata", None),
) )
_request_body = proxy_server_request.get("body", {}) _request_body = proxy_server_request.get("body", {})
@@ -3626,6 +3630,7 @@ def get_standard_logging_metadata(
user_api_key_end_user_id=None, user_api_key_end_user_id=None,
prompt_management_metadata=None, prompt_management_metadata=None,
applied_guardrails=None, applied_guardrails=None,
mcp_tool_call_metadata=None,
) )
if isinstance(metadata, dict): if isinstance(metadata, dict):
# Filter the metadata dictionary to include only the specified keys # Filter the metadata dictionary to include only the specified keys

View file

@@ -11,11 +11,13 @@ from fastapi.responses import StreamingResponse
from pydantic import ValidationError from pydantic import ValidationError
from litellm._logging import verbose_logger from litellm._logging import verbose_logger
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.proxy._types import UserAPIKeyAuth from litellm.proxy._types import UserAPIKeyAuth
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
from litellm.types.mcp_server.mcp_server_manager import ( from litellm.types.mcp_server.mcp_server_manager import (
ListMCPToolsRestAPIResponseObject, ListMCPToolsRestAPIResponseObject,
) )
from litellm.types.utils import StandardLoggingMCPToolCall
from litellm.utils import client from litellm.utils import client
# Check if MCP is available # Check if MCP is available
@@ -124,6 +126,20 @@ if MCP_AVAILABLE:
status_code=400, detail="Request arguments are required" status_code=400, detail="Request arguments are required"
) )
standard_logging_mcp_tool_call: StandardLoggingMCPToolCall = (
_get_standard_logging_mcp_tool_call(
name=name,
arguments=arguments,
)
)
litellm_logging_obj: Optional[LiteLLMLoggingObj] = kwargs.get(
"litellm_logging_obj", None
)
if litellm_logging_obj:
litellm_logging_obj.model_call_details["mcp_tool_call_metadata"] = (
standard_logging_mcp_tool_call
)
# Try managed server tool first # Try managed server tool first
if name in global_mcp_server_manager.tool_name_to_mcp_server_name_mapping: if name in global_mcp_server_manager.tool_name_to_mcp_server_name_mapping:
return await _handle_managed_mcp_tool(name, arguments) return await _handle_managed_mcp_tool(name, arguments)
@@ -131,6 +147,25 @@ if MCP_AVAILABLE:
# Fall back to local tool registry # Fall back to local tool registry
return await _handle_local_mcp_tool(name, arguments) return await _handle_local_mcp_tool(name, arguments)
def _get_standard_logging_mcp_tool_call(
    name: str,
    arguments: Dict[str, Any],
) -> StandardLoggingMCPToolCall:
    """Build the standard logging payload describing one MCP tool invocation.

    Looks up the managed MCP server that owns ``name`` via the global server
    manager; when found, the server's ``mcp_info`` (server name / logo URL)
    is attached so the litellm UI can render it on the logs page.
    """
    # Start with the fields we always know, then enrich with server info.
    tool_call = StandardLoggingMCPToolCall(
        name=name,
        arguments=arguments,
    )
    server = global_mcp_server_manager._get_mcp_server_from_tool_name(name)
    if server:
        info = server.mcp_info or {}
        # .get() keeps these None when the server's mcp_info omits them.
        tool_call["mcp_server_name"] = info.get("server_name")
        tool_call["mcp_server_logo_url"] = info.get("logo_url")
    return tool_call
async def _handle_managed_mcp_tool( async def _handle_managed_mcp_tool(
name: str, arguments: Dict[str, Any] name: str, arguments: Dict[str, Any]
) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]: ) -> List[Union[MCPTextContent, MCPImageContent, MCPEmbeddedResource]]:

View file

@@ -27,6 +27,7 @@ from litellm.types.utils import (
ModelResponse, ModelResponse,
ProviderField, ProviderField,
StandardCallbackDynamicParams, StandardCallbackDynamicParams,
StandardLoggingMCPToolCall,
StandardLoggingPayloadErrorInformation, StandardLoggingPayloadErrorInformation,
StandardLoggingPayloadStatus, StandardLoggingPayloadStatus,
StandardPassThroughResponseObject, StandardPassThroughResponseObject,
@@ -1928,6 +1929,7 @@ class SpendLogsMetadata(TypedDict):
] # special param to log k,v pairs to spendlogs for a call ] # special param to log k,v pairs to spendlogs for a call
requester_ip_address: Optional[str] requester_ip_address: Optional[str]
applied_guardrails: Optional[List[str]] applied_guardrails: Optional[List[str]]
mcp_tool_call_metadata: Optional[StandardLoggingMCPToolCall]
status: StandardLoggingPayloadStatus status: StandardLoggingPayloadStatus
proxy_server_request: Optional[str] proxy_server_request: Optional[str]
batch_models: Optional[List[str]] batch_models: Optional[List[str]]

View file

@@ -13,7 +13,7 @@ from litellm._logging import verbose_proxy_logger
from litellm.litellm_core_utils.core_helpers import get_litellm_metadata_from_kwargs from litellm.litellm_core_utils.core_helpers import get_litellm_metadata_from_kwargs
from litellm.proxy._types import SpendLogsMetadata, SpendLogsPayload from litellm.proxy._types import SpendLogsMetadata, SpendLogsPayload
from litellm.proxy.utils import PrismaClient, hash_token from litellm.proxy.utils import PrismaClient, hash_token
from litellm.types.utils import StandardLoggingPayload from litellm.types.utils import StandardLoggingMCPToolCall, StandardLoggingPayload
from litellm.utils import get_end_user_id_for_cost_tracking from litellm.utils import get_end_user_id_for_cost_tracking
@@ -38,6 +38,7 @@ def _get_spend_logs_metadata(
metadata: Optional[dict], metadata: Optional[dict],
applied_guardrails: Optional[List[str]] = None, applied_guardrails: Optional[List[str]] = None,
batch_models: Optional[List[str]] = None, batch_models: Optional[List[str]] = None,
mcp_tool_call_metadata: Optional[StandardLoggingMCPToolCall] = None,
) -> SpendLogsMetadata: ) -> SpendLogsMetadata:
if metadata is None: if metadata is None:
return SpendLogsMetadata( return SpendLogsMetadata(
@@ -55,6 +56,7 @@ def _get_spend_logs_metadata(
error_information=None, error_information=None,
proxy_server_request=None, proxy_server_request=None,
batch_models=None, batch_models=None,
mcp_tool_call_metadata=None,
) )
verbose_proxy_logger.debug( verbose_proxy_logger.debug(
"getting payload for SpendLogs, available keys in metadata: " "getting payload for SpendLogs, available keys in metadata: "
@@ -71,6 +73,7 @@ def _get_spend_logs_metadata(
) )
clean_metadata["applied_guardrails"] = applied_guardrails clean_metadata["applied_guardrails"] = applied_guardrails
clean_metadata["batch_models"] = batch_models clean_metadata["batch_models"] = batch_models
clean_metadata["mcp_tool_call_metadata"] = mcp_tool_call_metadata
return clean_metadata return clean_metadata
@@ -200,6 +203,11 @@ def get_logging_payload( # noqa: PLR0915
if standard_logging_payload is not None if standard_logging_payload is not None
else None else None
), ),
mcp_tool_call_metadata=(
standard_logging_payload["metadata"].get("mcp_tool_call_metadata", None)
if standard_logging_payload is not None
else None
),
) )
special_usage_fields = ["completion_tokens", "prompt_tokens", "total_tokens"] special_usage_fields = ["completion_tokens", "prompt_tokens", "total_tokens"]

View file

@@ -1647,6 +1647,33 @@ class StandardLoggingUserAPIKeyMetadata(TypedDict):
user_api_key_end_user_id: Optional[str] user_api_key_end_user_id: Optional[str]
class StandardLoggingMCPToolCall(TypedDict, total=False):
    """Logging record for a single MCP tool call (all keys optional)."""

    # Name of the tool to call.
    name: str
    # Arguments passed to the tool.
    arguments: dict
    # Result of the tool call.
    result: dict
    # Name of the MCP server the tool call was routed to.
    mcp_server_name: Optional[str]
    # Optional logo URL of that MCP server, used to render the logo
    # on the logs page of the litellm UI.
    mcp_server_logo_url: Optional[str]
class StandardBuiltInToolsParams(TypedDict, total=False): class StandardBuiltInToolsParams(TypedDict, total=False):
""" """
Standard built-in OpenAItools parameters Standard built-in OpenAItools parameters
@@ -1677,6 +1704,7 @@ class StandardLoggingMetadata(StandardLoggingUserAPIKeyMetadata):
requester_ip_address: Optional[str] requester_ip_address: Optional[str]
requester_metadata: Optional[dict] requester_metadata: Optional[dict]
prompt_management_metadata: Optional[StandardLoggingPromptManagementMetadata] prompt_management_metadata: Optional[StandardLoggingPromptManagementMetadata]
mcp_tool_call_metadata: Optional[StandardLoggingMCPToolCall]
applied_guardrails: Optional[List[str]] applied_guardrails: Optional[List[str]]