From 37e116235aa67d87c8db19a67e97f2994df47c35 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Fri, 28 Feb 2025 20:10:09 -0800 Subject: [PATCH] (Feat) - Show Error Logs on LiteLLM UI (#8904) * fix test_moderations_bad_model * use async_post_call_failure_hook * basic logging errors in DB * show status on ui * show status on ui * ui show request / response side by side * stash fixes * working, track raw request * track error info in metadata * fix showing error / request / response logs * show traceback on error viewer * ui with traceback of error * fix async_post_call_failure_hook * fix(http_parsing_utils.py): orjson can throw errors on some emoji's in text, default to json.loads * test_get_error_information * fix code quality * rename proxy track cost callback test * _should_store_errors_in_spend_logs * feature flag error logs * Revert "_should_store_errors_in_spend_logs" This reverts commit 7f345df47762ff3be04e6fde2f13e70019ede4ee. * Revert "feature flag error logs" This reverts commit 0e90c022bbea3550f169118d81e60d711a4024fe. * test_spend_logs_payload * fix OTEL log_db_metrics * fix import json * fix ui linting error * test_async_post_call_failure_hook * test_chat_completion_bad_model_with_spend_logs --------- Co-authored-by: Krrish Dholakia --- docs/my-website/docs/proxy/architecture.md | 2 +- litellm/litellm_core_utils/litellm_logging.py | 16 + litellm/proxy/_types.py | 4 + litellm/proxy/db/log_db_metrics.py | 6 +- .../proxy/hooks/proxy_track_cost_callback.py | 279 ++++++++++------- litellm/proxy/proxy_config.yaml | 1 + litellm/proxy/proxy_server.py | 15 +- .../spend_tracking/spend_tracking_utils.py | 22 +- litellm/proxy/utils.py | 14 +- litellm/types/utils.py | 2 + .../hooks/test_proxy_track_cost_callback.py | 80 +++++ .../logging_callback_tests/test_spend_logs.py | 2 + .../test_standard_logging_payload.py | 1 + .../test_key_generate_prisma.py | 78 +++-- .../test_openai_error_handling.py | 94 ++++++ .../src/components/view_logs/ErrorViewer.tsx | 164 ++++++++++ .../src/components/view_logs/columns.tsx | 66 ++++ .../src/components/view_logs/index.tsx | 293 ++++++++++-------- 18 files changed, 845 insertions(+), 294 deletions(-) create mode 100644 tests/litellm/proxy/hooks/test_proxy_track_cost_callback.py create mode 100644 ui/litellm-dashboard/src/components/view_logs/ErrorViewer.tsx diff --git a/docs/my-website/docs/proxy/architecture.md b/docs/my-website/docs/proxy/architecture.md index 832fd266b6..2b83583ed9 100644 --- a/docs/my-website/docs/proxy/architecture.md +++ b/docs/my-website/docs/proxy/architecture.md @@ -36,7 +36,7 @@ import TabItem from '@theme/TabItem'; - Virtual Key Rate Limit - User Rate Limit - Team Limit - - The `_PROXY_track_cost_callback` updates spend / usage in the LiteLLM database. [Here is everything tracked in the DB per request](https://github.com/BerriAI/litellm/blob/ba41a72f92a9abf1d659a87ec880e8e319f87481/schema.prisma#L172) + - The `_ProxyDBLogger` updates spend / usage in the LiteLLM database. 
[Here is everything tracked in the DB per request](https://github.com/BerriAI/litellm/blob/ba41a72f92a9abf1d659a87ec880e8e319f87481/schema.prisma#L172) ## Frequently Asked Questions diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py index 4d082e29ad..5a0a9c55ef 100644 --- a/litellm/litellm_core_utils/litellm_logging.py +++ b/litellm/litellm_core_utils/litellm_logging.py @@ -3114,10 +3114,26 @@ class StandardLoggingPayloadSetup: str(original_exception.__class__.__name__) if original_exception else "" ) _llm_provider_in_exception = getattr(original_exception, "llm_provider", "") + + # Get traceback information (first 100 lines) + traceback_info = "" + if original_exception: + tb = getattr(original_exception, "__traceback__", None) + if tb: + import traceback + + tb_lines = traceback.format_tb(tb) + traceback_info = "".join(tb_lines[:100]) # Limit to first 100 lines + + # Get additional error details + error_message = str(original_exception) + return StandardLoggingPayloadErrorInformation( error_code=error_status, error_class=error_class, llm_provider=_llm_provider_in_exception, + traceback=traceback_info, + error_message=error_message if original_exception else "", ) @staticmethod diff --git a/litellm/proxy/_types.py b/litellm/proxy/_types.py index d2f20e56cf..1b5faf3f9c 100644 --- a/litellm/proxy/_types.py +++ b/litellm/proxy/_types.py @@ -26,6 +26,8 @@ from litellm.types.utils import ( ModelResponse, ProviderField, StandardCallbackDynamicParams, + StandardLoggingPayloadErrorInformation, + StandardLoggingPayloadStatus, StandardPassThroughResponseObject, TextCompletionResponse, ) @@ -1854,6 +1856,8 @@ class SpendLogsMetadata(TypedDict): ] # special param to log k,v pairs to spendlogs for a call requester_ip_address: Optional[str] applied_guardrails: Optional[List[str]] + status: StandardLoggingPayloadStatus + error_information: Optional[StandardLoggingPayloadErrorInformation] class SpendLogsPayload(TypedDict): diff --git a/litellm/proxy/db/log_db_metrics.py b/litellm/proxy/db/log_db_metrics.py index b9b443c6db..9bd3350793 100644 --- a/litellm/proxy/db/log_db_metrics.py +++ b/litellm/proxy/db/log_db_metrics.py @@ -64,10 +64,10 @@ def log_db_metrics(func): # in litellm custom callbacks kwargs is passed as arg[0] # https://docs.litellm.ai/docs/observability/custom_callback#callback-functions args is not None - and len(args) > 0 - and isinstance(args[0], dict) + and len(args) > 1 + and isinstance(args[1], dict) ): - passed_kwargs = args[0] + passed_kwargs = args[1] parent_otel_span = _get_parent_otel_span_from_kwargs( kwargs=passed_kwargs ) diff --git a/litellm/proxy/hooks/proxy_track_cost_callback.py b/litellm/proxy/hooks/proxy_track_cost_callback.py index 33e9341706..c8f1c519d4 100644 --- a/litellm/proxy/hooks/proxy_track_cost_callback.py +++ b/litellm/proxy/hooks/proxy_track_cost_callback.py @@ -1,138 +1,205 @@ import asyncio import traceback -from typing import Optional, Union, cast +from datetime import datetime +from typing import Any, Optional, Union, cast import litellm from litellm._logging import verbose_proxy_logger +from litellm.integrations.custom_logger import CustomLogger from litellm.litellm_core_utils.core_helpers import ( _get_parent_otel_span_from_kwargs, get_litellm_metadata_from_kwargs, ) +from litellm.litellm_core_utils.litellm_logging import StandardLoggingPayloadSetup +from litellm.proxy._types import UserAPIKeyAuth from litellm.proxy.auth.auth_checks import log_db_metrics -from litellm.types.utils import 
StandardLoggingPayload +from litellm.types.utils import ( + StandardLoggingPayload, + StandardLoggingUserAPIKeyMetadata, +) from litellm.utils import get_end_user_id_for_cost_tracking -@log_db_metrics -async def _PROXY_track_cost_callback( - kwargs, # kwargs to completion - completion_response: litellm.ModelResponse, # response from completion - start_time=None, - end_time=None, # start/end time for completion -): - from litellm.proxy.proxy_server import ( - prisma_client, - proxy_logging_obj, - update_cache, - update_database, - ) - - verbose_proxy_logger.debug("INSIDE _PROXY_track_cost_callback") - try: - verbose_proxy_logger.debug( - f"kwargs stream: {kwargs.get('stream', None)} + complete streaming response: {kwargs.get('complete_streaming_response', None)}" - ) - parent_otel_span = _get_parent_otel_span_from_kwargs(kwargs=kwargs) - litellm_params = kwargs.get("litellm_params", {}) or {} - end_user_id = get_end_user_id_for_cost_tracking(litellm_params) - metadata = get_litellm_metadata_from_kwargs(kwargs=kwargs) - user_id = cast(Optional[str], metadata.get("user_api_key_user_id", None)) - team_id = cast(Optional[str], metadata.get("user_api_key_team_id", None)) - org_id = cast(Optional[str], metadata.get("user_api_key_org_id", None)) - key_alias = cast(Optional[str], metadata.get("user_api_key_alias", None)) - end_user_max_budget = metadata.get("user_api_end_user_max_budget", None) - sl_object: Optional[StandardLoggingPayload] = kwargs.get( - "standard_logging_object", None - ) - response_cost = ( - sl_object.get("response_cost", None) - if sl_object is not None - else kwargs.get("response_cost", None) +class _ProxyDBLogger(CustomLogger): + async def async_log_success_event(self, kwargs, response_obj, start_time, end_time): + await self._PROXY_track_cost_callback( + kwargs, response_obj, start_time, end_time ) - if response_cost is not None: - user_api_key = metadata.get("user_api_key", None) - if kwargs.get("cache_hit", False) is True: - response_cost = 0.0 - verbose_proxy_logger.info( - f"Cache Hit: response_cost {response_cost}, for user_id {user_id}" - ) + async def async_post_call_failure_hook( + self, + request_data: dict, + original_exception: Exception, + user_api_key_dict: UserAPIKeyAuth, + ): + from litellm.proxy.proxy_server import update_database - verbose_proxy_logger.debug( - f"user_api_key {user_api_key}, prisma_client: {prisma_client}" + _metadata = dict( + StandardLoggingUserAPIKeyMetadata( + user_api_key_hash=user_api_key_dict.api_key, + user_api_key_alias=user_api_key_dict.key_alias, + user_api_key_user_email=user_api_key_dict.user_email, + user_api_key_user_id=user_api_key_dict.user_id, + user_api_key_team_id=user_api_key_dict.team_id, + user_api_key_org_id=user_api_key_dict.org_id, + user_api_key_team_alias=user_api_key_dict.team_alias, + user_api_key_end_user_id=user_api_key_dict.end_user_id, ) - if _should_track_cost_callback( - user_api_key=user_api_key, - user_id=user_id, - team_id=team_id, - end_user_id=end_user_id, - ): - ## UPDATE DATABASE - await update_database( - token=user_api_key, - response_cost=response_cost, - user_id=user_id, - end_user_id=end_user_id, - team_id=team_id, - kwargs=kwargs, - completion_response=completion_response, - start_time=start_time, - end_time=end_time, - org_id=org_id, - ) + ) + _metadata["user_api_key"] = user_api_key_dict.api_key + _metadata["status"] = "failure" + _metadata["error_information"] = ( + StandardLoggingPayloadSetup.get_error_information( + original_exception=original_exception, + ) + ) - # update cache - 
asyncio.create_task( - update_cache( + existing_metadata: dict = request_data.get("metadata", None) or {} + existing_metadata.update(_metadata) + existing_metadata["proxy_server_request"] = ( + request_data.get("proxy_server_request", {}) or {} + ) + request_data["litellm_params"] = {} + request_data["litellm_params"]["metadata"] = existing_metadata + + await update_database( + token=user_api_key_dict.api_key, + response_cost=0.0, + user_id=user_api_key_dict.user_id, + end_user_id=user_api_key_dict.end_user_id, + team_id=user_api_key_dict.team_id, + kwargs=request_data, + completion_response=original_exception, + start_time=datetime.now(), + end_time=datetime.now(), + org_id=user_api_key_dict.org_id, + ) + + @log_db_metrics + async def _PROXY_track_cost_callback( + self, + kwargs, # kwargs to completion + completion_response: Optional[ + Union[litellm.ModelResponse, Any] + ], # response from completion + start_time=None, + end_time=None, # start/end time for completion + ): + from litellm.proxy.proxy_server import ( + prisma_client, + proxy_logging_obj, + update_cache, + update_database, + ) + + verbose_proxy_logger.debug("INSIDE _PROXY_track_cost_callback") + try: + verbose_proxy_logger.debug( + f"kwargs stream: {kwargs.get('stream', None)} + complete streaming response: {kwargs.get('complete_streaming_response', None)}" + ) + parent_otel_span = _get_parent_otel_span_from_kwargs(kwargs=kwargs) + litellm_params = kwargs.get("litellm_params", {}) or {} + end_user_id = get_end_user_id_for_cost_tracking(litellm_params) + metadata = get_litellm_metadata_from_kwargs(kwargs=kwargs) + user_id = cast(Optional[str], metadata.get("user_api_key_user_id", None)) + team_id = cast(Optional[str], metadata.get("user_api_key_team_id", None)) + org_id = cast(Optional[str], metadata.get("user_api_key_org_id", None)) + key_alias = cast(Optional[str], metadata.get("user_api_key_alias", None)) + end_user_max_budget = metadata.get("user_api_end_user_max_budget", None) + sl_object: Optional[StandardLoggingPayload] = kwargs.get( + "standard_logging_object", None + ) + response_cost = ( + sl_object.get("response_cost", None) + if sl_object is not None + else kwargs.get("response_cost", None) + ) + + if response_cost is not None: + user_api_key = metadata.get("user_api_key", None) + if kwargs.get("cache_hit", False) is True: + response_cost = 0.0 + verbose_proxy_logger.info( + f"Cache Hit: response_cost {response_cost}, for user_id {user_id}" + ) + + verbose_proxy_logger.debug( + f"user_api_key {user_api_key}, prisma_client: {prisma_client}" + ) + if _should_track_cost_callback( + user_api_key=user_api_key, + user_id=user_id, + team_id=team_id, + end_user_id=end_user_id, + ): + ## UPDATE DATABASE + await update_database( token=user_api_key, + response_cost=response_cost, user_id=user_id, end_user_id=end_user_id, - response_cost=response_cost, team_id=team_id, - parent_otel_span=parent_otel_span, + kwargs=kwargs, + completion_response=completion_response, + start_time=start_time, + end_time=end_time, + org_id=org_id, ) - ) - await proxy_logging_obj.slack_alerting_instance.customer_spend_alert( - token=user_api_key, - key_alias=key_alias, - end_user_id=end_user_id, - response_cost=response_cost, - max_budget=end_user_max_budget, - ) - else: - raise Exception( - "User API key and team id and user id missing from custom callback." 
- ) - else: - if kwargs["stream"] is not True or ( - kwargs["stream"] is True and "complete_streaming_response" in kwargs - ): - if sl_object is not None: - cost_tracking_failure_debug_info: Union[dict, str] = ( - sl_object["response_cost_failure_debug_info"] # type: ignore - or "response_cost_failure_debug_info is None in standard_logging_object" + # update cache + asyncio.create_task( + update_cache( + token=user_api_key, + user_id=user_id, + end_user_id=end_user_id, + response_cost=response_cost, + team_id=team_id, + parent_otel_span=parent_otel_span, + ) + ) + + await proxy_logging_obj.slack_alerting_instance.customer_spend_alert( + token=user_api_key, + key_alias=key_alias, + end_user_id=end_user_id, + response_cost=response_cost, + max_budget=end_user_max_budget, ) else: - cost_tracking_failure_debug_info = ( - "standard_logging_object not found" + raise Exception( + "User API key and team id and user id missing from custom callback." ) - model = kwargs.get("model") - raise Exception( - f"Cost tracking failed for model={model}.\nDebug info - {cost_tracking_failure_debug_info}\nAdd custom pricing - https://docs.litellm.ai/docs/proxy/custom_pricing" + else: + if kwargs["stream"] is not True or ( + kwargs["stream"] is True and "complete_streaming_response" in kwargs + ): + if sl_object is not None: + cost_tracking_failure_debug_info: Union[dict, str] = ( + sl_object["response_cost_failure_debug_info"] # type: ignore + or "response_cost_failure_debug_info is None in standard_logging_object" + ) + else: + cost_tracking_failure_debug_info = ( + "standard_logging_object not found" + ) + model = kwargs.get("model") + raise Exception( + f"Cost tracking failed for model={model}.\nDebug info - {cost_tracking_failure_debug_info}\nAdd custom pricing - https://docs.litellm.ai/docs/proxy/custom_pricing" + ) + except Exception as e: + error_msg = f"Error in tracking cost callback - {str(e)}\n Traceback:{traceback.format_exc()}" + model = kwargs.get("model", "") + metadata = kwargs.get("litellm_params", {}).get("metadata", {}) + error_msg += f"\n Args to _PROXY_track_cost_callback\n model: {model}\n metadata: {metadata}\n" + asyncio.create_task( + proxy_logging_obj.failed_tracking_alert( + error_message=error_msg, + failing_model=model, ) - except Exception as e: - error_msg = f"Error in tracking cost callback - {str(e)}\n Traceback:{traceback.format_exc()}" - model = kwargs.get("model", "") - metadata = kwargs.get("litellm_params", {}).get("metadata", {}) - error_msg += f"\n Args to _PROXY_track_cost_callback\n model: {model}\n metadata: {metadata}\n" - asyncio.create_task( - proxy_logging_obj.failed_tracking_alert( - error_message=error_msg, - failing_model=model, ) - ) - verbose_proxy_logger.exception("Error in tracking cost callback - %s", str(e)) + verbose_proxy_logger.exception( + "Error in tracking cost callback - %s", str(e) + ) def _should_track_cost_callback( diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml index f603d5946f..86c32b610a 100644 --- a/litellm/proxy/proxy_config.yaml +++ b/litellm/proxy/proxy_config.yaml @@ -8,4 +8,5 @@ model_list: general_settings: store_model_in_db: true + store_prompts_in_spend_logs: true diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py index d82887a9b9..68fee62c76 100644 --- a/litellm/proxy/proxy_server.py +++ b/litellm/proxy/proxy_server.py @@ -114,6 +114,7 @@ from litellm.litellm_core_utils.core_helpers import ( _get_parent_otel_span_from_kwargs, get_litellm_metadata_from_kwargs, ) +from 
litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler from litellm.proxy._types import * from litellm.proxy.analytics_endpoints.analytics_endpoints import ( @@ -178,7 +179,7 @@ from litellm.proxy.hooks.prompt_injection_detection import ( _OPTIONAL_PromptInjectionDetection, ) from litellm.proxy.hooks.proxy_failure_handler import _PROXY_failure_handler -from litellm.proxy.hooks.proxy_track_cost_callback import _PROXY_track_cost_callback +from litellm.proxy.hooks.proxy_track_cost_callback import _ProxyDBLogger from litellm.proxy.litellm_pre_call_utils import add_litellm_data_to_request from litellm.proxy.management_endpoints.budget_management_endpoints import ( router as budget_management_router, @@ -937,10 +938,7 @@ def load_from_azure_key_vault(use_azure_key_vault: bool = False): def cost_tracking(): global prisma_client if prisma_client is not None: - if isinstance(litellm._async_success_callback, list): - verbose_proxy_logger.debug("setting litellm success callback to track cost") - if (_PROXY_track_cost_callback) not in litellm._async_success_callback: # type: ignore - litellm.logging_callback_manager.add_litellm_async_success_callback(_PROXY_track_cost_callback) # type: ignore + litellm.logging_callback_manager.add_litellm_callback(_ProxyDBLogger()) def error_tracking(): @@ -3727,9 +3725,14 @@ async def chat_completion( # noqa: PLR0915 timeout = getattr( e, "timeout", None ) # returns the timeout set by the wrapper. Used for testing if model-specific timeout are set correctly - + _litellm_logging_obj: Optional[LiteLLMLoggingObj] = data.get( + "litellm_logging_obj", None + ) custom_headers = get_custom_headers( user_api_key_dict=user_api_key_dict, + call_id=( + _litellm_logging_obj.litellm_call_id if _litellm_logging_obj else None + ), version=version, response_cost=0, model_region=getattr(user_api_key_dict, "allowed_model_region", ""), diff --git a/litellm/proxy/spend_tracking/spend_tracking_utils.py b/litellm/proxy/spend_tracking/spend_tracking_utils.py index fad310e763..82b64cd50d 100644 --- a/litellm/proxy/spend_tracking/spend_tracking_utils.py +++ b/litellm/proxy/spend_tracking/spend_tracking_utils.py @@ -47,6 +47,8 @@ def _get_spend_logs_metadata( requester_ip_address=None, additional_usage_values=None, applied_guardrails=None, + status=None or "success", + error_information=None, ) verbose_proxy_logger.debug( "getting payload for SpendLogs, available keys in metadata: " @@ -161,7 +163,6 @@ def get_logging_payload( # noqa: PLR0915 import time id = f"{id}_cache_hit{time.time()}" # SpendLogs does not allow duplicate request_id - try: payload: SpendLogsPayload = SpendLogsPayload( request_id=str(id), @@ -193,7 +194,9 @@ def get_logging_payload( # noqa: PLR0915 model_id=_model_id, requester_ip_address=clean_metadata.get("requester_ip_address", None), custom_llm_provider=kwargs.get("custom_llm_provider", ""), - messages=_get_messages_for_spend_logs_payload(standard_logging_payload), + messages=_get_messages_for_spend_logs_payload( + standard_logging_payload=standard_logging_payload, metadata=metadata + ), response=_get_response_for_spend_logs_payload(standard_logging_payload), ) @@ -293,12 +296,19 @@ async def get_spend_by_team_and_customer( def _get_messages_for_spend_logs_payload( - payload: Optional[StandardLoggingPayload], + standard_logging_payload: Optional[StandardLoggingPayload], + metadata: Optional[dict] = None, ) -> str: - if payload is None: - return "{}" if 
_should_store_prompts_and_responses_in_spend_logs(): - return json.dumps(payload.get("messages", {})) + metadata = metadata or {} + if metadata.get("status", None) == "failure": + _proxy_server_request = metadata.get("proxy_server_request", {}) + _request_body = _proxy_server_request.get("body", {}) or {} + return json.dumps(_request_body, default=str) + else: + if standard_logging_payload is None: + return "{}" + return json.dumps(standard_logging_payload.get("messages", {})) return "{}" diff --git a/litellm/proxy/utils.py b/litellm/proxy/utils.py index 8042a78139..525c4e684f 100644 --- a/litellm/proxy/utils.py +++ b/litellm/proxy/utils.py @@ -784,13 +784,17 @@ class ProxyLogging: else: _callback = callback # type: ignore if _callback is not None and isinstance(_callback, CustomLogger): - await _callback.async_post_call_failure_hook( - request_data=request_data, - user_api_key_dict=user_api_key_dict, - original_exception=original_exception, + asyncio.create_task( + _callback.async_post_call_failure_hook( + request_data=request_data, + user_api_key_dict=user_api_key_dict, + original_exception=original_exception, + ) ) except Exception as e: - raise e + verbose_proxy_logger.exception( + f"[Non-Blocking] Error in post_call_failure_hook: {e}" + ) return def _is_proxy_only_error( diff --git a/litellm/types/utils.py b/litellm/types/utils.py index 8fadb93ee0..dcaf5f35d1 100644 --- a/litellm/types/utils.py +++ b/litellm/types/utils.py @@ -1606,6 +1606,8 @@ class StandardLoggingPayloadErrorInformation(TypedDict, total=False): error_code: Optional[str] error_class: Optional[str] llm_provider: Optional[str] + traceback: Optional[str] + error_message: Optional[str] class StandardLoggingGuardrailInformation(TypedDict, total=False): diff --git a/tests/litellm/proxy/hooks/test_proxy_track_cost_callback.py b/tests/litellm/proxy/hooks/test_proxy_track_cost_callback.py new file mode 100644 index 0000000000..7edfc69a3d --- /dev/null +++ b/tests/litellm/proxy/hooks/test_proxy_track_cost_callback.py @@ -0,0 +1,80 @@ +import json +import os +import sys + +import pytest +from fastapi.testclient import TestClient + +sys.path.insert( + 0, os.path.abspath("../../../..") +) # Adds the parent directory to the system path + +from datetime import datetime +from unittest.mock import AsyncMock, MagicMock, patch + +from litellm.proxy._types import UserAPIKeyAuth +from litellm.proxy.hooks.proxy_track_cost_callback import _ProxyDBLogger +from litellm.types.utils import StandardLoggingPayload + + +@pytest.mark.asyncio +async def test_async_post_call_failure_hook(): + # Setup + logger = _ProxyDBLogger() + + # Mock user_api_key_dict + user_api_key_dict = UserAPIKeyAuth( + api_key="test_api_key", + key_alias="test_alias", + user_email="test@example.com", + user_id="test_user_id", + team_id="test_team_id", + org_id="test_org_id", + team_alias="test_team_alias", + end_user_id="test_end_user_id", + ) + + # Mock request data + request_data = { + "model": "gpt-4", + "messages": [{"role": "user", "content": "Hello"}], + "metadata": {"original_key": "original_value"}, + "proxy_server_request": {"request_id": "test_request_id"}, + } + + # Mock exception + original_exception = Exception("Test exception") + + # Mock update_database function + with patch( + "litellm.proxy.proxy_server.update_database", new_callable=AsyncMock + ) as mock_update_database: + # Call the method + await logger.async_post_call_failure_hook( + request_data=request_data, + original_exception=original_exception, + user_api_key_dict=user_api_key_dict, + ) + + # 
Assertions + mock_update_database.assert_called_once() + + # Check the arguments passed to update_database + call_args = mock_update_database.call_args[1] + assert call_args["token"] == "test_api_key" + assert call_args["response_cost"] == 0.0 + assert call_args["user_id"] == "test_user_id" + assert call_args["end_user_id"] == "test_end_user_id" + assert call_args["team_id"] == "test_team_id" + assert call_args["org_id"] == "test_org_id" + assert call_args["completion_response"] == original_exception + + # Check that metadata was properly updated + assert "litellm_params" in call_args["kwargs"] + metadata = call_args["kwargs"]["litellm_params"]["metadata"] + assert metadata["user_api_key"] == "test_api_key" + assert metadata["status"] == "failure" + assert "error_information" in metadata + assert metadata["original_key"] == "original_value" + assert "proxy_server_request" in metadata + assert metadata["proxy_server_request"]["request_id"] == "test_request_id" diff --git a/tests/logging_callback_tests/test_spend_logs.py b/tests/logging_callback_tests/test_spend_logs.py index 74dfeb54ad..4c878675d7 100644 --- a/tests/logging_callback_tests/test_spend_logs.py +++ b/tests/logging_callback_tests/test_spend_logs.py @@ -96,6 +96,8 @@ def test_spend_logs_payload(model_id: Optional[str]): }, "api_base": "https://openai-gpt-4-test-v-1.openai.azure.com/", "caching_groups": None, + "error_information": None, + "status": "success", "raw_request": "\n\nPOST Request Sent from LiteLLM:\ncurl -X POST \\\nhttps://openai-gpt-4-test-v-1.openai.azure.com//openai/ \\\n-H 'Authorization: *****' \\\n-d '{'model': 'chatgpt-v-2', 'messages': [{'role': 'system', 'content': 'you are a helpful assistant.\\n'}, {'role': 'user', 'content': 'bom dia'}], 'stream': False, 'max_tokens': 10, 'user': '116544810872468347480', 'extra_body': {}}'\n", }, "model_info": { diff --git a/tests/logging_callback_tests/test_standard_logging_payload.py b/tests/logging_callback_tests/test_standard_logging_payload.py index 084be4756b..07871d3eea 100644 --- a/tests/logging_callback_tests/test_standard_logging_payload.py +++ b/tests/logging_callback_tests/test_standard_logging_payload.py @@ -413,6 +413,7 @@ def test_get_error_information(): assert result["error_code"] == "429" assert result["error_class"] == "RateLimitError" assert result["llm_provider"] == "openai" + assert result["error_message"] == "litellm.RateLimitError: Test error" def test_get_response_time(): diff --git a/tests/proxy_unit_tests/test_key_generate_prisma.py b/tests/proxy_unit_tests/test_key_generate_prisma.py index b9c21c49ee..c47a37ec6a 100644 --- a/tests/proxy_unit_tests/test_key_generate_prisma.py +++ b/tests/proxy_unit_tests/test_key_generate_prisma.py @@ -507,9 +507,9 @@ def test_call_with_user_over_budget(prisma_client): # update spend using track_cost callback, make 2nd request, it should fail from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() resp = ModelResponse( id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", @@ -526,7 +526,7 @@ def test_call_with_user_over_budget(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "stream": False, "litellm_params": { @@ -604,9 
+604,9 @@ def test_call_with_end_user_over_budget(prisma_client): # update spend using track_cost callback, make 2nd request, it should fail from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() resp = ModelResponse( id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", @@ -623,7 +623,7 @@ def test_call_with_end_user_over_budget(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "stream": False, "litellm_params": { @@ -711,9 +711,9 @@ def test_call_with_proxy_over_budget(prisma_client): # update spend using track_cost callback, make 2nd request, it should fail from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() resp = ModelResponse( id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", @@ -730,7 +730,7 @@ def test_call_with_proxy_over_budget(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "stream": False, "litellm_params": { @@ -802,9 +802,9 @@ def test_call_with_user_over_budget_stream(prisma_client): # update spend using track_cost callback, make 2nd request, it should fail from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() resp = ModelResponse( id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", @@ -821,7 +821,7 @@ def test_call_with_user_over_budget_stream(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "stream": True, "complete_streaming_response": resp, @@ -908,9 +908,9 @@ def test_call_with_proxy_over_budget_stream(prisma_client): # update spend using track_cost callback, make 2nd request, it should fail from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() resp = ModelResponse( id="chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac", @@ -927,7 +927,7 @@ def test_call_with_proxy_over_budget_stream(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "stream": True, "complete_streaming_response": resp, @@ -1519,9 +1519,9 @@ def test_call_with_key_over_budget(prisma_client): # update spend using track_cost callback, make 2nd request, it should fail from litellm import Choices, Message, ModelResponse, Usage from litellm.caching.caching 
import Cache - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() litellm.cache = Cache() import time @@ -1544,7 +1544,7 @@ def test_call_with_key_over_budget(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "model": "chatgpt-v-2", "stream": False, @@ -1636,9 +1636,7 @@ def test_call_with_key_over_budget_no_cache(prisma_client): print("result from user auth with new key", result) # update spend using track_cost callback, make 2nd request, it should fail - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger from litellm.proxy.proxy_server import user_api_key_cache user_api_key_cache.in_memory_cache.cache_dict = {} @@ -1668,7 +1666,8 @@ def test_call_with_key_over_budget_no_cache(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + proxy_db_logger = _ProxyDBLogger() + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "model": "chatgpt-v-2", "stream": False, @@ -1874,9 +1873,9 @@ async def test_call_with_key_never_over_budget(prisma_client): import uuid from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() request_id = f"chatcmpl-{uuid.uuid4()}" @@ -1897,7 +1896,7 @@ async def test_call_with_key_never_over_budget(prisma_client): prompt_tokens=210000, completion_tokens=200000, total_tokens=41000 ), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "model": "chatgpt-v-2", "stream": False, @@ -1965,9 +1964,9 @@ async def test_call_with_key_over_budget_stream(prisma_client): import uuid from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger + + proxy_db_logger = _ProxyDBLogger() request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{uuid.uuid4()}" resp = ModelResponse( @@ -1985,7 +1984,7 @@ async def test_call_with_key_over_budget_stream(prisma_client): model="gpt-35-turbo", # azure always has model written like this usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "call_type": "acompletion", "model": "sagemaker-chatgpt-v-2", @@ -2409,9 +2408,7 @@ async def track_cost_callback_helper_fn(generated_key: str, user_id: str): import uuid from litellm import Choices, Message, ModelResponse, Usage - from litellm.proxy.proxy_server import ( - _PROXY_track_cost_callback as track_cost_callback, - ) + from litellm.proxy.proxy_server import _ProxyDBLogger request_id = f"chatcmpl-e41836bb-bb8b-4df2-8e70-8f3e160155ac{uuid.uuid4()}" resp = ModelResponse( @@ -2429,7 +2426,8 @@ async def track_cost_callback_helper_fn(generated_key: str, user_id: str): model="gpt-35-turbo", # azure always has model written like this 
usage=Usage(prompt_tokens=210, completion_tokens=200, total_tokens=410), ) - await track_cost_callback( + proxy_db_logger = _ProxyDBLogger() + await proxy_db_logger._PROXY_track_cost_callback( kwargs={ "call_type": "acompletion", "model": "sagemaker-chatgpt-v-2", diff --git a/tests/store_model_in_db_tests/test_openai_error_handling.py b/tests/store_model_in_db_tests/test_openai_error_handling.py index ea94c31c81..808a3618b1 100644 --- a/tests/store_model_in_db_tests/test_openai_error_handling.py +++ b/tests/store_model_in_db_tests/test_openai_error_handling.py @@ -115,3 +115,97 @@ def test_missing_model_parameter_curl(curl_command): print("error in response", json.dumps(response, indent=4)) assert "litellm.BadRequestError" in response["error"]["message"] + + +@pytest.mark.asyncio +async def test_chat_completion_bad_model_with_spend_logs(): + """ + Tests that Error Logs are created for failed requests + """ + import json + + key = generate_key_sync() + + # Use httpx to make the request and capture headers + url = "http://0.0.0.0:4000/v1/chat/completions" + headers = {"Authorization": f"Bearer {key}", "Content-Type": "application/json"} + payload = { + "model": "non-existent-model", + "messages": [{"role": "user", "content": "Hello!"}], + } + + with httpx.Client() as client: + response = client.post(url, headers=headers, json=payload) + + # Extract the litellm call ID from headers + litellm_call_id = response.headers.get("x-litellm-call-id") + print(f"Status code: {response.status_code}") + print(f"Headers: {dict(response.headers)}") + print(f"LiteLLM Call ID: {litellm_call_id}") + + # Parse the JSON response body + try: + response_body = response.json() + print(f"Error response: {json.dumps(response_body, indent=4)}") + except json.JSONDecodeError: + print(f"Could not parse response body as JSON: {response.text}") + + assert ( + litellm_call_id is not None + ), "Failed to get LiteLLM Call ID from response headers" + print("waiting for flushing error log to db....") + await asyncio.sleep(15) + + # Now query the spend logs + url = "http://0.0.0.0:4000/spend/logs?request_id=" + litellm_call_id + headers = {"Authorization": f"Bearer sk-1234", "Content-Type": "application/json"} + + with httpx.Client() as client: + response = client.get( + url, + headers=headers, + ) + + assert ( + response.status_code == 200 + ), f"Failed to get spend logs: {response.status_code}" + + spend_logs = response.json() + + # Print the spend logs payload + print(f"Spend logs response: {json.dumps(spend_logs, indent=4)}") + + # Verify we have logs for the failed request + assert len(spend_logs) > 0, "No spend logs found" + + # Check if the error is recorded in the logs + log_entry = spend_logs[0] # Should be the specific log for our litellm_call_id + + # Verify the structure of the log entry + assert log_entry["request_id"] == litellm_call_id + assert log_entry["model"] == "non-existent-model" + assert log_entry["model_group"] == "non-existent-model" + assert log_entry["spend"] == 0.0 + assert log_entry["total_tokens"] == 0 + assert log_entry["prompt_tokens"] == 0 + assert log_entry["completion_tokens"] == 0 + + # Verify metadata fields + assert log_entry["metadata"]["status"] == "failure" + assert "user_api_key" in log_entry["metadata"] + assert "error_information" in log_entry["metadata"] + + # Verify error information + error_info = log_entry["metadata"]["error_information"] + assert "traceback" in error_info + assert error_info["error_code"] == "400" + assert error_info["error_class"] == "BadRequestError" + assert 
"litellm.BadRequestError" in error_info["error_message"] + assert "non-existent-model" in error_info["error_message"] + + # Verify request details + assert log_entry["cache_hit"] == "False" + assert log_entry["messages"]["model"] == "non-existent-model" + assert log_entry["messages"]["messages"][0]["role"] == "user" + assert log_entry["messages"]["messages"][0]["content"] == "Hello!" + assert log_entry["response"] == {} diff --git a/ui/litellm-dashboard/src/components/view_logs/ErrorViewer.tsx b/ui/litellm-dashboard/src/components/view_logs/ErrorViewer.tsx new file mode 100644 index 0000000000..4a37c39b64 --- /dev/null +++ b/ui/litellm-dashboard/src/components/view_logs/ErrorViewer.tsx @@ -0,0 +1,164 @@ +import React from 'react'; + +interface ErrorViewerProps { + errorInfo: { + error_class?: string; + error_message?: string; + traceback?: string; + llm_provider?: string; + error_code?: string | number; + }; +} + +export const ErrorViewer: React.FC = ({ errorInfo }) => { + const [expandedFrames, setExpandedFrames] = React.useState<{[key: number]: boolean}>({}); + const [allExpanded, setAllExpanded] = React.useState(false); + + // Toggle individual frame + const toggleFrame = (index: number) => { + setExpandedFrames(prev => ({ + ...prev, + [index]: !prev[index] + })); + }; + + // Toggle all frames + const toggleAllFrames = () => { + const newState = !allExpanded; + setAllExpanded(newState); + + if (tracebackFrames.length > 0) { + const newExpandedState: {[key: number]: boolean} = {}; + tracebackFrames.forEach((_, idx) => { + newExpandedState[idx] = newState; + }); + setExpandedFrames(newExpandedState); + } + }; + + // Parse traceback into frames + const parseTraceback = (traceback: string) => { + if (!traceback) return []; + + // Extract file paths, line numbers and code from traceback + const fileLineRegex = /File "([^"]+)", line (\d+)/g; + const matches = Array.from(traceback.matchAll(fileLineRegex)); + + // Create simplified frames + return matches.map(match => { + const filePath = match[1]; + const lineNumber = match[2]; + const fileName = filePath.split('/').pop() || filePath; + + // Extract the context around this frame + const matchIndex = match.index || 0; + const nextMatchIndex = traceback.indexOf('File "', matchIndex + 1); + const frameContent = nextMatchIndex > -1 + ? traceback.substring(matchIndex, nextMatchIndex).trim() + : traceback.substring(matchIndex).trim(); + + // Try to extract the code line + const lines = frameContent.split('\n'); + let code = ''; + if (lines.length > 1) { + code = lines[lines.length - 1].trim(); + } + + return { + filePath, + fileName, + lineNumber, + code, + inFunction: frameContent.includes(' in ') + ? frameContent.split(' in ')[1].split('\n')[0] + : '' + }; + }); + }; + + const tracebackFrames = errorInfo.traceback ? parseTraceback(errorInfo.traceback) : []; + + return ( +
+    {/* [JSX markup lost during extraction. The original block renders an "Error Details"
+         card showing the error class ("Type:") and error message ("Message:"), followed by a
+         collapsible "Traceback" section with expand/collapse-all and copy controls, and one
+         row per parsed frame (line number, file name, enclosing function) that expands to
+         reveal the offending line of code.] */}
+ ); +}; \ No newline at end of file diff --git a/ui/litellm-dashboard/src/components/view_logs/columns.tsx b/ui/litellm-dashboard/src/components/view_logs/columns.tsx index 3c37739b7c..0652e3a8bf 100644 --- a/ui/litellm-dashboard/src/components/view_logs/columns.tsx +++ b/ui/litellm-dashboard/src/components/view_logs/columns.tsx @@ -56,6 +56,24 @@ export const columns: ColumnDef[] = [ accessorKey: "startTime", cell: (info: any) => , }, + { + header: "Status", + accessorKey: "metadata.status", + cell: (info: any) => { + const status = info.getValue() || "Success"; + const isSuccess = status.toLowerCase() !== "failure"; + + return ( + + {isSuccess ? "Success" : "Failure"} + + ); + }, + }, { header: "Request ID", accessorKey: "request_id", @@ -203,3 +221,51 @@ const formatMessage = (message: any): string => { } return String(message); }; + +// Add this new component for displaying request/response with copy buttons +export const RequestResponsePanel = ({ request, response }: { request: any; response: any }) => { + const requestStr = typeof request === 'object' ? JSON.stringify(request, null, 2) : String(request || '{}'); + const responseStr = typeof response === 'object' ? JSON.stringify(response, null, 2) : String(response || '{}'); + + const copyToClipboard = (text: string) => { + navigator.clipboard.writeText(text); + }; + + return ( +
+    {/* [JSX markup lost during extraction. The original block renders two side-by-side
+         panels titled "Request" and "Response", each with a copy-to-clipboard button and a
+         pre-formatted block containing requestStr / responseStr respectively.] */}
+ ); +}; diff --git a/ui/litellm-dashboard/src/components/view_logs/index.tsx b/ui/litellm-dashboard/src/components/view_logs/index.tsx index 9c3c969074..6083e7dad0 100644 --- a/ui/litellm-dashboard/src/components/view_logs/index.tsx +++ b/ui/litellm-dashboard/src/components/view_logs/index.tsx @@ -8,6 +8,9 @@ import { DataTable } from "./table"; import { columns, LogEntry } from "./columns"; import { Row } from "@tanstack/react-table"; import { prefetchLogDetails } from "./prefetch"; +import { RequestResponsePanel } from "./columns"; +import { ErrorViewer } from './ErrorViewer'; + interface SpendLogsTableProps { accessToken: string | null; token: string | null; @@ -574,146 +577,182 @@ function RequestViewer({ row }: { row: Row }) { return input; }; + // Extract error information from metadata if available + const hasError = row.original.metadata?.status === "failure"; + const errorInfo = hasError ? row.original.metadata?.error_information : null; + + // Format the response with error details if present + const formattedResponse = () => { + if (hasError && errorInfo) { + return { + error: { + message: errorInfo.error_message || "An error occurred", + type: errorInfo.error_class || "error", + code: errorInfo.error_code || "unknown", + param: null + } + }; + } + return formatData(row.original.response); + }; + return (
      {/* Combined Info Card */}
      {/* [JSX markup lost during extraction. In the original hunk, the old render (a wide
           "Request Details" card listing Request ID, Api Key, Team ID, Model, Custom LLM
           Provider, Api Base, Call Type, Spend, Total Tokens, Prompt Tokens, Completion
           Tokens, Start Time, End Time, Cache Hit, Cache Key, and Request IP Address,
           followed by separate "Request Tags", "Request", "Response", and "Metadata" cards)
           is replaced with: a condensed "Request Details" card (Request ID, Model, Provider,
           Start/End Time, Tokens, Cost, Cache Hit, IP Address, and a Success/Failure status
           badge); side-by-side "Request" / "Response" panels, where the Response header shows
           "• HTTP code errorInfo?.error_code || 400" on failures and the body renders
           formattedResponse(); an ErrorViewer card shown only when hasError && errorInfo;
           a "Request Tags" card rendering "key: value" chips only when tags exist; and a
           "Metadata" card shown only when metadata is present.] */}
); } \ No newline at end of file
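
For reviewers who want to exercise the new error-log path end to end, here is a minimal sketch mirroring the test_chat_completion_bad_model_with_spend_logs test added above. It assumes a locally running proxy at http://0.0.0.0:4000, a hypothetical virtual key (sk-my-test-key), and the admin key sk-1234 used by that test; the metadata fields it reads come from the SpendLogsMetadata and StandardLoggingPayloadErrorInformation changes in this patch.

import json
import time

import httpx

PROXY_BASE = "http://0.0.0.0:4000"  # assumption: locally running LiteLLM proxy
VIRTUAL_KEY = "sk-my-test-key"      # assumption: any valid virtual key
ADMIN_KEY = "sk-1234"               # admin key used by the e2e test in this patch

# 1. Trigger a failure that the proxy will now log (unknown model -> BadRequestError).
resp = httpx.post(
    f"{PROXY_BASE}/v1/chat/completions",
    headers={"Authorization": f"Bearer {VIRTUAL_KEY}"},
    json={
        "model": "non-existent-model",
        "messages": [{"role": "user", "content": "Hello!"}],
    },
)
call_id = resp.headers.get("x-litellm-call-id")  # now returned even on error responses
print("status:", resp.status_code, "litellm call id:", call_id)

# 2. Give the proxy a moment to flush the failed request into SpendLogs
#    (the e2e test waits 15 seconds).
time.sleep(15)

# 3. Fetch the error log entry for that request id.
logs = httpx.get(
    f"{PROXY_BASE}/spend/logs",
    params={"request_id": call_id},
    headers={"Authorization": f"Bearer {ADMIN_KEY}"},
).json()

entry = logs[0]
error_info = entry["metadata"]["error_information"]
print(entry["metadata"]["status"])   # "failure"
print(error_info["error_class"])     # e.g. "BadRequestError"
print(error_info["error_code"])      # e.g. "400"
print(json.dumps(error_info, indent=2)[:500])  # includes traceback and error_message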