UI - new API Playground for testing LiteLLM translation (#9073)

* feat: initial commit - enable dev to see translated request

* feat(utils.py): expose new endpoint - `/utils/transform_request` to see the raw request sent by litellm (an example call is sketched after this list)

* feat(transform_request.tsx): allow user to see their transformed request

* refactor(litellm_logging.py): return raw request in 3 parts - api_base, headers, request body

each part is easier to render individually on the UI than to extract from a combined string (a sketch of the resulting response shape follows this list)

* feat: transform_request.tsx

working e2e raw request viewing

* fix(litellm_logging.py): fix transform viewing for bedrock models

* fix(litellm_logging.py): don't return sensitive headers in raw request headers

prevents an accidental credential leak (a masking sketch follows this list)

* feat(transform_request.tsx): style improvements
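
For reference, a minimal sketch of calling the new endpoint against a running proxy. The payload field names (`call_type`, `request_body`), the port, and the admin key are assumptions, not confirmed by this diff:

```python
# Hypothetical client call to the new endpoint; "call_type" and
# "request_body" are assumed field names, not confirmed by this diff.
import requests

resp = requests.post(
    "http://localhost:4000/utils/transform_request",  # assumed proxy address
    headers={"Authorization": "Bearer sk-1234"},  # assumed admin key
    json={
        "call_type": "completion",
        "request_body": {
            "model": "gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "hi"}],
        },
    },
)
print(resp.json())  # expected to contain the api_base, headers, and body parts
```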
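The three-part refactor above suggests a response shape along these lines; apart from `error`, which appears in the diff below, the field names are assumptions inferred from the commit message:

```python
from typing import Optional, TypedDict

# Assumed shape of RawRequestTypedDict, inferred from the "3 parts" refactor
# and the `error=` usage in the diff below; field names are not confirmed.
class RawRequestTypedDict(TypedDict, total=False):
    raw_request_api_base: Optional[str]  # e.g. "https://api.openai.com/v1"
    raw_request_headers: Optional[dict]  # sensitive values already stripped
    raw_request_body: Optional[dict]     # provider-specific JSON payload
    error: Optional[str]                 # set when the transformation fails
```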
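The `litellm_logging.py` hunk for the sensitive-header fix isn't shown in this diff; a minimal sketch of the kind of filtering it implies, with `mask_sensitive_headers` and the key set being hypothetical:

```python
# Hypothetical helper illustrating the sensitive-header fix; the function
# name and key set are assumptions, not the committed implementation.
SENSITIVE_HEADER_KEYS = {"authorization", "api-key", "x-api-key"}

def mask_sensitive_headers(headers: dict) -> dict:
    """Drop credential-bearing values before returning headers to the UI."""
    return {
        k: ("*" * 8 if k.lower() in SENSITIVE_HEADER_KEYS else v)
        for k, v in headers.items()
    }
```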
Krish Dholakia · 2025-03-07 19:39:31 -08:00 · committed by GitHub
parent 4ed9db4093 · commit 9fc7bd0493
9 changed files with 426 additions and 13 deletions

litellm/utils.py

@@ -156,6 +156,7 @@ from litellm.types.utils import (
     ModelResponseStream,
     ProviderField,
     ProviderSpecificModelInfo,
+    RawRequestTypedDict,
     SelectTokenizerResponse,
     StreamingChoices,
     TextChoices,
@@ -6477,3 +6478,48 @@ def add_openai_metadata(metadata: dict) -> dict:
     }
     return visible_metadata.copy()
+
+
+def return_raw_request(endpoint: CallTypes, kwargs: dict) -> RawRequestTypedDict:
+    """
+    Return the raw request (api_base, headers, request body) that litellm would send.
+
+    This is currently in BETA, and tested for `/chat/completions` -> `litellm.completion` calls.
+    """
+    from datetime import datetime
+
+    from litellm.litellm_core_utils.litellm_logging import Logging
+
+    # Throwaway logging object with raw request/response capture enabled.
+    litellm_logging_obj = Logging(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "hi"}],
+        stream=False,
+        call_type="acompletion",
+        litellm_call_id="1234",
+        start_time=datetime.now(),
+        function_id="1234",
+        log_raw_request_response=True,
+    )
+
+    llm_api_endpoint = getattr(litellm, endpoint.value)
+
+    received_exception = ""
+    try:
+        llm_api_endpoint(
+            **kwargs,
+            litellm_logging_obj=litellm_logging_obj,
+            api_key="my-fake-api-key",  # 👈 ensure the request fails
+        )
+    except Exception as e:
+        received_exception = str(e)
+
+    # The logging hooks capture the transformed request before the call errors out.
+    raw_request_typed_dict = litellm_logging_obj.model_call_details.get(
+        "raw_request_typed_dict"
+    )
+    if raw_request_typed_dict:
+        return cast(RawRequestTypedDict, raw_request_typed_dict)
+    else:
+        return RawRequestTypedDict(
+            error=received_exception,
+        )
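
For context, a minimal sketch of invoking the helper above directly, assuming it is importable from `litellm.utils` as the commit message implies:

```python
from litellm.types.utils import CallTypes
from litellm.utils import return_raw_request  # assumed import path

raw = return_raw_request(
    endpoint=CallTypes.completion,
    kwargs={
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": "hi"}],
    },
)
# The fake API key makes the provider call fail, but the transformed
# request has already been captured by the logging object.
print(raw)
```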