UI - new API Playground for testing LiteLLM translation (#9073)

* feat: initial commit - enable dev to see translated request

* feat(utils.py): expose new endpoint - `/utils/transform_request` to see the raw request sent by litellm

* feat(transform_request.tsx): allow user to see their transformed request

* refactor(litellm_logging.py): return raw request in 3 parts - api_base, headers, request body

easier to render each part individually in the UI than to extract them from a single combined string

* feat: transform_request.tsx

working e2e raw request viewing

* fix(litellm_logging.py): fix transform viewing for bedrock models

* fix(litellm_logging.py): don't return sensitive headers in raw request headers

prevents accidental leaks of credentials such as API keys

* feat(transform_request.tsx): style improvements
This commit is contained in:
Krish Dholakia 2025-03-07 19:39:31 -08:00 committed by GitHub
parent 4ed9db4093
commit 9fc7bd0493
9 changed files with 426 additions and 13 deletions

View file

@ -290,7 +290,7 @@ from litellm.types.router import ModelInfo as RouterModelInfo
from litellm.types.router import RouterGeneralSettings, updateDeployment
from litellm.types.utils import CustomHuggingfaceTokenizer
from litellm.types.utils import ModelInfo as ModelMapInfo
from litellm.types.utils import StandardLoggingPayload
from litellm.types.utils import RawRequestTypedDict, StandardLoggingPayload
from litellm.utils import _add_custom_logger_callback_to_specific_event
try:
@ -5604,6 +5604,18 @@ async def supported_openai_params(model: str):
)
@router.post(
    "/utils/transform_request",
    tags=["llm utils"],
    # Requires a valid proxy API key; request is rejected otherwise.
    dependencies=[Depends(user_api_key_auth)],
    response_model=RawRequestTypedDict,
)
async def transform_request(request: TransformRequestBody) -> RawRequestTypedDict:
    """Return the raw provider request LiteLLM would send for the given call.

    Debug/playground endpoint: translates `request.request_body` for the
    endpoint type in `request.call_type` without actually calling the provider,
    so users can inspect the transformed request (api_base, headers, body).
    """
    # Imported lazily at call time rather than at module import.
    from litellm.utils import return_raw_request

    return return_raw_request(endpoint=request.call_type, kwargs=request.request_body)
#### [BETA] - This is a beta endpoint, format might change based on user feedback. - https://github.com/BerriAI/litellm/issues/964
@router.post(
"/model/new",