litellm-mirror/litellm/llms/litellm_proxy/chat/transformation.py
Krish Dholakia f08a4e3c06
Support 'file' message type for VLLM video URLs + Anthropic redacted message thinking support (#10129)
* feat(hosted_vllm/chat/transformation.py): support calling VLLM video URLs with the OpenAI 'file' message type

allows switching between Gemini and VLLM easily
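A minimal usage sketch, assuming the video URL rides in the 'file' content part's `file_id` field (the field name and model name are illustrative, not confirmed by this diff):

    import litellm

    response = litellm.completion(
        model="hosted_vllm/qwen2-vl",  # hypothetical model name
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Summarize this clip."},
                    # assumed shape of the 'file' part carrying a video URL
                    {
                        "type": "file",
                        "file": {"file_id": "https://example.com/video.mp4"},
                    },
                ],
            }
        ],
    )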

* [WIP] redacted thinking tests (#9044)

* WIP: redacted thinking tests

* test: add test for redacted thinking in assistant message

---------

Co-authored-by: Krish Dholakia <krrishdholakia@gmail.com>

* fix(anthropic/chat/transformation.py): support redacted thinking block on anthropic completion

Fixes https://github.com/BerriAI/litellm/issues/9058

* fix(anthropic/chat/handler.py): transform anthropic redacted messages on streaming

Fixes https://github.com/BerriAI/litellm/issues/9058

* fix(bedrock/): support redacted text on streaming + non-streaming

Fixes https://github.com/BerriAI/litellm/issues/9058
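For context, Anthropic returns redacted reasoning as a `redacted_thinking` content block of the form `{"type": "redacted_thinking", "data": "<encrypted>"}`. A minimal sketch of reading it back follows; the `thinking_blocks` attribute is an assumption about where litellm surfaces these blocks:

    import litellm

    response = litellm.completion(
        model="anthropic/claude-3-7-sonnet-20250219",
        thinking={"type": "enabled", "budget_tokens": 1024},
        messages=[{"role": "user", "content": "Hello"}],
    )
    # assumed location; e.g. [{"type": "redacted_thinking", "data": "..."}]
    print(getattr(response.choices[0].message, "thinking_blocks", None))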

* feat(litellm_proxy/chat/transformation.py): support 'reasoning_effort' param for proxy

allows using reasoning effort with thinking models on proxy
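A sketch of the new param flowing through the proxy provider (the model alias, api_base, and api_key are placeholders):

    import litellm

    response = litellm.completion(
        model="litellm_proxy/claude-3-7-sonnet",  # hypothetical proxy alias
        api_base="http://localhost:4000",
        api_key="sk-1234",
        reasoning_effort="low",
        messages=[{"role": "user", "content": "Why is the sky blue?"}],
    )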

* test: update tests

* fix(utils.py): fix linting error

* fix: fix linting errors

* fix: fix linting errors

* fix: fix linting error

* fix: fix linting errors

* fix(anthropic/chat/transformation.py): fix returning citations in chat completion

---------

Co-authored-by: Johann Miller <22018973+johannkm@users.noreply.github.com>
2025-04-19 11:16:37 -07:00

"""
Translate from OpenAI's `/v1/chat/completions` to VLLM's `/v1/chat/completions`
"""
from typing import List, Optional, Tuple

from litellm.secret_managers.main import get_secret_str

from ...openai.chat.gpt_transformation import OpenAIGPTConfig


class LiteLLMProxyChatConfig(OpenAIGPTConfig):
    def get_supported_openai_params(self, model: str) -> List:
        # Extend the base OpenAI param list with the Anthropic-style
        # `thinking` param and `reasoning_effort`, both forwarded by the proxy.
        supported_params = super().get_supported_openai_params(model)
        supported_params.append("thinking")
        supported_params.append("reasoning_effort")
        return supported_params

    def _map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        supported_openai_params = self.get_supported_openai_params(model)
        for param, value in non_default_params.items():
            if param == "thinking":
                # `thinking` is not a native OpenAI param, so tuck it into
                # `extra_body` for the downstream provider to pick up.
                optional_params.setdefault("extra_body", {})["thinking"] = value
            elif param in supported_openai_params:
                optional_params[param] = value
        return optional_params

    def _get_openai_compatible_provider_info(
        self, api_base: Optional[str], api_key: Optional[str]
    ) -> Tuple[Optional[str], Optional[str]]:
        # Fall back to env vars when api_base/api_key are not passed explicitly.
        api_base = api_base or get_secret_str("LITELLM_PROXY_API_BASE")  # type: ignore
        dynamic_api_key = api_key or get_secret_str("LITELLM_PROXY_API_KEY")
        return api_base, dynamic_api_key

    def get_models(
        self, api_key: Optional[str] = None, api_base: Optional[str] = None
    ) -> List[str]:
        api_base, api_key = self._get_openai_compatible_provider_info(api_base, api_key)
        if api_base is None:
            raise ValueError(
                "api_base not set for LiteLLM Proxy route. Set in env via `LITELLM_PROXY_API_BASE`"
            )
        models = super().get_models(api_key=api_key, api_base=api_base)
        # Prefix each model with the provider so callers can route back through litellm.
        return [f"litellm_proxy/{model}" for model in models]

    @staticmethod
    def get_api_key(api_key: Optional[str] = None) -> Optional[str]:
        return api_key or get_secret_str("LITELLM_PROXY_API_KEY")
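
# A minimal usage sketch of the mapping above (model name is illustrative):
# `thinking` is tucked into `extra_body`, while `reasoning_effort` passes
# straight through as a supported param.
#
#     config = LiteLLMProxyChatConfig()
#     optional_params = config._map_openai_params(
#         non_default_params={
#             "thinking": {"type": "enabled", "budget_tokens": 1024},
#             "reasoning_effort": "low",
#         },
#         optional_params={},
#         model="claude-3-7-sonnet",
#         drop_params=False,
#     )
#     # -> {"extra_body": {"thinking": {...}}, "reasoning_effort": "low"}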