mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-24 18:24:20 +00:00
fix(transformation.py): correctly translate 'thinking' param for litellm_proxy/ route (#9904)
* fix(transformation.py): correctly translate 'thinking' param for litellm_proxy/ route Fixes https://github.com/BerriAI/litellm/issues/9892 * test: update test
This commit is contained in:
parent
b9f01c9f5b
commit
069aee9f70
3 changed files with 32 additions and 1 deletions
|
@ -15,6 +15,21 @@ class LiteLLMProxyChatConfig(OpenAIGPTConfig):
|
|||
list.append("thinking")
|
||||
return list
|
||||
|
||||
def _map_openai_params(
    self,
    non_default_params: dict,
    optional_params: dict,
    model: str,
    drop_params: bool,
) -> dict:
    """Translate caller-supplied params for the litellm_proxy/ route.

    The Anthropic-style ``thinking`` param is not a native OpenAI chat
    param, so it is tunneled to the proxy via ``extra_body``; every other
    param is copied over only when the model's supported-param list
    includes it.

    Args:
        non_default_params: params the caller set explicitly.
        optional_params: accumulator dict, mutated in place and returned.
        model: model name used to look up the supported-param list.
        drop_params: accepted for interface parity; unsupported params
            are always skipped here regardless of this flag.

    Returns:
        The (mutated) ``optional_params`` dict.
    """
    allowed = self.get_supported_openai_params(model)
    for name, value in non_default_params.items():
        if name == "thinking":
            # Nest under extra_body so the proxy forwards it verbatim.
            extra_body = optional_params.setdefault("extra_body", {})
            extra_body["thinking"] = value
        elif name in allowed:
            optional_params[name] = value
    return optional_params
|
||||
|
||||
def _get_openai_compatible_provider_info(
|
||||
self, api_base: Optional[str], api_key: Optional[str]
|
||||
) -> Tuple[Optional[str], Optional[str]]:
|
||||
|
|
|
@ -449,3 +449,19 @@ def test_litellm_gateway_from_sdk_with_response_cost_in_additional_headers():
|
|||
)
|
||||
|
||||
assert response._hidden_params["response_cost"] == 120
|
||||
|
||||
|
||||
def test_litellm_gateway_from_sdk_with_thinking_param():
    """The 'thinking' param must be accepted on the litellm_proxy route.

    Nothing listens on the api_base, so a correctly-translated request
    fails with a connection error — not a param-validation error. The
    completion succeeding (no exception) means the test setup is wrong.
    """
    try:
        litellm.completion(
            model="litellm_proxy/anthropic.claude-3-7-sonnet-20250219-v1:0",
            messages=[{"role": "user", "content": "Hello world"}],
            api_base="http://0.0.0.0:4000",
            api_key="sk-PIp1h0RekR",
            # client=openai_client,
            thinking={"type": "enabled", "max_budget": 100},
        )
    except Exception as err:
        assert "Connection error." in str(err)
    else:
        pytest.fail("Expected an error to be raised")
|
||||
|
||||
|
|
|
@ -1410,7 +1410,7 @@ def test_litellm_proxy_thinking_param():
|
|||
custom_llm_provider="litellm_proxy",
|
||||
thinking={"type": "enabled", "budget_tokens": 1024},
|
||||
)
|
||||
assert optional_params["thinking"] == {"type": "enabled", "budget_tokens": 1024}
|
||||
assert optional_params["extra_body"]["thinking"] == {"type": "enabled", "budget_tokens": 1024}
|
||||
|
||||
def test_gemini_modalities_param():
|
||||
optional_params = get_optional_params(
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue