[Feat] Add litellm.supports_reasoning() util to track if an LLM supports reasoning (#9923)

* add supports_reasoning for xai models

* add "supports_reasoning": true for o1 series models

* add supports_reasoning util

* add litellm.supports_reasoning

* add supports reasoning for claude 3-7 models

* add deepseek as supports reasoning

* test_supports_reasoning

* add supports reasoning to model group info

* add supports_reasoning

* docs supports reasoning

* fix supports_reasoning test

* "supports_reasoning": false,

* fix test

* supports_reasoning
Ishaan Jaff, 2025-04-11 17:56:04 -07:00, committed by GitHub
parent 311c70698f
commit f9ce754817
13 changed files with 301 additions and 73 deletions
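
For quick reference, a minimal usage sketch of the new util (model names and expected booleans are taken from the parametrized test in the diff below):

import litellm

# supports_reasoning(model=...) returns a bool indicating whether the
# model is flagged as reasoning-capable in litellm's model cost map.
print(litellm.supports_reasoning(model="openai/o3-mini"))        # True
print(litellm.supports_reasoning(model="xai/grok-3-mini-beta"))  # True
print(litellm.supports_reasoning(model="gpt-3.5-turbo"))         # False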


@@ -514,6 +514,26 @@ def test_supports_web_search(model, expected_bool):
        pytest.fail(f"Error occurred: {e}")


@pytest.mark.parametrize(
    "model, expected_bool",
    [
        ("openai/o3-mini", True),
        ("o3-mini", True),
        ("xai/grok-3-mini-beta", True),
        ("xai/grok-3-mini-fast-beta", True),
        ("xai/grok-2", False),
        ("gpt-3.5-turbo", False),
    ],
)
def test_supports_reasoning(model, expected_bool):
    os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
    litellm.model_cost = litellm.get_model_cost_map(url="")
    try:
        assert litellm.supports_reasoning(model=model) == expected_bool
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


def test_get_max_token_unit_test():
    """
    More complete testing in `test_completion_cost.py`
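
For context, the new flag lives in litellm's model cost map, which the commit updates for the o1 series, claude-3-7, xai, and deepseek models. A short sketch of how to inspect the flag directly, reusing the local-map setup from the test above (the full entry schema is not shown in this diff, so the "o3-mini" key and the .get() fallback are illustrative):

import os
import litellm

# Load the local model cost map, mirroring the test setup above.
os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
litellm.model_cost = litellm.get_model_cost_map(url="")

# Each entry holds per-model metadata; this commit adds a
# "supports_reasoning" key for reasoning-capable models.
print(litellm.model_cost["o3-mini"].get("supports_reasoning", False))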