Allow passing thinking param to litellm proxy via client SDK + code QA refactor on get_optional_params (get correct values) (#9386)

* fix(litellm_proxy/chat/transformation.py): support 'thinking' param

Fixes https://github.com/BerriAI/litellm/issues/9380
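
A minimal sketch of what this enables, assuming a LiteLLM proxy running locally with an Anthropic reasoning model configured (model name, api_base, and key below are placeholders):

```python
import litellm

# Hypothetical setup: a LiteLLM proxy at localhost:4000 exposing a
# "claude-3-7-sonnet" deployment; adjust model name, api_base, and key.
response = litellm.completion(
    model="litellm_proxy/claude-3-7-sonnet",
    api_base="http://localhost:4000",
    api_key="sk-1234",
    messages=[{"role": "user", "content": "Outline a 3-step proof sketch."}],
    # The 'thinking' param is now forwarded to the proxy instead of dropped.
    thinking={"type": "enabled", "budget_tokens": 1024},
)
print(response.choices[0].message.content)
```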

* feat(azure/gpt_transformation.py): add azure audio model support

Closes https://github.com/BerriAI/litellm/issues/6305
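
A hedged example of the kind of call this adds support for, following the OpenAI audio chat shape (the deployment name is an assumption):

```python
import litellm

# Assumes an Azure deployment of an audio-capable GPT model named
# "gpt-4o-audio-preview"; credentials come from the usual AZURE_* env vars.
response = litellm.completion(
    model="azure/gpt-4o-audio-preview",
    modalities=["text", "audio"],
    audio={"voice": "alloy", "format": "wav"},
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
```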

* fix(utils.py): use provider_config in common functions

* fix(utils.py): add missing provider configs to get_chat_provider_config
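
Roughly, the refactor means the common helpers resolve a provider config object and ask it which OpenAI params the model supports, rather than branching per provider. An illustrative sketch (names mirror litellm's public helpers, but treat the exact call shape and import paths as approximations):

```python
from litellm import LlmProviders
from litellm.utils import ProviderConfigManager

# Resolve the provider-specific chat config for a model, then query it for
# the OpenAI params it supports.
config = ProviderConfigManager.get_provider_chat_config(
    model="claude-3-sonnet-20240229",
    provider=LlmProviders.ANTHROPIC,
)
supported = config.get_supported_openai_params(model="claude-3-sonnet-20240229")
```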

* test: fix test

* fix: fix path

* feat(utils.py): make bedrock invoke Nova config BaseConfig-compatible

* fix: fix linting errors

* fix(azure_ai/transformation.py): remove buggy optional param filtering for azure ai

Removes an incorrect tool-choice support check when calling Azure AI, which prevented calling models with response_format unless they were on the litellm model cost map
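
After this fix, a call like the following should go through even for an Azure AI deployment that is not in the model cost map (endpoint and deployment names below are placeholders):

```python
import litellm

# Hypothetical Azure AI deployment not listed in the cost map;
# response_format is now forwarded instead of being filtered out.
response = litellm.completion(
    model="azure_ai/my-custom-deployment",
    api_base="https://example-endpoint.services.ai.azure.com",
    api_key="...",
    messages=[{"role": "user", "content": "Reply with a JSON object."}],
    response_format={"type": "json_object"},
)
```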

* fix(amazon_cohere_transformation.py): fix bedrock invoke cohere transformation to inherit from CohereChatConfig

* test: fix azure ai tool choice mapping

* fix: fix model cost map to add 'supports_tool_choice' to cohere models
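
For reference, the affected cost map entries gain a boolean capability flag; an illustrative entry (shape only, not the real pricing data):

```python
# Example model cost map entry with the new flag (values are placeholders).
{
    "cohere.command-r-plus-v1:0": {
        "litellm_provider": "bedrock",
        "mode": "chat",
        "supports_function_calling": True,
        "supports_tool_choice": True,  # flag added by this commit
    }
}
```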

* fix(get_supported_openai_params.py): check if custom llm provider in llm providers

* fix(get_supported_openai_params.py): fix llm provider in list check
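
The guard amounts to validating the provider string against the known provider list before converting it to the enum; a paraphrased sketch, not the exact source:

```python
from litellm import LlmProviders

def to_provider_enum(custom_llm_provider: str):
    """Paraphrased sketch of the fix: only convert known provider strings."""
    if custom_llm_provider in [p.value for p in LlmProviders]:
        return LlmProviders(custom_llm_provider)
    return None  # unknown providers fall through instead of raising
```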

* fix: fix ruff check errors

* fix: support defs when calling bedrock nova
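
Context for the defs fix: nested pydantic models emit a "$defs" section in their JSON schema, which the Nova transformation previously did not handle. A minimal reproduction sketch (the model ID is an assumption and may differ per region):

```python
from pydantic import BaseModel
import litellm

class Address(BaseModel):
    city: str

class Person(BaseModel):
    name: str
    address: Address  # nested model -> "$defs" in the generated JSON schema

# Assumes Bedrock access to a Nova model.
response = litellm.completion(
    model="bedrock/us.amazon.nova-pro-v1:0",
    messages=[{"role": "user", "content": "Extract: Jane lives in Oslo."}],
    response_format=Person,
)
```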

* fix(factory.py): fix test
Author: Krish Dholakia, 2025-04-07 21:04:11 -07:00 (committed by GitHub)
Commit: ac9f03beae (parent: fcf17d114f)
21 changed files with 278 additions and 86 deletions


@@ -880,6 +880,9 @@ def test_completion_azure_mistral_large_function_calling(provider):
     This primarily tests if the 'Function()' pydantic object correctly handles argument param passed in as a dict vs. string
     """
     litellm.set_verbose = True
+    os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
+    model_cost = litellm.get_model_cost_map(url="")
+
     tools = [
         {
             "type": "function",
@@ -1903,16 +1906,16 @@ def test_completion_openai():
 @pytest.mark.parametrize(
     "model, api_version",
     [
-        ("gpt-4o-2024-08-06", None),
-        ("azure/chatgpt-v-2", None),
+        # ("gpt-4o-2024-08-06", None),
+        # ("azure/chatgpt-v-2", None),
         ("bedrock/anthropic.claude-3-sonnet-20240229-v1:0", None),
-        ("azure/gpt-4o", "2024-08-01-preview"),
+        # ("azure/gpt-4o", "2024-08-01-preview"),
     ],
 )
 @pytest.mark.flaky(retries=3, delay=1)
 def test_completion_openai_pydantic(model, api_version):
     try:
-        litellm.set_verbose = True
+        litellm._turn_on_debug()
         from pydantic import BaseModel

         messages = [