Allow passing thinking param to litellm proxy via client sdk + Code QA Refactor on get_optional_params (get correct values) (#9386)
* fix(litellm_proxy/chat/transformation.py): support 'thinking' param. Fixes https://github.com/BerriAI/litellm/issues/9380
* feat(azure/gpt_transformation.py): add azure audio model support. Closes https://github.com/BerriAI/litellm/issues/6305
* fix(utils.py): use provider_config in common functions
* fix(utils.py): add missing provider configs to get_chat_provider_config
* test: fix test
* fix: fix path
* feat(utils.py): make bedrock invoke nova config baseconfig compatible
* fix: fix linting errors
* fix(azure_ai/transformation.py): remove buggy optional-param filtering for azure ai. Removes an incorrect tool-choice support check when calling Azure AI that prevented calling models with response_format unless they were on the litellm model cost map
* fix(amazon_cohere_transformation.py): fix bedrock invoke cohere transformation to inherit from CohereChatConfig
* test: fix azure ai tool choice mapping
* fix: fix model cost map to add 'supports_tool_choice' to cohere models
* fix(get_supported_openai_params.py): check if custom llm provider is in llm providers
* fix(get_supported_openai_params.py): fix llm-provider-in-list check
* fix: fix ruff check errors
* fix: support defs when calling bedrock nova
* fix(factory.py): fix test
This commit is contained in:
parent fcf17d114f
commit ac9f03beae

21 changed files with 278 additions and 86 deletions
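For context, a minimal sketch of what the 'thinking' fix enables from the client side. The proxy URL, API key, and model alias below are assumptions for illustration, not values from this commit:

```python
# A minimal sketch, assuming a LiteLLM proxy at localhost:4000 with a
# hypothetical Claude alias and key; none of these values come from the diff.
import litellm

response = litellm.completion(
    model="litellm_proxy/claude-3-7-sonnet",  # hypothetical alias on the proxy
    api_base="http://localhost:4000",         # assumed proxy address
    api_key="sk-1234",                        # assumed proxy key
    messages=[{"role": "user", "content": "What is 23 * 17? Think it through."}],
    thinking={"type": "enabled", "budget_tokens": 1024},
)
print(response.choices[0].message.content)
```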
litellm/utils.py

@@ -3376,7 +3376,6 @@ def get_optional_params(  # noqa: PLR0915
                     if drop_params is not None and isinstance(drop_params, bool)
                     else False
                 ),
                 messages=messages,
             )
-
         elif "anthropic" in bedrock_base_model and bedrock_route == "invoke":
@@ -3719,6 +3718,17 @@ def get_optional_params(  # noqa: PLR0915
                 else False
             ),
         )
+    elif provider_config is not None:
+        optional_params = provider_config.map_openai_params(
+            non_default_params=non_default_params,
+            optional_params=optional_params,
+            model=model,
+            drop_params=(
+                drop_params
+                if drop_params is not None and isinstance(drop_params, bool)
+                else False
+            ),
+        )
     else:  # assume passing in params for openai-like api
         optional_params = litellm.OpenAILikeChatConfig().map_openai_params(
             non_default_params=non_default_params,
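The new elif means any provider with a registered config gets generic OpenAI-param mapping through it, rather than needing its own arm in get_optional_params. A hedged sketch of that call path, using the map_openai_params signature visible in the hunk (model name and params are illustrative):

```python
# Illustrative only: resolve a provider config, then map OpenAI-style params
# through it, mirroring the generic branch added above.
import litellm
from litellm.utils import ProviderConfigManager

model = "claude-3-7-sonnet-20250219"  # example model name
provider_config = ProviderConfigManager.get_provider_chat_config(
    model=model, provider=litellm.LlmProviders.ANTHROPIC
)
if provider_config is not None:
    optional_params = provider_config.map_openai_params(
        non_default_params={"temperature": 0.2, "max_tokens": 256},
        optional_params={},
        model=model,
        drop_params=False,
    )
    print(optional_params)  # provider-native param names and values
```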
@@ -6202,7 +6212,7 @@ class ProviderConfigManager:
     @staticmethod
     def get_provider_chat_config(  # noqa: PLR0915
         model: str, provider: LlmProviders
-    ) -> BaseConfig:
+    ) -> Optional[BaseConfig]:
         """
         Returns the provider config for a given provider.
         """
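With the return type widened to Optional[BaseConfig], callers can no longer assume a config comes back. A sketch of the guard pattern this implies (the fallback choice here is illustrative, not prescribed by this commit):

```python
# Sketch of the caller-side guard the Optional return now requires; the
# OpenAIGPTConfig fallback is an assumption, not taken from the diff.
import litellm
from litellm.utils import ProviderConfigManager

config = ProviderConfigManager.get_provider_chat_config(
    model="gpt-4o", provider=litellm.LlmProviders.OPENAI
)
if config is None:
    config = litellm.OpenAIGPTConfig()
```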
@@ -6233,9 +6243,22 @@
             return litellm.AnthropicConfig()
         elif litellm.LlmProviders.ANTHROPIC_TEXT == provider:
             return litellm.AnthropicTextConfig()
+        elif litellm.LlmProviders.VERTEX_AI_BETA == provider:
+            return litellm.VertexGeminiConfig()
         elif litellm.LlmProviders.VERTEX_AI == provider:
-            if "claude" in model:
+            if "gemini" in model:
+                return litellm.VertexGeminiConfig()
+            elif "claude" in model:
                 return litellm.VertexAIAnthropicConfig()
+            elif model in litellm.vertex_mistral_models:
+                if "codestral" in model:
+                    return litellm.CodestralTextCompletionConfig()
+                else:
+                    return litellm.MistralConfig()
+            elif model in litellm.vertex_ai_ai21_models:
+                return litellm.VertexAIAi21Config()
+            else:  # use generic openai-like param mapping
+                return litellm.VertexAILlama3Config()
         elif litellm.LlmProviders.CLOUDFLARE == provider:
             return litellm.CloudflareChatConfig()
         elif litellm.LlmProviders.SAGEMAKER_CHAT == provider:
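A quick illustration of where the reworked Vertex AI dispatch routes a few example model names (the names are assumptions; routing follows the substring and membership checks above):

```python
# Example model names only; per the branch logic above, these should resolve
# to VertexGeminiConfig, VertexAIAnthropicConfig, and VertexAILlama3Config.
import litellm
from litellm.utils import ProviderConfigManager

for model in ("gemini-1.5-pro", "claude-3-5-sonnet@20240620", "meta/llama-3.1-405b"):
    cfg = ProviderConfigManager.get_provider_chat_config(
        model=model, provider=litellm.LlmProviders.VERTEX_AI
    )
    print(model, type(cfg).__name__)
```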
@@ -6258,7 +6281,6 @@
             litellm.LlmProviders.CUSTOM == provider
             or litellm.LlmProviders.CUSTOM_OPENAI == provider
             or litellm.LlmProviders.OPENAI_LIKE == provider
-            or litellm.LlmProviders.LITELLM_PROXY == provider
         ):
             return litellm.OpenAILikeChatConfig()
         elif litellm.LlmProviders.AIOHTTP_OPENAI == provider:
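Dropping LITELLM_PROXY from this OpenAI-like grouping is what lets the proxy provider resolve to its own LiteLLMProxyChatConfig (added in the next hunk) instead of the generic OpenAI-like mapping, which is how the thinking param survives the hop through the proxy. A quick hedged check (the model name is illustrative):

```python
# Confirms the proxy provider now resolves to its own config per the next hunk.
import litellm
from litellm.utils import ProviderConfigManager

cfg = ProviderConfigManager.get_provider_chat_config(
    model="gpt-4o", provider=litellm.LlmProviders.LITELLM_PROXY
)
print(type(cfg).__name__)  # expected: LiteLLMProxyChatConfig
```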
@@ -6363,9 +6385,15 @@
                 return litellm.AmazonMistralConfig()
             elif bedrock_invoke_provider == "deepseek_r1":  # deepseek models on bedrock
                 return litellm.AmazonDeepSeekR1Config()
+            elif bedrock_invoke_provider == "nova":
+                return litellm.AmazonInvokeNovaConfig()
             else:
                 return litellm.AmazonInvokeConfig()
-        return litellm.OpenAIGPTConfig()
+        elif litellm.LlmProviders.LITELLM_PROXY == provider:
+            return litellm.LiteLLMProxyChatConfig()
+        elif litellm.LlmProviders.OPENAI == provider:
+            return litellm.OpenAIGPTConfig()
+        return None

     @staticmethod
     def get_provider_embedding_config(
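To round out the Bedrock change, a heavily hedged check that invoke-route Nova models now resolve to the Nova-specific config. The model id and its "invoke/" prefix are assumptions about LiteLLM's bedrock naming, not values from this commit:

```python
# Assumed invoke-route Nova model id; naming is illustrative only.
import litellm
from litellm.utils import ProviderConfigManager

cfg = ProviderConfigManager.get_provider_chat_config(
    model="invoke/amazon.nova-lite-v1:0",
    provider=litellm.LlmProviders.BEDROCK,
)
print(type(cfg).__name__)  # expected per the branch above: AmazonInvokeNovaConfig
```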