Litellm dev 01 01 2025 p2 (#7615)

* fix(utils.py): prevent double logging when passing 'fallbacks=' to .completion()

Fixes https://github.com/BerriAI/litellm/issues/7477

* fix(utils.py): fix vertex anthropic check

* fix(utils.py): ensure supported params is always set

Fixes https://github.com/BerriAI/litellm/issues/7470

* test(test_optional_params.py): add unit testing to prevent mistranslation

Fixes https://github.com/BerriAI/litellm/issues/7470

* fix: fix linting error

* test: cleanup
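
A minimal repro sketch of the double-logging issue this fixes (hypothetical callback name; model names are placeholders and assume configured API keys):

    import litellm

    def track_success(kwargs, completion_response, start_time, end_time):
        # Before this fix, passing `fallbacks=` routed the call through
        # litellm.completion_with_fallbacks, and the parent call was logged
        # as its own success event -- so this callback fired twice per call.
        print("success:", kwargs.get("model"))

    litellm.success_callback = [track_success]

    response = litellm.completion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "hi"}],
        fallbacks=["gpt-3.5-turbo"],  # triggers the fallback wrapper
    )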
Krish Dholakia · 2025-01-07 21:40:33 -08:00 · committed by GitHub
parent 081826a5d6 · commit e8ed40a27b
5 changed files with 115 additions and 206 deletions
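
Most of the diff below hoists the per-provider `supported_params` lookups in `get_optional_params` into a single call at the top, falling back to the "openai" parameter list when a provider returns None, so `supported_params` is always set before `_check_valid_arg` runs. A quick sketch of the lookup this relies on (illustrative model names; the exact lists depend on your litellm version):

    from litellm import get_supported_openai_params

    # Providers with an explicit mapping return their own list of OpenAI params...
    print(get_supported_openai_params(
        model="claude-3-sonnet-20240229", custom_llm_provider="anthropic"
    ))

    # ...while the "openai" provider list is what get_optional_params now
    # falls back to when a provider-specific lookup returns None.
    print(get_supported_openai_params(
        model="gpt-4o-mini", custom_llm_provider="openai"
    ))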

litellm/utils.py

@@ -590,6 +590,29 @@ def function_setup(  # noqa: PLR0915
         raise e
 
 
+async def _client_async_logging_helper(
+    logging_obj: LiteLLMLoggingObject,
+    result,
+    start_time,
+    end_time,
+    is_completion_with_fallbacks: bool,
+):
+    if (
+        is_completion_with_fallbacks is False
+    ):  # don't log the parent event litellm.completion_with_fallbacks as a 'log_success_event', this will lead to double logging the same call - https://github.com/BerriAI/litellm/issues/7477
+        print_verbose(
+            f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}"
+        )
+        # check if user does not want this to be logged
+        asyncio.create_task(
+            logging_obj.async_success_handler(result, start_time, end_time)
+        )
+        threading.Thread(
+            target=logging_obj.success_handler,
+            args=(result, start_time, end_time),
+        ).start()
+
+
 def client(original_function):  # noqa: PLR0915
     rules_obj = Rules()
@@ -1017,6 +1040,7 @@ def client(original_function):  # noqa: PLR0915
        kwargs["litellm_call_id"] = str(uuid.uuid4())
 
        model: Optional[str] = args[0] if len(args) > 0 else kwargs.get("model", None)
+       is_completion_with_fallbacks = kwargs.get("fallbacks") is not None
 
        try:
            if logging_obj is None:
@@ -1119,12 +1143,14 @@ def client(original_function):  # noqa: PLR0915
            )
            # LOG SUCCESS - handle streaming success logging in the _next_ object
-           print_verbose(
-               f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}"
-           )
-           # check if user does not want this to be logged
            asyncio.create_task(
-               logging_obj.async_success_handler(result, start_time, end_time)
+               _client_async_logging_helper(
+                   logging_obj=logging_obj,
+                   result=result,
+                   start_time=start_time,
+                   end_time=end_time,
+                   is_completion_with_fallbacks=is_completion_with_fallbacks,
+               )
            )
            executor.submit(
                logging_obj.success_handler,
@@ -2754,7 +2780,7 @@ def get_optional_params(  # noqa: PLR0915
                new_parameters.pop("additionalProperties", None)
            tool_function["parameters"] = new_parameters
 
-   def _check_valid_arg(supported_params):
+   def _check_valid_arg(supported_params: List[str]):
        verbose_logger.info(
            f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}"
        )
@@ -2798,13 +2824,17 @@ def get_optional_params(  # noqa: PLR0915
    provider_config = ProviderConfigManager.get_provider_chat_config(
        model=model, provider=LlmProviders(custom_llm_provider)
    )
+   supported_params = get_supported_openai_params(
+       model=model, custom_llm_provider=custom_llm_provider
+   )
+   if supported_params is None:
+       supported_params = get_supported_openai_params(
+           model=model, custom_llm_provider="openai"
+       )
+   _check_valid_arg(supported_params=supported_params or [])
    ## raise exception if provider doesn't support passed in param
    if custom_llm_provider == "anthropic":
        ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.AnthropicConfig().map_openai_params(
            model=model,
            non_default_params=non_default_params,
@@ -2816,9 +2846,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "anthropic_text":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
        optional_params = litellm.AnthropicTextConfig().map_openai_params(
            model=model,
            non_default_params=non_default_params,
@@ -2829,7 +2856,6 @@ def get_optional_params(  # noqa: PLR0915
                else False
            ),
        )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.AnthropicTextConfig().map_openai_params(
            model=model,
            non_default_params=non_default_params,
@@ -2843,10 +2869,6 @@ def get_optional_params(  # noqa: PLR0915
    elif custom_llm_provider == "cohere":
        ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        # handle cohere params
        optional_params = litellm.CohereConfig().map_openai_params(
            non_default_params=non_default_params,
@@ -2859,11 +2881,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "cohere_chat":
-       ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        # handle cohere params
        optional_params = litellm.CohereChatConfig().map_openai_params(
            non_default_params=non_default_params,
@@ -2876,10 +2893,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "triton":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.TritonConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -2888,11 +2901,6 @@ def get_optional_params(  # noqa: PLR0915
        )
    elif custom_llm_provider == "maritalk":
-       ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.MaritalkConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -2904,11 +2912,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "replicate":
-       ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.ReplicateConfig().map_openai_params(
            non_default_params=non_default_params,
@@ -2921,10 +2924,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "predibase":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.PredibaseConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -2936,11 +2935,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "huggingface":
-       ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.HuggingfaceConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -2952,11 +2946,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "together_ai":
-       ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.TogetherAIConfig().map_openai_params(
            non_default_params=non_default_params,
@@ -2976,12 +2965,6 @@ def get_optional_params(  # noqa: PLR0915
        or model in litellm.vertex_language_models
        or model in litellm.vertex_vision_models
    ):
-       ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.VertexGeminiConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -2994,10 +2977,6 @@ def get_optional_params(  # noqa: PLR0915
        )
    elif custom_llm_provider == "gemini":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.GoogleAIStudioGeminiConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3011,10 +2990,6 @@ def get_optional_params(  # noqa: PLR0915
    elif custom_llm_provider == "vertex_ai_beta" or (
        custom_llm_provider == "vertex_ai" and "gemini" in model
    ):
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.VertexGeminiConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3028,10 +3003,6 @@ def get_optional_params(  # noqa: PLR0915
    elif litellm.VertexAIAnthropicConfig.is_supported_model(
        model=model, custom_llm_provider=custom_llm_provider
    ):
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.VertexAIAnthropicConfig().map_openai_params(
            model=model,
            non_default_params=non_default_params,
@@ -3043,10 +3014,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "vertex_ai" and model in litellm.vertex_llama3_models:
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.VertexAILlama3Config().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3058,10 +3025,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "vertex_ai" and model in litellm.vertex_mistral_models:
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        if "codestral" in model:
            optional_params = litellm.CodestralTextCompletionConfig().map_openai_params(
                model=model,
@@ -3085,10 +3048,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "vertex_ai" and model in litellm.vertex_ai_ai21_models:
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.VertexAIAi21Config().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3100,11 +3059,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "sagemaker":
-       ## check if unsupported param passed in
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None
        optional_params = litellm.SagemakerConfig().map_openai_params(
            non_default_params=non_default_params,
@@ -3117,12 +3071,8 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "bedrock":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
        base_model = litellm.AmazonConverseConfig()._get_base_model(model)
        if base_model in litellm.bedrock_converse_models:
-           _check_valid_arg(supported_params=supported_params)
            optional_params = litellm.AmazonConverseConfig().map_openai_params(
                model=model,
                non_default_params=non_default_params,
@@ -3136,7 +3086,6 @@ def get_optional_params(  # noqa: PLR0915
            )
        elif "anthropic" in model:
-           _check_valid_arg(supported_params=supported_params)
            if "aws_bedrock_client" in passed_params:  # deprecated boto3.invoke route.
                if model.startswith("anthropic.claude-3"):
                    optional_params = (
@@ -3151,7 +3100,6 @@ def get_optional_params(  # noqa: PLR0915
                optional_params=optional_params,
            )
        elif provider_config is not None:
-           _check_valid_arg(supported_params=supported_params)
            optional_params = provider_config.map_openai_params(
                non_default_params=non_default_params,
                optional_params=optional_params,
@@ -3163,11 +3111,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "cloudflare":
-       # https://developers.cloudflare.com/workers-ai/models/text-generation/#input
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.CloudflareChatConfig().map_openai_params(
            model=model,
@@ -3180,10 +3123,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "ollama":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.OllamaConfig().map_openai_params(
            non_default_params=non_default_params,
@@ -3196,11 +3135,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "ollama_chat":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.OllamaChatConfig().map_openai_params(
            model=model,
@@ -3213,10 +3147,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "nlp_cloud":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.NLPCloudConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3229,10 +3159,6 @@ def get_optional_params(  # noqa: PLR0915
        )
    elif custom_llm_provider == "petals":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.PetalsConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3244,10 +3170,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "deepinfra":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.DeepInfraConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3259,10 +3181,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "perplexity" and provider_config is not None:
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = provider_config.map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3274,10 +3192,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "mistral" or custom_llm_provider == "codestral":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.MistralConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3289,10 +3203,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "text-completion-codestral":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.CodestralTextCompletionConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3305,10 +3215,6 @@ def get_optional_params(  # noqa: PLR0915
        )
    elif custom_llm_provider == "databricks":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.DatabricksConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3320,10 +3226,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "nvidia_nim":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.NvidiaNimConfig().map_openai_params(
            model=model,
            non_default_params=non_default_params,
@@ -3335,10 +3237,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "cerebras":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.CerebrasConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3350,20 +3248,12 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "xai":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.XAIChatConfig().map_openai_params(
            model=model,
            non_default_params=non_default_params,
            optional_params=optional_params,
        )
    elif custom_llm_provider == "ai21_chat" or custom_llm_provider == "ai21":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.AI21ChatConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3375,10 +3265,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "fireworks_ai":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.FireworksAIConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3390,10 +3276,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "volcengine":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.VolcEngineConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3405,10 +3287,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "hosted_vllm":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.HostedVLLMChatConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3420,10 +3298,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "vllm":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.VLLMConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3435,11 +3309,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "groq":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.GroqChatConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3451,11 +3320,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "deepseek":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.OpenAIConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3467,11 +3331,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "openrouter":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.OpenrouterConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3484,10 +3343,6 @@ def get_optional_params(  # noqa: PLR0915
        )
    elif custom_llm_provider == "watsonx":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.IBMWatsonXChatConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3505,10 +3360,6 @@ def get_optional_params(  # noqa: PLR0915
                f"LiteLLM now defaults to Watsonx's `/text/chat` endpoint. Please use the `watsonx_text` provider instead, to call the `/text/generation` endpoint. Param: {param}"
            )
    elif custom_llm_provider == "watsonx_text":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider=custom_llm_provider
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.IBMWatsonXAIConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3520,10 +3371,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "openai":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider="openai"
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.OpenAIConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
@@ -3535,10 +3382,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    elif custom_llm_provider == "azure":
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider="azure"
-       )
-       _check_valid_arg(supported_params=supported_params)
        if litellm.AzureOpenAIO1Config().is_o1_model(model=model):
            optional_params = litellm.AzureOpenAIO1Config().map_openai_params(
                non_default_params=non_default_params,
@@ -3574,10 +3417,6 @@ def get_optional_params(  # noqa: PLR0915
            ),
        )
    else:  # assume passing in params for openai-like api
-       supported_params = get_supported_openai_params(
-           model=model, custom_llm_provider="custom_openai"
-       )
-       _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.OpenAILikeChatConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
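
For callers, the visible behavior of `_check_valid_arg` is unchanged: a parameter outside `supported_params` still raises unless param-dropping is enabled. A hedged sketch (whether a given parameter raises depends on the provider mapping and your litellm version; the model name is illustrative):

    import litellm

    try:
        litellm.completion(
            model="command-nightly",  # illustrative Cohere model
            messages=[{"role": "user", "content": "hi"}],
            response_format={"type": "json_object"},  # assume unsupported here
        )
    except litellm.UnsupportedParamsError as e:
        # raised by _check_valid_arg when a param isn't in supported_params
        print(e)

    # Alternatively, drop unsupported params instead of raising:
    litellm.drop_params = True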