diff --git a/litellm/llms/vertex_ai/vertex_ai_partner_models/anthropic/transformation.py b/litellm/llms/vertex_ai/vertex_ai_partner_models/anthropic/transformation.py index 048cb3f0f1..08b4cfdb4e 100644 --- a/litellm/llms/vertex_ai/vertex_ai_partner_models/anthropic/transformation.py +++ b/litellm/llms/vertex_ai/vertex_ai_partner_models/anthropic/transformation.py @@ -1,6 +1,6 @@ # What is this? ## Handler file for calling claude-3 on vertex ai -from typing import List, Optional +from typing import List import httpx @@ -65,13 +65,16 @@ class VertexAIAnthropicConfig(AnthropicConfig): return data @classmethod - def is_supported_model( - cls, model: str, custom_llm_provider: Optional[str] = None - ) -> bool: + def is_supported_model(cls, model: str, custom_llm_provider: str) -> bool: """ Check if the model is supported by the VertexAI Anthropic API. """ - if custom_llm_provider == "vertex_ai" and "claude" in model.lower(): + if ( + custom_llm_provider != "vertex_ai" + and custom_llm_provider != "vertex_ai_beta" + ): + return False + if "claude" in model.lower(): return True elif model in litellm.vertex_anthropic_models: return True diff --git a/litellm/utils.py b/litellm/utils.py index 2a2e6ff45b..00b57cef17 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -590,6 +590,29 @@ def function_setup( # noqa: PLR0915 raise e +async def _client_async_logging_helper( + logging_obj: LiteLLMLoggingObject, + result, + start_time, + end_time, + is_completion_with_fallbacks: bool, +): + if ( + is_completion_with_fallbacks is False + ): # don't log the parent event litellm.completion_with_fallbacks as a 'log_success_event', this will lead to double logging the same call - https://github.com/BerriAI/litellm/issues/7477 + print_verbose( + f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}" + ) + # check if user does not want this to be logged + asyncio.create_task( + logging_obj.async_success_handler(result, start_time, end_time) + ) + threading.Thread( + target=logging_obj.success_handler, + args=(result, start_time, end_time), + ).start() + + def client(original_function): # noqa: PLR0915 rules_obj = Rules() @@ -1017,6 +1040,7 @@ def client(original_function): # noqa: PLR0915 kwargs["litellm_call_id"] = str(uuid.uuid4()) model: Optional[str] = args[0] if len(args) > 0 else kwargs.get("model", None) + is_completion_with_fallbacks = kwargs.get("fallbacks") is not None try: if logging_obj is None: @@ -1119,12 +1143,14 @@ def client(original_function): # noqa: PLR0915 ) # LOG SUCCESS - handle streaming success logging in the _next_ object - print_verbose( - f"Async Wrapper: Completed Call, calling async_success_handler: {logging_obj.async_success_handler}" - ) - # check if user does not want this to be logged asyncio.create_task( - logging_obj.async_success_handler(result, start_time, end_time) + _client_async_logging_helper( + logging_obj=logging_obj, + result=result, + start_time=start_time, + end_time=end_time, + is_completion_with_fallbacks=is_completion_with_fallbacks, + ) ) executor.submit( logging_obj.success_handler, @@ -2754,7 +2780,7 @@ def get_optional_params( # noqa: PLR0915 new_parameters.pop("additionalProperties", None) tool_function["parameters"] = new_parameters - def _check_valid_arg(supported_params): + def _check_valid_arg(supported_params: List[str]): verbose_logger.info( f"\nLiteLLM completion() model= {model}; provider = {custom_llm_provider}" ) @@ -2798,13 +2824,17 @@ def get_optional_params( # noqa: PLR0915 provider_config = 
ProviderConfigManager.get_provider_chat_config( model=model, provider=LlmProviders(custom_llm_provider) ) + supported_params = get_supported_openai_params( + model=model, custom_llm_provider=custom_llm_provider + ) + if supported_params is None: + supported_params = get_supported_openai_params( + model=model, custom_llm_provider="openai" + ) + _check_valid_arg(supported_params=supported_params or []) ## raise exception if provider doesn't support passed in param if custom_llm_provider == "anthropic": ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.AnthropicConfig().map_openai_params( model=model, non_default_params=non_default_params, @@ -2816,9 +2846,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "anthropic_text": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) optional_params = litellm.AnthropicTextConfig().map_openai_params( model=model, non_default_params=non_default_params, @@ -2829,7 +2856,6 @@ def get_optional_params( # noqa: PLR0915 else False ), ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.AnthropicTextConfig().map_openai_params( model=model, non_default_params=non_default_params, @@ -2843,10 +2869,6 @@ def get_optional_params( # noqa: PLR0915 elif custom_llm_provider == "cohere": ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) # handle cohere params optional_params = litellm.CohereConfig().map_openai_params( non_default_params=non_default_params, @@ -2859,11 +2881,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "cohere_chat": - ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) # handle cohere params optional_params = litellm.CohereChatConfig().map_openai_params( non_default_params=non_default_params, @@ -2876,10 +2893,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "triton": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.TritonConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -2888,11 +2901,6 @@ def get_optional_params( # noqa: PLR0915 ) elif custom_llm_provider == "maritalk": - ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.MaritalkConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -2904,11 +2912,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "replicate": - ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.ReplicateConfig().map_openai_params( non_default_params=non_default_params, @@ 
-2921,10 +2924,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "predibase": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.PredibaseConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -2936,11 +2935,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "huggingface": - ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.HuggingfaceConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -2952,11 +2946,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "together_ai": - ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.TogetherAIConfig().map_openai_params( non_default_params=non_default_params, @@ -2976,12 +2965,6 @@ def get_optional_params( # noqa: PLR0915 or model in litellm.vertex_language_models or model in litellm.vertex_vision_models ): - ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) - optional_params = litellm.VertexGeminiConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -2994,10 +2977,6 @@ def get_optional_params( # noqa: PLR0915 ) elif custom_llm_provider == "gemini": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.GoogleAIStudioGeminiConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3011,10 +2990,6 @@ def get_optional_params( # noqa: PLR0915 elif custom_llm_provider == "vertex_ai_beta" or ( custom_llm_provider == "vertex_ai" and "gemini" in model ): - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.VertexGeminiConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3028,10 +3003,6 @@ def get_optional_params( # noqa: PLR0915 elif litellm.VertexAIAnthropicConfig.is_supported_model( model=model, custom_llm_provider=custom_llm_provider ): - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.VertexAIAnthropicConfig().map_openai_params( model=model, non_default_params=non_default_params, @@ -3043,10 +3014,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "vertex_ai" and model in litellm.vertex_llama3_models: - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.VertexAILlama3Config().map_openai_params( non_default_params=non_default_params, 
optional_params=optional_params, @@ -3058,10 +3025,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "vertex_ai" and model in litellm.vertex_mistral_models: - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) if "codestral" in model: optional_params = litellm.CodestralTextCompletionConfig().map_openai_params( model=model, @@ -3085,10 +3048,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "vertex_ai" and model in litellm.vertex_ai_ai21_models: - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.VertexAIAi21Config().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3100,11 +3059,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "sagemaker": - ## check if unsupported param passed in - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) # temperature, top_p, n, stream, stop, max_tokens, n, presence_penalty default to None optional_params = litellm.SagemakerConfig().map_openai_params( non_default_params=non_default_params, @@ -3117,12 +3071,8 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "bedrock": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) base_model = litellm.AmazonConverseConfig()._get_base_model(model) if base_model in litellm.bedrock_converse_models: - _check_valid_arg(supported_params=supported_params) optional_params = litellm.AmazonConverseConfig().map_openai_params( model=model, non_default_params=non_default_params, @@ -3136,7 +3086,6 @@ def get_optional_params( # noqa: PLR0915 ) elif "anthropic" in model: - _check_valid_arg(supported_params=supported_params) if "aws_bedrock_client" in passed_params: # deprecated boto3.invoke route. 
if model.startswith("anthropic.claude-3"): optional_params = ( @@ -3151,7 +3100,6 @@ def get_optional_params( # noqa: PLR0915 optional_params=optional_params, ) elif provider_config is not None: - _check_valid_arg(supported_params=supported_params) optional_params = provider_config.map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3163,11 +3111,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "cloudflare": - # https://developers.cloudflare.com/workers-ai/models/text-generation/#input - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.CloudflareChatConfig().map_openai_params( model=model, @@ -3180,10 +3123,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "ollama": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.OllamaConfig().map_openai_params( non_default_params=non_default_params, @@ -3196,11 +3135,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "ollama_chat": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - - _check_valid_arg(supported_params=supported_params) optional_params = litellm.OllamaChatConfig().map_openai_params( model=model, @@ -3213,10 +3147,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "nlp_cloud": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.NLPCloudConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3229,10 +3159,6 @@ def get_optional_params( # noqa: PLR0915 ) elif custom_llm_provider == "petals": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.PetalsConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3244,10 +3170,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "deepinfra": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.DeepInfraConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3259,10 +3181,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "perplexity" and provider_config is not None: - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = provider_config.map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3274,10 +3192,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "mistral" or custom_llm_provider == "codestral": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.MistralConfig().map_openai_params( 
non_default_params=non_default_params, optional_params=optional_params, @@ -3289,10 +3203,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "text-completion-codestral": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.CodestralTextCompletionConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3305,10 +3215,6 @@ def get_optional_params( # noqa: PLR0915 ) elif custom_llm_provider == "databricks": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.DatabricksConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3320,10 +3226,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "nvidia_nim": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.NvidiaNimConfig().map_openai_params( model=model, non_default_params=non_default_params, @@ -3335,10 +3237,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "cerebras": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.CerebrasConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3350,20 +3248,12 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "xai": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.XAIChatConfig().map_openai_params( model=model, non_default_params=non_default_params, optional_params=optional_params, ) elif custom_llm_provider == "ai21_chat" or custom_llm_provider == "ai21": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.AI21ChatConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3375,10 +3265,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "fireworks_ai": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.FireworksAIConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3390,10 +3276,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "volcengine": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.VolcEngineConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3405,10 +3287,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "hosted_vllm": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - 
_check_valid_arg(supported_params=supported_params) optional_params = litellm.HostedVLLMChatConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3420,10 +3298,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "vllm": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.VLLMConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3435,11 +3309,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "groq": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) - optional_params = litellm.GroqChatConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3451,11 +3320,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "deepseek": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) - optional_params = litellm.OpenAIConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3467,11 +3331,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "openrouter": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) - optional_params = litellm.OpenrouterConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3484,10 +3343,6 @@ def get_optional_params( # noqa: PLR0915 ) elif custom_llm_provider == "watsonx": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.IBMWatsonXChatConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3505,10 +3360,6 @@ def get_optional_params( # noqa: PLR0915 f"LiteLLM now defaults to Watsonx's `/text/chat` endpoint. Please use the `watsonx_text` provider instead, to call the `/text/generation` endpoint. 
Param: {param}" ) elif custom_llm_provider == "watsonx_text": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider=custom_llm_provider - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.IBMWatsonXAIConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3520,10 +3371,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "openai": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider="openai" - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.OpenAIConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, @@ -3535,10 +3382,6 @@ def get_optional_params( # noqa: PLR0915 ), ) elif custom_llm_provider == "azure": - supported_params = get_supported_openai_params( - model=model, custom_llm_provider="azure" - ) - _check_valid_arg(supported_params=supported_params) if litellm.AzureOpenAIO1Config().is_o1_model(model=model): optional_params = litellm.AzureOpenAIO1Config().map_openai_params( non_default_params=non_default_params, @@ -3574,10 +3417,6 @@ def get_optional_params( # noqa: PLR0915 ), ) else: # assume passing in params for openai-like api - supported_params = get_supported_openai_params( - model=model, custom_llm_provider="custom_openai" - ) - _check_valid_arg(supported_params=supported_params) optional_params = litellm.OpenAILikeChatConfig().map_openai_params( non_default_params=non_default_params, optional_params=optional_params, diff --git a/tests/llm_translation/test_optional_params.py b/tests/llm_translation/test_optional_params.py index 5d6390e34c..dcb38f6241 100644 --- a/tests/llm_translation/test_optional_params.py +++ b/tests/llm_translation/test_optional_params.py @@ -1021,6 +1021,48 @@ def test_gemini_frequency_penalty(): assert "frequency_penalty" in optional_params +def test_litellm_proxy_claude_3_5_sonnet(): + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + }, + } + ] + + tool_choice = "auto" + + optional_params = get_optional_params( + model="claude-3-5-sonnet", + custom_llm_provider="litellm_proxy", + tools=tools, + tool_choice=tool_choice, + ) + assert optional_params["tools"] == tools + assert optional_params["tool_choice"] == tool_choice + + +def test_is_vertex_anthropic_model(): + assert ( + litellm.VertexAIAnthropicConfig().is_supported_model( + model="claude-3-5-sonnet", custom_llm_provider="litellm_proxy" + ) + is False + ) + def test_groq_response_format_json_schema(): optional_params = get_optional_params( model="llama-3.1-70b-versatile", diff --git a/tests/local_testing/test_acompletion.py b/tests/local_testing/test_acompletion.py index b83e346539..2f0a2fc47a 100644 --- a/tests/local_testing/test_acompletion.py +++ b/tests/local_testing/test_acompletion.py @@ -34,3 +34,27 @@ def test_acompletion_params(): # test_acompletion_params() + + +@pytest.mark.asyncio +async def test_langfuse_double_logging(): + import litellm + + litellm.set_verbose = True + litellm.success_callback = ["langfuse"] + litellm.failure_callback = ["langfuse"] # logs errors to langfuse + + models = ["gpt-4o-mini", "claude-3-5-haiku-20241022"] + + messages = [ + {"role": "user", "content": "Hello, how are you?"}, + ] + + resp = await litellm.acompletion( + model=models[0], + messages=messages, + temperature=0.0, + fallbacks=models[1:], + # metadata={"generation_name": "test-gen", "project": "litellm-test"}, + ) + return resp diff --git a/tests/local_testing/test_function_calling.py b/tests/local_testing/test_function_calling.py index e8a1180c7e..7dddeb11cf 100644 --- a/tests/local_testing/test_function_calling.py +++ b/tests/local_testing/test_function_calling.py @@ -156,6 +156,7 @@ def test_aaparallel_function_call(model): # test_parallel_function_call() + from litellm.types.utils import ChatCompletionMessageToolCall, Function, Message
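
The main refactor in get_optional_params hoists the per-provider supported_params lookup and the _check_valid_arg call above the provider branches, falling back to the OpenAI parameter list when a provider (such as litellm_proxy) has no mapping of its own. A minimal sketch of that lookup logic using litellm's public get_supported_openai_params helper — the model and provider names below are only examples, not part of the diff:

# Sketch of the hoisted supported-params lookup added to get_optional_params.
from litellm import get_supported_openai_params

def resolve_supported_params(model: str, custom_llm_provider: str) -> list:
    supported = get_supported_openai_params(
        model=model, custom_llm_provider=custom_llm_provider
    )
    if supported is None:
        # Providers without their own mapping fall back to the OpenAI param list,
        # so params like `tools` / `tool_choice` still pass validation.
        supported = get_supported_openai_params(
            model=model, custom_llm_provider="openai"
        )
    return supported or []

print(resolve_supported_params("claude-3-5-sonnet", "litellm_proxy"))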
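
VertexAIAnthropicConfig.is_supported_model now requires the provider to be vertex_ai or vertex_ai_beta before the "claude" model-name check runs, which is what keeps litellm_proxy requests for claude-3-5-sonnet out of the Vertex Anthropic branch. A quick sanity sketch mirroring the new test (the model name is just an example):

import litellm

config = litellm.VertexAIAnthropicConfig()

# Vertex providers with a claude model are still routed through this config...
assert config.is_supported_model(
    model="claude-3-5-sonnet", custom_llm_provider="vertex_ai"
)
# ...but any other provider is rejected up front, before the "claude" substring check.
assert not config.is_supported_model(
    model="claude-3-5-sonnet", custom_llm_provider="litellm_proxy"
)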
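
_client_async_logging_helper only fires the success handlers when is_completion_with_fallbacks is False, so the parent completion_with_fallbacks call is no longer logged as its own success event (the double logging reported in issue #7477). A hedged usage sketch with a custom success callback — the model names are illustrative and API keys are assumed to be configured:

import asyncio
import litellm

logged_models = []

# litellm accepts plain callables as success callbacks: (kwargs, response, start, end).
def track_success(kwargs, completion_response, start_time, end_time):
    logged_models.append(kwargs.get("model"))

litellm.success_callback = [track_success]

async def main():
    await litellm.acompletion(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello, how are you?"}],
        fallbacks=["claude-3-5-haiku-20241022"],
    )
    await asyncio.sleep(2)  # give the async success handler time to run
    # Expectation after this change: one success entry for the underlying call,
    # not a second one for the completion_with_fallbacks wrapper.
    print(logged_models)

asyncio.run(main())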