From eafb376bc2ab9e10a35b2ffe77a1ac1fe9bf5919 Mon Sep 17 00:00:00 2001
From: Sunny Wan
Date: Fri, 7 Mar 2025 18:36:28 -0500
Subject: [PATCH] removed hard coding

---
 litellm/llms/together_ai/chat.py | 18 +++---------------
 1 file changed, 3 insertions(+), 15 deletions(-)

diff --git a/litellm/llms/together_ai/chat.py b/litellm/llms/together_ai/chat.py
index 4c53da9f12..8f38da35ff 100644
--- a/litellm/llms/together_ai/chat.py
+++ b/litellm/llms/together_ai/chat.py
@@ -9,6 +9,7 @@ Docs: https://docs.together.ai/reference/completions-1
 from typing import Optional
 
 from litellm import get_model_info, verbose_logger
+from litellm.utils import supports_function_calling
 
 from ..openai.chat.gpt_transformation import OpenAIGPTConfig
 
@@ -20,25 +21,12 @@ class TogetherAIConfig(OpenAIGPTConfig):
         Docs: https://docs.together.ai/docs/json-mode
         """
-        supports_function_calling: Optional[bool] = None
-
-        supported_models = [
-            "deepseek-ai/DeepSeek-V3",
-            "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-            "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-            "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
-            "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-            "mistralai/Mixtral-8x7B-Instruct-v0.1",
-            "mistralai/Mistral-7B-Instruct-v0.1",
-            "Qwen/Qwen2.5-7B-Instruct-Turbo",
-            "Qwen/Qwen2.5-72B-Instruct-Turbo"
-        ]
+        function_calling: Optional[bool] = supports_function_calling(model, custom_llm_provider="together_ai")
 
-        supports_function_calling = model in supported_models
         optional_params = super().get_supported_openai_params(model)
-        if supports_function_calling is not True:
+        if function_calling is not True:
             verbose_logger.debug(
                 "Only some together models support function calling/response_format. Docs - https://docs.together.ai/docs/function-calling"
             )
             optional_params.remove("response_format")
             optional_params.remove("tools")
         return optional_params