diff --git a/litellm/__init__.py b/litellm/__init__.py index 3f22e41b6f..5e57d7678d 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -455,6 +455,7 @@ openai_compatible_providers: List = [ "groq", "nvidia_nim", "cerebras", + "ai21_chat", "volcengine", "codestral", "deepseek", @@ -694,6 +695,7 @@ provider_list: List = [ "groq", "nvidia_nim", "cerebras", + "ai21_chat", "volcengine", "codestral", "text-completion-codestral", @@ -852,7 +854,8 @@ from .llms.predibase import PredibaseConfig from .llms.replicate import ReplicateConfig from .llms.cohere.completion import CohereConfig from .llms.clarifai import ClarifaiConfig -from .llms.ai21 import AI21Config +from .llms.AI21.completion import AI21Config +from .llms.AI21.chat import AI21ChatConfig from .llms.together_ai import TogetherAIConfig from .llms.cloudflare import CloudflareConfig from .llms.palm import PalmConfig diff --git a/litellm/main.py b/litellm/main.py index 70cd40f317..9bb74fc6cb 100644 --- a/litellm/main.py +++ b/litellm/main.py @@ -75,7 +75,6 @@ from litellm.utils import ( from ._logging import verbose_logger from .caching import disable_cache, enable_cache, update_cache from .llms import ( - ai21, aleph_alpha, baseten, clarifai, @@ -91,6 +90,7 @@ from .llms import ( replicate, vllm, ) +from .llms.AI21 import completion as ai21 from .llms.anthropic.chat import AnthropicChatCompletion from .llms.anthropic.completion import AnthropicTextCompletion from .llms.azure import AzureChatCompletion, _check_dynamic_azure_params @@ -387,6 +387,7 @@ async def acompletion( or custom_llm_provider == "groq" or custom_llm_provider == "nvidia_nim" or 
custom_llm_provider == "cerebras" + or custom_llm_provider == "ai21_chat" or custom_llm_provider == "volcengine" or custom_llm_provider == "codestral" or custom_llm_provider == "text-completion-codestral" @@ -1293,6 +1294,7 @@ def completion( or custom_llm_provider == "groq" or custom_llm_provider == "nvidia_nim" or custom_llm_provider == "cerebras" + or custom_llm_provider == "ai21_chat" or custom_llm_provider == "volcengine" or custom_llm_provider == "codestral" or custom_llm_provider == "deepseek" @@ -3143,6 +3145,7 @@ async def aembedding(*args, **kwargs) -> EmbeddingResponse: or custom_llm_provider == "groq" or custom_llm_provider == "nvidia_nim" or custom_llm_provider == "cerebras" + or custom_llm_provider == "ai21_chat" or custom_llm_provider == "volcengine" or custom_llm_provider == "deepseek" or custom_llm_provider == "fireworks_ai" @@ -3807,6 +3810,7 @@ async def atext_completion( or custom_llm_provider == "groq" or custom_llm_provider == "nvidia_nim" or custom_llm_provider == "cerebras" + or custom_llm_provider == "ai21_chat" or custom_llm_provider == "volcengine" or custom_llm_provider == "text-completion-codestral" or custom_llm_provider == "deepseek"