add ai21_chat as new provider

Ishaan Jaff 2024-09-02 11:44:06 -07:00
parent 5c0480114e
commit 6bd18f7cbe
2 changed files with 10 additions and 2 deletions

litellm/__init__.py

@@ -455,6 +455,7 @@ openai_compatible_providers: List = [
     "groq",
     "nvidia_nim",
     "cerebras",
+    "ai21_chat",
     "volcengine",
     "codestral",
     "deepseek",
@@ -694,6 +695,7 @@ provider_list: List = [
     "groq",
     "nvidia_nim",
     "cerebras",
+    "ai21_chat",
     "volcengine",
     "codestral",
     "text-completion-codestral",
@@ -852,7 +854,8 @@ from .llms.predibase import PredibaseConfig
 from .llms.replicate import ReplicateConfig
 from .llms.cohere.completion import CohereConfig
 from .llms.clarifai import ClarifaiConfig
-from .llms.ai21 import AI21Config
+from .llms.AI21.completion import AI21Config
+from .llms.AI21.chat import AI21ChatConfig
 from .llms.together_ai import TogetherAIConfig
 from .llms.cloudflare import CloudflareConfig
 from .llms.palm import PalmConfig
@@ -910,6 +913,7 @@ from .llms.openai import (
 )
 from .llms.nvidia_nim import NvidiaNimConfig
 from .llms.cerebras.chat import CerebrasConfig
+from .llms.AI21.chat import AI21ChatConfig
 from .llms.fireworks_ai import FireworksAIConfig
 from .llms.volcengine import VolcEngineConfig
 from .llms.text_completion_codestral import MistralTextCompletionConfig
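
For context, the effect of the litellm/__init__.py changes above can be sanity-checked from the top-level package (a quick sketch, not part of the commit; the attribute names are taken directly from the hunks):

import litellm

# Both provider registries touched above now include the new entry.
assert "ai21_chat" in litellm.openai_compatible_providers
assert "ai21_chat" in litellm.provider_list

# The split imports expose the legacy completion config and the new chat config.
print(litellm.AI21Config)      # re-exported from .llms.AI21.completion
print(litellm.AI21ChatConfig)  # re-exported from .llms.AI21.chat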

litellm/main.py

@@ -75,7 +75,6 @@ from litellm.utils import (
 from ._logging import verbose_logger
 from .caching import disable_cache, enable_cache, update_cache
 from .llms import (
-    ai21,
     aleph_alpha,
     baseten,
     clarifai,
@@ -91,6 +90,7 @@ from .llms import (
     replicate,
     vllm,
 )
+from .llms.AI21 import completion as ai21
 from .llms.anthropic.chat import AnthropicChatCompletion
 from .llms.anthropic.completion import AnthropicTextCompletion
 from .llms.azure import AzureChatCompletion, _check_dynamic_azure_params
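
One side effect worth noting: main.py keeps using the local name ai21 even though the flat .llms.ai21 module is dropped from the grouped import. A small sketch of why existing call sites keep resolving (the re-binding is taken from the hunk above; the handler names used elsewhere in main.py are not shown in this diff):

from litellm.llms.AI21 import completion as ai21

# The relocated submodule is bound to the old local name, so references
# such as ai21.<handler> inside main.py now point at AI21/completion.py.
print(ai21.__name__)  # -> "litellm.llms.AI21.completion"
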
@@ -387,6 +387,7 @@ async def acompletion(
         or custom_llm_provider == "groq"
         or custom_llm_provider == "nvidia_nim"
         or custom_llm_provider == "cerebras"
+        or custom_llm_provider == "ai21_chat"
         or custom_llm_provider == "volcengine"
         or custom_llm_provider == "codestral"
         or custom_llm_provider == "text-completion-codestral"
@@ -1293,6 +1294,7 @@ def completion(
         or custom_llm_provider == "groq"
         or custom_llm_provider == "nvidia_nim"
         or custom_llm_provider == "cerebras"
+        or custom_llm_provider == "ai21_chat"
         or custom_llm_provider == "volcengine"
         or custom_llm_provider == "codestral"
         or custom_llm_provider == "deepseek"
@@ -3143,6 +3145,7 @@ async def aembedding(*args, **kwargs) -> EmbeddingResponse:
         or custom_llm_provider == "groq"
         or custom_llm_provider == "nvidia_nim"
         or custom_llm_provider == "cerebras"
+        or custom_llm_provider == "ai21_chat"
         or custom_llm_provider == "volcengine"
         or custom_llm_provider == "deepseek"
         or custom_llm_provider == "fireworks_ai"
@@ -3807,6 +3810,7 @@ async def atext_completion(
         or custom_llm_provider == "groq"
         or custom_llm_provider == "nvidia_nim"
         or custom_llm_provider == "cerebras"
+        or custom_llm_provider == "ai21_chat"
         or custom_llm_provider == "volcengine"
         or custom_llm_provider == "text-completion-codestral"
         or custom_llm_provider == "deepseek"
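
Finally, a minimal end-to-end sketch of calling the new route once this lands. The model name ("ai21_chat/jamba-1.5-mini") and the AI21_API_KEY environment variable are assumptions based on litellm's usual provider conventions, not something this diff specifies:

import asyncio
import os

import litellm

os.environ["AI21_API_KEY"] = "sk-..."  # placeholder credential (assumed env var)

# Sync path: completion() now lets "ai21_chat" through its OpenAI-compatible branch.
response = litellm.completion(
    model="ai21_chat/jamba-1.5-mini",  # assumed model name, for illustration only
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)

# Async path: acompletion() includes "ai21_chat" in its native-async provider check.
async def main():
    aresponse = await litellm.acompletion(
        model="ai21_chat/jamba-1.5-mini",
        messages=[{"role": "user", "content": "Say hello, asynchronously"}],
    )
    print(aresponse.choices[0].message.content)

asyncio.run(main())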