LiteLLM Common Base LLM Config (pt. 3): Move all OAI-compatible providers to base llm config (#7148)

* refactor(fireworks_ai/): inherit from openai-like base config

refactors fireworks ai to use a common config (a sketch of the inheritance pattern follows this commit list)

* test: fix import in test

* refactor(watsonx/): refactor watsonx to use llm base config

refactors chat + completion routes to base config path

* fix: fix linting error

* refactor: inherit base llm config for OAI-compatible routes

* test: fix test

* test: fix test
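
As a rough sketch of the pattern these commits describe (not the exact classes changed in this PR), an OpenAI-compatible provider config inherits the shared OpenAI transformation logic and overrides only what differs. The class name and the dropped parameter below are hypothetical; OpenAIGPTConfig is the base class the test in the diff imports.

# Minimal sketch, assuming litellm as of this commit. The subclass name and
# the trimmed parameter are hypothetical, used only to illustrate the pattern.
from litellm.llms.OpenAI.chat.gpt_transformation import OpenAIGPTConfig


class MyOpenAICompatibleConfig(OpenAIGPTConfig):
    """Hypothetical provider config that reuses OpenAI-compatible behavior."""

    def get_supported_openai_params(self, model: str) -> list:
        # Start from the shared OpenAI parameter list and drop anything this
        # provider does not support (illustrative choice).
        params = super().get_supported_openai_params(model)
        return [p for p in params if p != "parallel_tool_calls"]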
Krish Dholakia, 2024-12-10 17:12:42 -08:00, committed by GitHub
parent 4eeaaeeacd
commit df12f87a64
7 changed files with 107 additions and 41 deletions


@@ -290,33 +290,35 @@ async def test_add_and_delete_deployments(llm_router, model_list_flag_value):
     assert len(llm_router.model_list) == len(model_list) + prev_llm_router_val

-# def test_provider_config_manager():
-#     from litellm import LITELLM_CHAT_PROVIDERS, LlmProviders
-#     from litellm.utils import ProviderConfigManager
-#     from litellm.llms.base_llm.transformation import BaseConfig
-#     from litellm.llms.OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+def test_provider_config_manager():
+    from litellm import LITELLM_CHAT_PROVIDERS, LlmProviders
+    from litellm.utils import ProviderConfigManager
+    from litellm.llms.base_llm.transformation import BaseConfig
+    from litellm.llms.OpenAI.chat.gpt_transformation import OpenAIGPTConfig

-#     for provider in LITELLM_CHAT_PROVIDERS:
-#         assert isinstance(
-#             ProviderConfigManager.get_provider_chat_config(
-#                 model="gpt-3.5-turbo", provider=LlmProviders(provider)
-#             ),
-#             BaseConfig,
-#         ), f"Provider {provider} is not a subclass of BaseConfig"
+    for provider in LITELLM_CHAT_PROVIDERS:
+        if provider == LlmProviders.TRITON or provider == LlmProviders.PREDIBASE:
+            continue
+        assert isinstance(
+            ProviderConfigManager.get_provider_chat_config(
+                model="gpt-3.5-turbo", provider=LlmProviders(provider)
+            ),
+            BaseConfig,
+        ), f"Provider {provider} is not a subclass of BaseConfig"

-#         config = ProviderConfigManager.get_provider_chat_config(
-#             model="gpt-3.5-turbo", provider=LlmProviders(provider)
-#         )
+        config = ProviderConfigManager.get_provider_chat_config(
+            model="gpt-3.5-turbo", provider=LlmProviders(provider)
+        )

-#         if (
-#             provider != litellm.LlmProviders.OPENAI
-#             and provider != litellm.LlmProviders.OPENAI_LIKE
-#             and provider != litellm.LlmProviders.CUSTOM_OPENAI
-#         ):
-#             assert (
-#                 config.__class__.__name__ != "OpenAIGPTConfig"
-#             ), f"Provider {provider} is an instance of OpenAIGPTConfig"
+        if (
+            provider != litellm.LlmProviders.OPENAI
+            and provider != litellm.LlmProviders.OPENAI_LIKE
+            and provider != litellm.LlmProviders.CUSTOM_OPENAI
+        ):
+            assert (
+                config.__class__.__name__ != "OpenAIGPTConfig"
+            ), f"Provider {provider} is an instance of OpenAIGPTConfig"

-#         assert (
-#             "_abc_impl" not in config.get_config()
-#         ), f"Provider {provider} has _abc_impl"
+        assert (
+            "_abc_impl" not in config.get_config()
+        ), f"Provider {provider} has _abc_impl"