LiteLLM Common Base LLM Config (pt.3): Move all OAI compatible providers to base llm config (#7148)

* refactor(fireworks_ai/): inherit from openai-like base config

refactors Fireworks AI to use a common config

* test: fix import in test

* refactor(watsonx/): refactor watsonx to use the base llm config

refactors the chat + completion routes onto the base config path

* fix: fix linting error

* refactor: inherit base llm config for OAI-compatible routes

* test: fix test

* test: fix test
Krish Dholakia 2024-12-10 17:12:42 -08:00 committed by GitHub
parent 311432ca17
commit 1e87782215
7 changed files with 107 additions and 41 deletions
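Taken together, the pattern this change standardizes on: an OpenAI-compatible provider is declared as a thin subclass of the shared OpenAI-like config and then wired up in ProviderConfigManager, instead of carrying its own transformation logic. A minimal sketch of the provider-side half; the import path matches the one added in the first hunk below, while NewProviderChatConfig is an illustrative name only, not part of this PR:

# Illustrative only: "NewProviderChatConfig" is not part of this diff.
# An OpenAI-compatible provider inherits the shared OpenAI-like config and
# overrides behavior only where the provider actually deviates from OpenAI.
from litellm.llms.openai_like.chat.handler import OpenAILikeChatConfig


class NewProviderChatConfig(OpenAILikeChatConfig):
    # For a fully OpenAI-compatible API, an empty subclass is enough;
    # ProviderConfigManager can then return this class for the provider.
    pass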


@@ -1055,7 +1055,11 @@ ALL_LITELLM_RESPONSE_TYPES = [
 from .types.utils import ImageObject
 from .llms.custom_llm import CustomLLM
+from .llms.openai_like.chat.handler import OpenAILikeChatConfig
+from .llms.galadriel.chat.transformation import GaladrielChatConfig
 from .llms.huggingface_restapi import HuggingfaceConfig
+from .llms.empower.chat.transformation import EmpowerChatConfig
+from .llms.github.chat.transformation import GithubChatConfig
 from .llms.anthropic.chat.handler import AnthropicConfig
 from .llms.anthropic.experimental_pass_through.transformation import (
     AnthropicExperimentalPassThroughConfig,


@@ -45,21 +45,7 @@ class DatabricksConfig(OpenAIGPTConfig):
     @classmethod
     def get_config(cls):
-        return {
-            k: v
-            for k, v in cls.__dict__.items()
-            if not k.startswith("__")
-            and not isinstance(
-                v,
-                (
-                    types.FunctionType,
-                    types.BuiltinFunctionType,
-                    classmethod,
-                    staticmethod,
-                ),
-            )
-            and v is not None
-        }
+        return super().get_config()

     def get_required_params(self) -> List[ProviderField]:
         """For a given provider, return it's required fields with a description"""


@@ -0,0 +1,9 @@
+"""
+Translate from OpenAI's `/v1/chat/completions` to Empower's `/v1/chat/completions`
+"""
+
+from ...openai_like.chat.transformation import OpenAILikeChatConfig
+
+
+class EmpowerChatConfig(OpenAILikeChatConfig):
+    pass


@@ -0,0 +1,24 @@
+"""
+Translate from OpenAI's `/v1/chat/completions` to Galadriel's `/v1/chat/completions`
+"""
+
+import json
+import types
+from typing import List, Optional, Tuple, Union
+
+from pydantic import BaseModel
+
+import litellm
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    ChatCompletionAssistantMessage,
+    ChatCompletionToolParam,
+    ChatCompletionToolParamFunctionChunk,
+)
+
+from ...openai_like.chat.handler import OpenAILikeChatConfig
+
+
+class GaladrielChatConfig(OpenAILikeChatConfig):
+    pass


@@ -0,0 +1,24 @@
+"""
+Translate from OpenAI's `/v1/chat/completions` to Github's `/v1/chat/completions`
+"""
+
+import json
+import types
+from typing import List, Optional, Tuple, Union
+
+from pydantic import BaseModel
+
+import litellm
+from litellm.secret_managers.main import get_secret_str
+from litellm.types.llms.openai import (
+    AllMessageValues,
+    ChatCompletionAssistantMessage,
+    ChatCompletionToolParam,
+    ChatCompletionToolParamFunctionChunk,
+)
+
+from ...openai_like.chat.handler import OpenAILikeChatConfig
+
+
+class GithubChatConfig(OpenAILikeChatConfig):
+    pass


@@ -6303,6 +6303,23 @@ class ProviderConfigManager:
             return litellm.IBMWatsonXChatConfig()
         elif litellm.LlmProviders.WATSONX_TEXT == provider:
             return litellm.IBMWatsonXAIConfig()
+        elif litellm.LlmProviders.EMPOWER == provider:
+            return litellm.EmpowerChatConfig()
+        elif litellm.LlmProviders.GITHUB == provider:
+            return litellm.GithubChatConfig()
+        elif (
+            litellm.LlmProviders.CUSTOM == provider
+            or litellm.LlmProviders.CUSTOM_OPENAI == provider
+            or litellm.LlmProviders.OPENAI_LIKE == provider
+            or litellm.LlmProviders.LITELLM_PROXY == provider
+        ):
+            return litellm.OpenAILikeChatConfig()
+        elif litellm.LlmProviders.HOSTED_VLLM == provider:
+            return litellm.HostedVLLMChatConfig()
+        elif litellm.LlmProviders.LM_STUDIO == provider:
+            return litellm.LMStudioChatConfig()
+        elif litellm.LlmProviders.GALADRIEL == provider:
+            return litellm.GaladrielChatConfig()
         return litellm.OpenAIGPTConfig()
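A quick way to see the new dispatch in action, using the same call signature as the re-enabled test below; the printed class name is the expected result per this hunk:

# Resolve the chat config for a provider; mirrors the call used in the test.
from litellm import LlmProviders
from litellm.utils import ProviderConfigManager

config = ProviderConfigManager.get_provider_chat_config(
    model="gpt-3.5-turbo", provider=LlmProviders.GITHUB
)
# With this change GITHUB maps to the OpenAI-like subclass instead of
# falling through to the generic OpenAIGPTConfig default.
print(type(config).__name__)  # expected: GithubChatConfig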


@@ -290,33 +290,35 @@ async def test_add_and_delete_deployments(llm_router, model_list_flag_value):
     assert len(llm_router.model_list) == len(model_list) + prev_llm_router_val

The diff removes the commented-out test_provider_config_manager stub and re-adds it as a live test; the body is unchanged apart from newly skipping the TRITON and PREDIBASE providers:

+def test_provider_config_manager():
+    from litellm import LITELLM_CHAT_PROVIDERS, LlmProviders
+    from litellm.utils import ProviderConfigManager
+    from litellm.llms.base_llm.transformation import BaseConfig
+    from litellm.llms.OpenAI.chat.gpt_transformation import OpenAIGPTConfig
+
+    for provider in LITELLM_CHAT_PROVIDERS:
+        if provider == LlmProviders.TRITON or provider == LlmProviders.PREDIBASE:
+            continue
+        assert isinstance(
+            ProviderConfigManager.get_provider_chat_config(
+                model="gpt-3.5-turbo", provider=LlmProviders(provider)
+            ),
+            BaseConfig,
+        ), f"Provider {provider} is not a subclass of BaseConfig"
+
+        config = ProviderConfigManager.get_provider_chat_config(
+            model="gpt-3.5-turbo", provider=LlmProviders(provider)
+        )
+        if (
+            provider != litellm.LlmProviders.OPENAI
+            and provider != litellm.LlmProviders.OPENAI_LIKE
+            and provider != litellm.LlmProviders.CUSTOM_OPENAI
+        ):
+            assert (
+                config.__class__.__name__ != "OpenAIGPTConfig"
+            ), f"Provider {provider} is an instance of OpenAIGPTConfig"
+            assert (
+                "_abc_impl" not in config.get_config()
+            ), f"Provider {provider} has _abc_impl"