Merge pull request #4137 from jamesbraza/custom-llm-provider

Allowing inference of LLM provider in `get_supported_openai_params`
Krish Dholakia 2024-06-11 18:38:42 -07:00 committed by GitHub
commit 029d32344f
2 changed files with 15 additions and 1 deletion
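
Before the diffs, a quick usage sketch of the new behavior (the import path follows the test below; the "gpt-4" mapping is assumed, and the exact parameter list varies by provider):

    from litellm.utils import get_supported_openai_params

    # Previously the provider had to be passed explicitly:
    params = get_supported_openai_params("gpt-4", custom_llm_provider="openai")

    # With this change it is inferred from the model name:
    params = get_supported_openai_params("gpt-4")  # returns a list of param names

    # Models whose provider cannot be inferred now return None instead of erroring:
    assert get_supported_openai_params("nonexistent") is None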


@@ -23,6 +23,7 @@ from litellm.utils import (
     create_pretrained_tokenizer,
     create_tokenizer,
     get_max_tokens,
+    get_supported_openai_params,
 )
 
 # Assuming your trim_messages, shorten_message_to_fit_limit, and get_token_count functions are all in a module named 'message_utils'
@@ -386,3 +387,11 @@ def test_get_max_token_unit_test():
     ) # Returns a number instead of throwing an Exception
     assert isinstance(max_tokens, int)
+
+
+def test_get_supported_openai_params() -> None:
+    # Mapped provider
+    assert isinstance(get_supported_openai_params("gpt-4"), list)
+    # Unmapped provider
+    assert get_supported_openai_params("nonexistent") is None


@ -6227,7 +6227,7 @@ def get_first_chars_messages(kwargs: dict) -> str:
def get_supported_openai_params( def get_supported_openai_params(
model: str, model: str,
custom_llm_provider: str, custom_llm_provider: Optional[str] = None,
request_type: Literal["chat_completion", "embeddings"] = "chat_completion", request_type: Literal["chat_completion", "embeddings"] = "chat_completion",
) -> Optional[list]: ) -> Optional[list]:
""" """
@ -6242,6 +6242,11 @@ def get_supported_openai_params(
- List if custom_llm_provider is mapped - List if custom_llm_provider is mapped
- None if unmapped - None if unmapped
""" """
if not custom_llm_provider:
try:
custom_llm_provider = litellm.get_llm_provider(model=model)[1]
except BadRequestError:
return None
if custom_llm_provider == "bedrock": if custom_llm_provider == "bedrock":
return litellm.AmazonConverseConfig().get_supported_openai_params(model=model) return litellm.AmazonConverseConfig().get_supported_openai_params(model=model)
elif custom_llm_provider == "ollama": elif custom_llm_provider == "ollama":
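
For reference, the `[1]` index in the new fallback works because `litellm.get_llm_provider` returns a tuple whose second element is the provider name; a rough sketch of that contract (the 4-tuple shape shown here is an assumption, only the second element is confirmed by this diff):

    # Tuple shape assumed; only index [1] (the provider) is relied on above.
    model, provider, dynamic_api_key, api_base = litellm.get_llm_provider(model="gpt-4")
    # provider == "openai" here; an unrecognized model raises BadRequestError,
    # which the new code catches and converts into a None return.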