Allow inferring custom LLM provider from model inside get_supported_openai_params

James Braza 2024-06-11 14:41:57 -07:00
parent f5b10a47e3
commit cb5456c9f5

@@ -6226,7 +6226,7 @@ def get_first_chars_messages(kwargs: dict) -> str:
 def get_supported_openai_params(
     model: str,
-    custom_llm_provider: str,
+    custom_llm_provider: Optional[str] = None,
     request_type: Literal["chat_completion", "embeddings"] = "chat_completion",
 ) -> Optional[list]:
     """
@@ -6241,6 +6241,8 @@ def get_supported_openai_params(
     - List if custom_llm_provider is mapped
     - None if unmapped
     """
+    if not custom_llm_provider:
+        custom_llm_provider = litellm.get_llm_provider(model=model)[1]
     if custom_llm_provider == "bedrock":
         return litellm.AmazonConverseConfig().get_supported_openai_params(model=model)
     elif custom_llm_provider == "ollama":
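
For context, a minimal usage sketch of the behavior this commit enables. It assumes get_supported_openai_params is importable from the litellm package top level and that "gpt-3.5-turbo" resolves to the "openai" provider; both are assumptions for illustration, not part of this commit.

import litellm

# Before this change, callers had to pass the provider explicitly:
explicit = litellm.get_supported_openai_params(
    model="gpt-3.5-turbo", custom_llm_provider="openai"
)

# After this change, omitting custom_llm_provider infers it from the model
# name via litellm.get_llm_provider(model=model)[1]:
inferred = litellm.get_supported_openai_params(model="gpt-3.5-turbo")

assert explicit == inferred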