feat: implement dynamic model detection support for inference providers using litellm (#2886)

# What does this PR do?

This enhancement allows inference providers using LiteLLMOpenAIMixin to
validate model availability against LiteLLM's provider model listings
(litellm.models_by_provider), improving reliability and user experience
when working with different AI service providers.

- Add a `litellm_provider_name` parameter to the `LiteLLMOpenAIMixin`
  constructor
- Add a `check_model_availability` method to `LiteLLMOpenAIMixin`, backed
  by `litellm.models_by_provider`
- Update the Gemini, Groq, and SambaNova inference adapters to pass
  `litellm_provider_name` (adapter-side wiring is sketched below)
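
As a rough sketch of the adapter-side change, the wiring looks like the following. This is illustrative, not a verbatim excerpt from the PR: the import path, the `MODEL_ENTRIES` stand-in, the config handling, and the `"gemini_api_key"` field name are assumptions for the example.

```python
# Illustrative sketch only -- the real Gemini adapter's constructor
# arguments may differ from this example.
from llama_stack.providers.utils.inference.litellm_openai_mixin import (
    LiteLLMOpenAIMixin,
)

MODEL_ENTRIES: list = []  # stand-in for the adapter's registered entries


class GeminiInferenceAdapter(LiteLLMOpenAIMixin):
    def __init__(self, api_key: str | None) -> None:
        LiteLLMOpenAIMixin.__init__(
            self,
            model_entries=MODEL_ENTRIES,
            api_key_from_config=api_key,
            provider_data_api_key_field="gemini_api_key",
            # the new argument: ties this adapter to litellm's "gemini"
            # entry in litellm.models_by_provider
            litellm_provider_name="gemini",
        )
```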

## Test Plan

Standard CI.
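
Beyond CI, a quick local smoke check (not part of the PR's test plan) can confirm that litellm knows about the providers touched by this change; the provider keys below are assumptions mirroring the updated adapters.

```python
# Local sanity check, assuming litellm is installed.  Uses .get() so a
# missing provider key prints 0 instead of raising.
import litellm

for provider in ("gemini", "groq", "sambanova"):
    models = litellm.models_by_provider.get(provider, [])
    print(f"{provider}: {len(models)} models known to litellm")
```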

```diff
@@ -73,6 +73,15 @@ class LiteLLMOpenAIMixin(
         provider_data_api_key_field: str,
         openai_compat_api_base: str | None = None,
     ):
+        """
+        Initialize the LiteLLMOpenAIMixin.
+        :param model_entries: The model entries to register.
+        :param api_key_from_config: The API key to use from the config.
+        :param provider_data_api_key_field: The field in the provider data that contains the API key.
+        :param litellm_provider_name: The name of the provider, used for model lookups.
+        :param openai_compat_api_base: The base URL for OpenAI compatibility, or None if not using OpenAI compatibility.
+        """
         ModelRegistryHelper.__init__(self, model_entries)
+        self.litellm_provider_name = litellm_provider_name
```

```diff
@@ -428,3 +437,17 @@ class LiteLLMOpenAIMixin(
         logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for OpenAI Compat")
+
+    async def check_model_availability(self, model: str) -> bool:
+        """
+        Check if a specific model is available via LiteLLM for the current
+        provider (self.litellm_provider_name).
+
+        :param model: The model identifier to check.
+        :return: True if the model is available dynamically, False otherwise.
+        """
+        if self.litellm_provider_name not in litellm.models_by_provider:
+            logger.error(f"Provider {self.litellm_provider_name} is not registered in litellm.")
+            return False
+
+        return model in litellm.models_by_provider[self.litellm_provider_name]
```
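
For completeness, a minimal usage sketch of the new method, building on the hypothetical `GeminiInferenceAdapter` above; the model id is an assumption, and whatever litellm actually lists for the provider determines the result.

```python
# Usage sketch for the new method; builds on the hypothetical
# GeminiInferenceAdapter defined earlier.  The model id below is an
# assumption -- litellm's listing for "gemini" decides the outcome.
import asyncio


async def main() -> None:
    adapter = GeminiInferenceAdapter(api_key=None)
    ok = await adapter.check_model_availability("gemini/gemini-1.5-flash")
    print(f"available via litellm: {ok}")


asyncio.run(main())
```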