diff --git a/llama_stack/providers/utils/inference/litellm_openai_mixin.py b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
index 02e650307..598abc9a1 100644
--- a/llama_stack/providers/utils/inference/litellm_openai_mixin.py
+++ b/llama_stack/providers/utils/inference/litellm_openai_mixin.py
@@ -73,6 +73,15 @@ class LiteLLMOpenAIMixin(
         provider_data_api_key_field: str,
         openai_compat_api_base: str | None = None,
     ):
+        """
+        Initialize the LiteLLMOpenAIMixin.
+
+        :param model_entries: The model entries to register.
+        :param api_key_from_config: The API key to use from the config.
+        :param provider_data_api_key_field: The field in the provider data that contains the API key.
+        :param litellm_provider_name: The name of the provider, used for model lookups.
+        :param openai_compat_api_base: The base URL for OpenAI compatibility, or None if not using OpenAI compatibility.
+        """
         ModelRegistryHelper.__init__(self, model_entries)
         self.litellm_provider_name = litellm_provider_name
@@ -85,6 +94,8 @@ class LiteLLMOpenAIMixin(
         else:
             self.is_openai_compat = False
 
+        self.litellm_provider_name = litellm_provider_name
+
     async def initialize(self):
         pass
 
@@ -428,3 +439,17 @@ class LiteLLMOpenAIMixin(
         logprobs: LogProbConfig | None = None,
     ):
         raise NotImplementedError("Batch chat completion is not supported for OpenAI Compat")
+
+    async def check_model_availability(self, model: str) -> bool:
+        """
+        Check if a specific model is available via LiteLLM for the current
+        provider (self.litellm_provider_name).
+
+        :param model: The model identifier to check.
+        :return: True if the model is available dynamically, False otherwise.
+        """
+        if self.litellm_provider_name not in litellm.models_by_provider:
+            logger.error(f"Provider {self.litellm_provider_name} is not registered in litellm.")
+            return False
+
+        return model in litellm.models_by_provider[self.litellm_provider_name]
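
For readers unfamiliar with litellm's model registry: the new `check_model_availability` method reduces to a lookup in `litellm.models_by_provider`, a dictionary that maps a provider name to the model IDs litellm knows for that provider. Below is a minimal standalone sketch of that lookup outside the mixin; the helper name `is_model_available` and the provider/model strings in the usage line are illustrative assumptions, not part of this change.

```python
# Minimal sketch of the availability check added in this diff, assuming
# only that the `litellm` package is installed.
import litellm


def is_model_available(provider_name: str, model: str) -> bool:
    # Unknown providers fail closed, mirroring the logger.error + return False
    # branch above; otherwise availability is decided by membership in
    # litellm's per-provider model list.
    if provider_name not in litellm.models_by_provider:
        return False
    return model in litellm.models_by_provider[provider_name]


if __name__ == "__main__":
    # Illustrative values only; substitute the provider/model you care about.
    print(is_model_available("gemini", "gemini-1.5-flash"))
```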