Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-27 06:28:50 +00:00)
feat: implement dynamic model detection support for inference providers using litellm
This enhancement allows inference providers using LiteLLMOpenAIMixin to validate model availability against LiteLLM's official provider model listings, improving reliability and user experience when working with different AI service providers.

- Add a litellm_provider_name parameter to the LiteLLMOpenAIMixin constructor
- Add a check_model_availability method to LiteLLMOpenAIMixin using litellm.models_by_provider
- Update the Gemini, Groq, and SambaNova inference adapters to pass litellm_provider_name
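For context, here is a minimal sketch of how such a check can be built on litellm.models_by_provider (a dict in litellm mapping provider names to the model IDs it knows for each provider). The class shape, default behavior when no provider name is set, and method signature below are assumptions for illustration, not the actual llama-stack implementation:

```python
import litellm


class LiteLLMOpenAIMixin:
    """Illustrative stand-in for the real mixin; only the pieces
    relevant to model-availability checking are sketched here."""

    def __init__(self, litellm_provider_name: str | None = None):
        # Key used to look up models in litellm.models_by_provider,
        # e.g. "groq", "gemini", "sambanova".
        self.litellm_provider_name = litellm_provider_name

    async def check_model_availability(self, model: str) -> bool:
        if not self.litellm_provider_name:
            # Assumption: with no provider name configured, skip validation.
            return True
        # models_by_provider maps a provider name to the list of model IDs
        # litellm ships for that provider.
        known_models = litellm.models_by_provider.get(self.litellm_provider_name, [])
        return model in known_models
```

A design point worth noting: keeping the lookup table inside litellm, rather than hard-coding model lists per adapter, means newly supported provider models become visible to the availability check via a litellm upgrade alone.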
This commit is contained in:
parent cd8715d327
commit bf63470c22
4 changed files with 33 additions and 0 deletions
```diff
@@ -36,6 +36,7 @@ class GroqInferenceAdapter(LiteLLMOpenAIMixin):
             model_entries=MODEL_ENTRIES,
             api_key_from_config=config.api_key,
             provider_data_api_key_field="groq_api_key",
+            litellm_provider_name="groq",
         )
         self.config = config
 
```
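As a usage sketch (hypothetical driver code continuing the illustrative mixin above; the adapter construction and the model ID are examples, not guaranteed entries in litellm's registry), the effect of the new parameter can be exercised directly:

```python
import asyncio

# Assumes the LiteLLMOpenAIMixin sketch above is defined in the same module.
async def main() -> None:
    adapter = LiteLLMOpenAIMixin(litellm_provider_name="groq")
    ok = await adapter.check_model_availability("groq/llama-3.1-8b-instant")
    print("available" if ok else "not listed by litellm for groq")

asyncio.run(main())
```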