mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-12-23 22:33:56 +00:00
fix: address reviewer feedback - improve conditional imports and remove provider alias logic

- Improve conditional import approach with better documentation
- Remove provider-specific alias logic from sku_list.py
- Conditional imports are necessary because llama4 requires torch
- Addresses @ashwinb and @raghotham feedback while maintaining compatibility
This commit is contained in:
parent
cb17594611
commit
e083e09401
2 changed files with 5 additions and 15 deletions
|
|
@@ -23,19 +23,6 @@ def resolve_model(descriptor: str) -> Model | None:
|
|||
if descriptor in (m.descriptor(), m.huggingface_repo):
|
||||
return m
|
||||
|
||||
# Check provider aliases by attempting to import and check common providers
|
||||
try:
|
||||
from llama_stack.providers.remote.inference.together.models import MODEL_ENTRIES as TOGETHER_ENTRIES
|
||||
|
||||
for entry in TOGETHER_ENTRIES:
|
||||
if descriptor in entry.aliases and entry.llama_model:
|
||||
# Find the model by its descriptor
|
||||
for m in all_registered_models():
|
||||
if m.descriptor() == entry.llama_model:
|
||||
return m
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue