Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-12-22 22:42:25 +00:00)
fix: address reviewer feedback - improve conditional imports and remove provider alias logic

- Improve conditional import approach with better documentation
- Remove provider-specific alias logic from sku_list.py
- Conditional imports are necessary because llama4 requires torch
- Addresses @ashwinb and @raghotham feedback while maintaining compatibility
parent 61dc2a9c58
commit e5377d078d
2 changed files with 5 additions and 15 deletions
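The conditional-import approach the commit message describes, as a minimal standalone sketch (the names here are illustrative, not this repo's actual code): import the heavy dependency inside try/except, record availability in a flag, and fail at use time rather than at import time.

```python
try:
    import torch  # heavy optional dependency; llama4 code paths need it

    TORCH_AVAILABLE = True
except ImportError:
    torch = None
    TORCH_AVAILABLE = False


def make_tensor(values):
    # Fail only when the optional feature is actually used, so the module
    # itself stays importable without torch.
    if not TORCH_AVAILABLE:
        raise RuntimeError("this code path requires torch; please install it")
    return torch.tensor(values)
```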
llama_stack/models/llama/sku_list.py

@@ -23,19 +23,6 @@ def resolve_model(descriptor: str) -> Model | None:
         if descriptor in (m.descriptor(), m.huggingface_repo):
             return m
 
-    # Check provider aliases by attempting to import and check common providers
-    try:
-        from llama_stack.providers.remote.inference.together.models import MODEL_ENTRIES as TOGETHER_ENTRIES
-
-        for entry in TOGETHER_ENTRIES:
-            if descriptor in entry.aliases and entry.llama_model:
-                # Find the model by its descriptor
-                for m in all_registered_models():
-                    if m.descriptor() == entry.llama_model:
-                        return m
-    except ImportError:
-        pass
-
     return None
 
 
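After this hunk, resolve_model() only matches a model's canonical descriptor or its huggingface_repo; provider-specific aliases are left to the provider's own model entries (such as the Together MODEL_ENTRIES the removed code consulted). A hedged usage sketch, with illustrative model IDs:

```python
from llama_stack.models.llama.sku_list import resolve_model

# A canonical descriptor (or a HuggingFace repo id) still resolves.
model = resolve_model("Llama3.1-8B-Instruct")  # illustrative descriptor
if model is not None:
    print(model.huggingface_repo)

# A provider-specific alias (e.g., a Together model name) now yields None
# here; mapping such aliases is the provider's responsibility.
assert resolve_model("some-together-alias") is None  # hypothetical alias
```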
(second changed file)

@@ -52,7 +52,7 @@ from llama_stack.models.llama.llama3.prompt_templates import (
 )
 from llama_stack.models.llama.llama3.tokenizer import Tokenizer
 
-# Conditional imports to avoid heavy dependencies during module loading
+# Import llama4 components - these require torch to be available
 try:
     from llama_models.llama4.chat_format import ChatFormat as Llama4ChatFormat
     from llama_models.llama4.prompt_templates.system_prompts import (
@@ -62,8 +62,11 @@ try:
 
     LLAMA4_AVAILABLE = True
 except ImportError:
-    # Llama4 dependencies not available (e.g., torch not installed)
+    # Llama4 requires torch - if not available, we can't use Llama4 features
     LLAMA4_AVAILABLE = False
+    Llama4ChatFormat = None
+    PythonListCustomToolGeneratorLlama4 = None
+    Llama4Tokenizer = None
 from llama_stack.models.llama.sku_list import resolve_model
 from llama_stack.models.llama.sku_types import ModelFamily, is_multimodal
 from llama_stack.providers.utils.inference import supported_inference_models
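A hedged sketch of how a call site might consume the module-level names this hunk establishes (LLAMA4_AVAILABLE, Llama4ChatFormat, Llama4Tokenizer); the helper function and the Llama4Tokenizer.get_instance() accessor are assumptions for illustration, not code from this commit:

```python
def build_llama4_chat_format():
    # Guard llama4-only functionality behind the availability flag so that
    # importing this module never requires torch.
    if not LLAMA4_AVAILABLE:
        raise RuntimeError("Llama4 support requires torch, which is not installed")
    # Hypothetical: assumes a get_instance() singleton accessor on the tokenizer.
    return Llama4ChatFormat(Llama4Tokenizer.get_instance())
```

Setting the three names to None in the except branch (rather than leaving them undefined) keeps such guarded call sites free of NameError and makes type checkers and `is None` checks behave predictably.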