ModelAlias -> ProviderModelEntry

This commit is contained in:
Ashwin Bharambe 2025-02-20 14:02:36 -08:00
parent 561295af76
commit 07ccf908f7
27 changed files with 132 additions and 132 deletions

View file

@@ -10,7 +10,7 @@ from llama_stack.apis.models import ModelInput
 from llama_stack.distribution.datatypes import Provider, ToolGroupInput
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
-from llama_stack.providers.remote.inference.bedrock.models import MODEL_ALIASES
+from llama_stack.providers.remote.inference.bedrock.models import MODEL_ENTRIES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -47,7 +47,7 @@ def get_distribution_template() -> DistributionTemplate:
 provider_model_id=m.provider_model_id,
 provider_id="bedrock",
 )
-for m in MODEL_ALIASES
+for m in MODEL_ENTRIES
 ]
 default_tool_groups = [
 ToolGroupInput(

View file

@@ -14,7 +14,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import (
 )
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.remote.inference.cerebras import CerebrasImplConfig
-from llama_stack.providers.remote.inference.cerebras.models import model_aliases
+from llama_stack.providers.remote.inference.cerebras.models import model_entries
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -55,7 +55,7 @@ def get_distribution_template() -> DistributionTemplate:
 provider_model_id=m.provider_model_id,
 provider_id="cerebras",
 )
-for m in model_aliases
+for m in model_entries
 ]
 embedding_model = ModelInput(
 model_id="all-MiniLM-L6-v2",

View file

@@ -19,7 +19,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import (
 )
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.remote.inference.fireworks import FireworksImplConfig
-from llama_stack.providers.remote.inference.fireworks.models import MODEL_ALIASES
+from llama_stack.providers.remote.inference.fireworks.models import MODEL_ENTRIES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -67,7 +67,7 @@ def get_distribution_template() -> DistributionTemplate:
 provider_model_id=m.provider_model_id,
 provider_id="fireworks",
 )
-for m in MODEL_ALIASES
+for m in MODEL_ENTRIES
 ]
 embedding_model = ModelInput(
 model_id="all-MiniLM-L6-v2",

View file

@@ -9,7 +9,7 @@ from pathlib import Path
 from llama_stack.distribution.datatypes import ModelInput, Provider, ToolGroupInput
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.providers.remote.inference.nvidia import NVIDIAConfig
-from llama_stack.providers.remote.inference.nvidia.models import _MODEL_ALIASES
+from llama_stack.providers.remote.inference.nvidia.models import _MODEL_ENTRIES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -45,7 +45,7 @@ def get_distribution_template() -> DistributionTemplate:
 provider_model_id=m.provider_model_id,
 provider_id="nvidia",
 )
-for m in _MODEL_ALIASES
+for m in _MODEL_ENTRIES
 ]
 default_tool_groups = [
 ToolGroupInput(

View file

@@ -119,7 +119,7 @@ llama stack run ./run-with-safety.yaml \
 ### (Optional) Update Model Serving Configuration
 ```{note}
-Please check the [model_aliases](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py#L45) for the supported Ollama models.
+Please check the [model_entries](https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/inference/ollama/ollama.py#L45) for the supported Ollama models.
 ```
 To serve a new model with `ollama`

View file

@@ -14,7 +14,7 @@ from llama_stack.distribution.datatypes import (
 )
 from llama_stack.models.llama.sku_list import all_registered_models
 from llama_stack.providers.remote.inference.sambanova import SambaNovaImplConfig
-from llama_stack.providers.remote.inference.sambanova.models import MODEL_ALIASES
+from llama_stack.providers.remote.inference.sambanova.models import MODEL_ENTRIES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -47,7 +47,7 @@ def get_distribution_template() -> DistributionTemplate:
 provider_model_id=m.provider_model_id,
 provider_id=name,
 )
-for m in MODEL_ALIASES
+for m in MODEL_ENTRIES
 ]
 default_tool_groups = [

View file

@@ -19,7 +19,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import (
 )
 from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
 from llama_stack.providers.remote.inference.together import TogetherImplConfig
-from llama_stack.providers.remote.inference.together.models import MODEL_ALIASES
+from llama_stack.providers.remote.inference.together.models import MODEL_ENTRIES
 from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -65,7 +65,7 @@ def get_distribution_template() -> DistributionTemplate:
 provider_model_id=m.provider_model_id,
 provider_id="together",
 )
-for m in MODEL_ALIASES
+for m in MODEL_ENTRIES
 ]
 default_tool_groups = [
 ToolGroupInput(