fix after merge

This commit is contained in:
Vladislav 2025-02-21 13:56:16 +01:00
parent 19f3b23d47
commit 7cbf3f8383
2 changed files with 9 additions and 12 deletions

View file

@@ -5,25 +5,22 @@
# the root directory of this source tree.
from llama_stack.models.llama.sku_list import CoreModelId
from llama_stack.providers.utils.inference.model_registry import (
build_model_alias,
build_model_alias_with_just_provider_model_id,
)
from llama_stack.providers.utils.inference.model_registry import build_model_entry
_MODEL_ALIASES = [
build_model_alias(
_MODEL_ENTRIES = [
build_model_entry(
"llama3-8b-8192",
CoreModelId.llama3_1_8b_instruct.value,
),
build_model_alias_with_just_provider_model_id(
build_model_entry(
"llama-3.1-8b-instant",
CoreModelId.llama3_1_8b_instruct.value,
),
build_model_alias(
build_model_entry(
"llama3-70b-8192",
CoreModelId.llama3_70b_instruct.value,
),
build_model_alias(
build_model_entry(
"llama-3.3-70b-versatile",
CoreModelId.llama3_3_70b_instruct.value,
),
@@ -31,7 +28,7 @@ _MODEL_ALIASES = [
# Preview models aren't recommended for production use, but we include this one
# to pass the test fixture
# TODO(aidand): Replace this with a stable model once Groq supports it
build_model_alias(
build_model_entry(
"llama-3.2-3b-preview",
CoreModelId.llama3_2_3b_instruct.value,
),

View file

@@ -18,7 +18,7 @@ from llama_stack.providers.inline.inference.sentence_transformers import (
)
from llama_stack.providers.inline.vector_io.faiss.config import FaissVectorIOConfig
from llama_stack.providers.remote.inference.groq import GroqConfig
from llama_stack.providers.remote.inference.groq.models import _MODEL_ALIASES
from llama_stack.providers.remote.inference.groq.models import _MODEL_ENTRIES
from llama_stack.templates.template import DistributionTemplate, RunConfigSettings
@@ -73,7 +73,7 @@ def get_distribution_template() -> DistributionTemplate:
provider_model_id=m.provider_model_id,
provider_id=name,
)
for m in _MODEL_ALIASES
for m in _MODEL_ENTRIES
]
default_tool_groups = [