# What does this PR do?

Move around bits. This makes the copies from llama-models _much_ easier to maintain and ensures we don't entangle meta-reference-specific tidbits into llama-models code even by accident.

Also, kills the meta-reference-quantized-gpu distro and rolls the quantization deps into meta-reference-gpu.

## Test Plan

```
LLAMA_MODELS_DEBUG=1 \
with-proxy llama stack run meta-reference-gpu \
  --env INFERENCE_MODEL=meta-llama/Llama-4-Scout-17B-16E-Instruct \
  --env INFERENCE_CHECKPOINT_DIR=<DIR> \
  --env MODEL_PARALLEL_SIZE=4 \
  --env QUANTIZATION_TYPE=fp8_mixed
```

Start a server with and without quantization. Point integration tests to it using:

```
pytest -s -v tests/integration/inference/test_text_inference.py \
  --stack-config http://localhost:8321 \
  --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct
```
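As a quick end-to-end check before running the integration suite, something like the following can confirm the server is up and serving the expected model. This is a minimal sketch, assuming the `llama-stack-client` Python package and the same base URL passed to `--stack-config` above.

```python
# Smoke-test sketch: confirm the stack server from the test plan is reachable
# and has the target model registered before pointing pytest at it.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# List the models registered on the running stack and check for the target.
model_ids = [m.identifier for m in client.models.list()]
assert "meta-llama/Llama-4-Scout-17B-16E-Instruct" in model_ids, model_ids
print("server up; model registered")
```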
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from llama_stack.apis.models.models import ModelType
from llama_stack.models.llama.sku_types import CoreModelId
from llama_stack.providers.utils.inference.model_registry import (
    ProviderModelEntry,
    build_hf_repo_model_entry,
)

MODEL_ENTRIES = [
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-v3p1-8b-instruct",
        CoreModelId.llama3_1_8b_instruct.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-v3p1-70b-instruct",
        CoreModelId.llama3_1_70b_instruct.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-v3p1-405b-instruct",
        CoreModelId.llama3_1_405b_instruct.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-v3p2-3b-instruct",
        CoreModelId.llama3_2_3b_instruct.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-v3p2-11b-vision-instruct",
        CoreModelId.llama3_2_11b_vision_instruct.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-v3p2-90b-vision-instruct",
        CoreModelId.llama3_2_90b_vision_instruct.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-v3p3-70b-instruct",
        CoreModelId.llama3_3_70b_instruct.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-guard-3-8b",
        CoreModelId.llama_guard_3_8b.value,
    ),
    build_hf_repo_model_entry(
        "accounts/fireworks/models/llama-guard-3-11b-vision",
        CoreModelId.llama_guard_3_11b_vision.value,
    ),
    ProviderModelEntry(
        provider_model_id="nomic-ai/nomic-embed-text-v1.5",
        model_type=ModelType.embedding,
        metadata={
            "embedding_dimension": 768,
            "context_length": 8192,
        },
    ),
]
```
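For illustration, here is one way the `MODEL_ENTRIES` list above might be consumed at runtime. This is a hedged sketch: the `by_provider_id` dict is a hypothetical helper (not part of the file), the import path is assumed from the repository layout, and the `provider_model_id` / `llama_model` attribute names come from `ProviderModelEntry` as used above.

```python
# Sketch: index the registry entries by their Fireworks-side model id so an
# incoming request can be resolved to its entry in O(1).
from llama_stack.providers.remote.inference.fireworks.models import MODEL_ENTRIES

by_provider_id = {entry.provider_model_id: entry for entry in MODEL_ENTRIES}

entry = by_provider_id["accounts/fireworks/models/llama-v3p1-8b-instruct"]
# Chat entries carry the core Llama model descriptor; the embedding entry
# (nomic-embed-text-v1.5) instead exposes its metadata dict.
print(entry.llama_model)
print(by_provider_id["nomic-ai/nomic-embed-text-v1.5"].metadata["embedding_dimension"])
```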