# What does this PR do?

Move around bits. This makes the copies from llama-models _much_ easier to maintain and ensures we don't entangle meta-reference specific tidbits into llama-models code even by accident.

Also, kills the `meta-reference-quantized-gpu` distro and rolls quantization deps into `meta-reference-gpu`.

## Test Plan

```
LLAMA_MODELS_DEBUG=1 \
  with-proxy llama stack run meta-reference-gpu \
  --env INFERENCE_MODEL=meta-llama/Llama-4-Scout-17B-16E-Instruct \
  --env INFERENCE_CHECKPOINT_DIR=<DIR> \
  --env MODEL_PARALLEL_SIZE=4 \
  --env QUANTIZATION_TYPE=fp8_mixed
```

Start a server with and without quantization. Point integration tests to it using:

```
pytest -s -v tests/integration/inference/test_text_inference.py \
  --stack-config http://localhost:8321 --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct
```
70 lines · 2.2 KiB · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import argparse
import json

from llama_stack.cli.subcommand import Subcommand
from llama_stack.cli.table import print_table
from llama_stack.models.llama.sku_list import resolve_model


class ModelDescribe(Subcommand):
    """Show details about a model"""

    def __init__(self, subparsers: argparse._SubParsersAction):
        super().__init__()
        self.parser = subparsers.add_parser(
            "describe",
            prog="llama model describe",
            description="Show details about a llama model",
            formatter_class=argparse.RawTextHelpFormatter,
        )
        self._add_arguments()
        self.parser.set_defaults(func=self._run_model_describe_cmd)

    def _add_arguments(self):
        self.parser.add_argument(
            "-m",
            "--model-id",
            type=str,
            required=True,
            help="See `llama model list` or `llama model list --show-all` for the list of available models",
        )

    def _run_model_describe_cmd(self, args: argparse.Namespace) -> None:
        from .safety_models import prompt_guard_model_sku

        prompt_guard = prompt_guard_model_sku()
        if args.model_id == prompt_guard.model_id:
            model = prompt_guard
        else:
            model = resolve_model(args.model_id)

        if model is None:
            self.parser.error(
                f"Model {args.model_id} not found; try 'llama model list' for a list of available models."
            )
            return

        headers = [
            "Model",
            model.descriptor(),
        ]

        rows = [
            ("Hugging Face ID", model.huggingface_repo or "<Not Available>"),
            ("Description", model.description),
            ("Context Length", f"{model.max_seq_length // 1024}K tokens"),
            ("Weights format", model.quantization_format.value),
            ("Model params.json", json.dumps(model.arch_args, indent=4)),
        ]

        print_table(
            rows,
            headers,
            separate_rows=True,
        )
```
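For readers unfamiliar with how these `Subcommand` classes get exercised, here is a minimal, self-contained sketch of the argparse wiring that `ModelDescribe` expects. The parent parser name and the model id used below are assumptions for illustration; the real CLI builds this hierarchy elsewhere in `llama_stack.cli` and passes `subparsers` into `ModelDescribe(subparsers)`.

```python
import argparse


def build_parser() -> argparse.ArgumentParser:
    # Assumed parent command "llama model"; the real CLI constructs this itself.
    parser = argparse.ArgumentParser(prog="llama model")
    subparsers = parser.add_subparsers(dest="command", required=True)

    # Stand-in for ModelDescribe(subparsers): same subcommand name, same
    # required -m/--model-id flag, and a func bound via set_defaults just as
    # the real class binds self._run_model_describe_cmd.
    describe = subparsers.add_parser("describe", prog="llama model describe")
    describe.add_argument("-m", "--model-id", type=str, required=True)
    describe.set_defaults(func=lambda args: print(f"describe {args.model_id}"))
    return parser


if __name__ == "__main__":
    # Equivalent of: llama model describe -m meta-llama/Llama-4-Scout-17B-16E-Instruct
    args = build_parser().parse_args(
        ["describe", "-m", "meta-llama/Llama-4-Scout-17B-16E-Instruct"]
    )
    args.func(args)
```

One design note on the file above: the `return` after `self.parser.error(...)` is belt-and-suspenders, since `ArgumentParser.error` already exits the process; the explicit return only matters if the parser's error handling is overridden.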