# What does this PR do?

Move around bits. This makes the copies from llama-models _much_ easier to maintain and ensures we don't entangle meta-reference-specific tidbits into llama-models code even by accident.

Also, kills the meta-reference-quantized-gpu distro and rolls quantization deps into meta-reference-gpu.

## Test Plan

```
LLAMA_MODELS_DEBUG=1 \
  with-proxy llama stack run meta-reference-gpu \
  --env INFERENCE_MODEL=meta-llama/Llama-4-Scout-17B-16E-Instruct \
  --env INFERENCE_CHECKPOINT_DIR=<DIR> \
  --env MODEL_PARALLEL_SIZE=4 \
  --env QUANTIZATION_TYPE=fp8_mixed
```

Start a server with and without quantization (omit `QUANTIZATION_TYPE` for the unquantized run). Point integration tests to it using:

```
pytest -s -v tests/integration/inference/test_text_inference.py \
  --stack-config http://localhost:8321 \
  --text-model meta-llama/Llama-4-Scout-17B-16E-Instruct
```
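As an illustrative companion to the pytest run, a one-off smoke test against the running server could look like the sketch below. This is a minimal sketch, not part of this PR; it assumes the `llama-stack-client` Python package is installed and the server from the test plan is listening on localhost:8321.

```python
# Sketch: one-off smoke test against the server started above (assumes the
# llama-stack-client package; adjust base_url/model_id to match your setup).
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# A single non-streaming chat completion confirms that the (optionally
# fp8-quantized) checkpoint loaded and can serve requests.
response = client.inference.chat_completion(
    model_id="meta-llama/Llama-4-Scout-17B-16E-Instruct",
    messages=[{"role": "user", "content": "Reply with the word 'ok'."}],
)
print(response.completion_message.content)
```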
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict

from pydantic import BaseModel, ConfigDict, Field

from llama_stack.models.llama.sku_list import LlamaDownloadInfo
from llama_stack.models.llama.sku_types import CheckpointQuantizationFormat


class PromptGuardModel(BaseModel):
    """Make a 'fake' Model-like object for Prompt Guard. Eventually this will be removed."""

    model_id: str = "Prompt-Guard-86M"
    description: str = "Prompt Guard. NOTE: this model will not be provided via `llama` CLI soon."
    is_featured: bool = False
    huggingface_repo: str = "meta-llama/Prompt-Guard-86M"
    max_seq_length: int = 2048
    is_instruct_model: bool = False
    quantization_format: CheckpointQuantizationFormat = CheckpointQuantizationFormat.bf16
    arch_args: Dict[str, Any] = Field(default_factory=dict)

    def descriptor(self) -> str:
        return self.model_id

    # Pydantic v2 reserves the "model_" attribute namespace; clearing
    # protected_namespaces silences the warning for the model_id field.
    model_config = ConfigDict(protected_namespaces=())


def prompt_guard_model_sku():
    return PromptGuardModel()


def prompt_guard_download_info():
    # Manifest of the files needed to fetch Prompt Guard locally.
    return LlamaDownloadInfo(
        folder="Prompt-Guard",
        files=[
            "model.safetensors",
            "special_tokens_map.json",
            "tokenizer.json",
            "tokenizer_config.json",
        ],
        pth_size=1,
    )
```
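For illustration, the helpers above might be exercised as follows. This is a minimal sketch; the expected values in the comments simply restate the defaults defined in the class and the download manifest.

```python
# Sketch: exercise the Prompt Guard SKU helpers defined above.
sku = prompt_guard_model_sku()
print(sku.descriptor())         # Prompt-Guard-86M
print(sku.huggingface_repo)     # meta-llama/Prompt-Guard-86M
print(sku.quantization_format)  # CheckpointQuantizationFormat.bf16

info = prompt_guard_download_info()
print(info.folder)              # Prompt-Guard
print(info.files)               # safetensors weights plus tokenizer files
```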