Add special case for prompt guard

commit cc5029a716 (parent a80b707ff8)

4 changed files with 76 additions and 13 deletions
llama_stack/cli/download.py

@@ -38,9 +38,6 @@ class Download(Subcommand):
 def setup_download_parser(parser: argparse.ArgumentParser) -> None:
     from llama_models.sku_list import all_registered_models

     models = all_registered_models()
     parser.add_argument(
         "--source",
         choices=["meta", "huggingface"],
@@ -123,16 +120,12 @@ def _hf_download(
     print(f"\nSuccessfully downloaded model to {true_output_dir}")


-def _meta_download(model: "Model", meta_url: str):
-    from llama_models.sku_list import llama_meta_net_info
-
+def _meta_download(model: "Model", meta_url: str, info: "LlamaDownloadInfo"):
     from llama_stack.distribution.utils.model_utils import model_local_dir

     output_dir = Path(model_local_dir(model.descriptor()))
     os.makedirs(output_dir, exist_ok=True)

-    info = llama_meta_net_info(model)
-
     # I believe we can use some concurrency here if needed but not sure it is worth it
     for f in info.files:
         output_file = str(output_dir / f)
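
The in-code comment above leaves the concurrency idea unimplemented. A minimal sketch of what a thread-pool version could look like; the `download_file` helper and the `*`-substitution into the signed URL are assumptions for illustration (based on the example URL later in this diff), not part of the commit:

```python
# Hypothetical sketch: parallel per-file downloads for _meta_download.
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from urllib.request import urlretrieve


def download_file(url: str, dest: str) -> str:
    # Blocking fetch; stands in for whatever the real CLI does per file
    # (progress bars, retries), which this hunk does not show.
    urlretrieve(url, dest)
    return dest


def parallel_meta_download(files, meta_url: str, output_dir: Path, workers: int = 4):
    # Assumption: the signed Meta URL carries a '*' wildcard that is
    # substituted with each file's relative path.
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = {
            pool.submit(download_file, meta_url.replace("*", f), str(output_dir / f)): f
            for f in files
        }
        for fut in as_completed(futures):
            fut.result()  # re-raise any download error
            print(f"Downloaded {futures[fut]}")
```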
@@ -147,7 +140,9 @@ def _meta_download(model: "Model", meta_url: str):


 def run_download_cmd(args: argparse.Namespace, parser: argparse.ArgumentParser):
-    from llama_models.sku_list import resolve_model
+    from llama_models.sku_list import llama_meta_net_info, resolve_model
+
+    from .model.safety_models import prompt_guard_download_info, prompt_guard_model_sku

     if args.manifest_file:
         _download_from_manifest(args.manifest_file)
@@ -157,7 +152,14 @@ def run_download_cmd(args: argparse.Namespace, parser: argparse.ArgumentParser):
         parser.error("Please provide a model id")
         return

-    model = resolve_model(args.model_id)
+    prompt_guard = prompt_guard_model_sku()
+    if args.model_id == prompt_guard.model_id:
+        model = prompt_guard
+        info = prompt_guard_download_info()
+    else:
+        model = resolve_model(args.model_id)
+        info = llama_meta_net_info(model)
+
     if model is None:
         parser.error(f"Model {args.model_id} not found")
         return
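
The same id-match-then-fallback branch reappears in describe.py below. A sketch of how it could be factored into one shared helper; `resolve_model_or_prompt_guard` is hypothetical and not part of this commit, though the imports follow the paths shown in this diff:

```python
# Hypothetical refactor: one helper both call sites could share.
from llama_models.sku_list import llama_meta_net_info, resolve_model

from llama_stack.cli.model.safety_models import (
    prompt_guard_download_info,
    prompt_guard_model_sku,
)


def resolve_model_or_prompt_guard(model_id: str):
    """Return (model, download_info), or (None, None) if the id is unknown.

    Prompt Guard is not in the llama_models SKU list, so it is matched by
    id before falling back to resolve_model().
    """
    prompt_guard = prompt_guard_model_sku()
    if model_id == prompt_guard.model_id:
        return prompt_guard, prompt_guard_download_info()
    model = resolve_model(model_id)
    if model is None:
        return None, None
    return model, llama_meta_net_info(model)
```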
@@ -171,7 +173,7 @@ def run_download_cmd(args: argparse.Namespace, parser: argparse.ArgumentParser):
             "Please provide the signed URL you received via email (e.g., https://llama3-1.llamameta.net/*?Policy...): "
         )
         assert meta_url is not None and "llamameta.net" in meta_url
-    _meta_download(model, meta_url)
+    _meta_download(model, meta_url, info)


 class ModelEntry(BaseModel):
llama_stack/cli/model/describe.py

@@ -39,7 +39,14 @@ class ModelDescribe(Subcommand):
         )

     def _run_model_describe_cmd(self, args: argparse.Namespace) -> None:
-        model = resolve_model(args.model_id)
+        from .safety_models import prompt_guard_model_sku
+
+        prompt_guard = prompt_guard_model_sku()
+        if args.model_id == prompt_guard.model_id:
+            model = prompt_guard
+        else:
+            model = resolve_model(args.model_id)
+
         if model is None:
             self.parser.error(
                 f"Model {args.model_id} not found; try 'llama model list' for a list of available models."
llama_stack/cli/model/list.py

@@ -34,6 +34,8 @@ class ModelList(Subcommand):
         )

     def _run_model_list_cmd(self, args: argparse.Namespace) -> None:
+        from .safety_models import prompt_guard_model_sku
+
         headers = [
             "Model Descriptor",
             "Hugging Face Repo",

@@ -41,7 +43,7 @@ class ModelList(Subcommand):
         ]

         rows = []
-        for model in all_registered_models():
+        for model in all_registered_models() + [prompt_guard_model_sku()]:
             if not args.show_all and not model.is_featured:
                 continue

llama_stack/cli/model/safety_models.py (new file, 52 lines)

@@ -0,0 +1,52 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from typing import Any, Dict, Optional
+
+from pydantic import BaseModel, ConfigDict, Field
+
+from llama_models.datatypes import *  # noqa: F403
+from llama_models.sku_list import LlamaDownloadInfo
+
+
+class PromptGuardModel(BaseModel):
+    """Make a 'fake' Model-like object for Prompt Guard. Eventually this will be removed."""
+
+    model_id: str = "Prompt-Guard-86M"
+    description: str = (
+        "Prompt Guard. NOTE: this model will not be provided via `llama` CLI soon."
+    )
+    is_featured: bool = False
+    huggingface_repo: str = "meta-llama/Prompt-Guard-86M"
+    max_seq_length: int = 2048
+    is_instruct_model: bool = False
+    quantization_format: CheckpointQuantizationFormat = (
+        CheckpointQuantizationFormat.bf16
+    )
+    arch_args: Dict[str, Any] = Field(default_factory=dict)
+    recommended_sampling_params: Optional[SamplingParams] = None
+
+    def descriptor(self) -> str:
+        return self.model_id
+
+    model_config = ConfigDict(protected_namespaces=())
+
+
+def prompt_guard_model_sku():
+    return PromptGuardModel()
+
+
+def prompt_guard_download_info():
+    return LlamaDownloadInfo(
+        folder="Prompt-Guard",
+        files=[
+            "model.safetensors",
+            "special_tokens_map.json",
+            "tokenizer.json",
+            "tokenizer_config.json",
+        ],
+        pth_size=1,
+    )
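
PromptGuardModel only has to look enough like a llama_models Model for the call sites above: descriptor(), is_featured, huggingface_repo, and the download info. A quick sketch exercising that duck-typed surface (assumes a llama-stack install so the import path resolves):

```python
# Sanity check of the surface the CLI code in this commit relies on.
from llama_stack.cli.model.safety_models import (
    prompt_guard_download_info,
    prompt_guard_model_sku,
)

model = prompt_guard_model_sku()
assert model.descriptor() == "Prompt-Guard-86M"  # used to pick the output dir
assert model.is_featured is False                # filtered by `llama model list`
print(model.huggingface_repo)                    # meta-llama/Prompt-Guard-86M

info = prompt_guard_download_info()
print(info.folder, info.files)                   # Prompt-Guard + its 4 files
```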