Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-27 18:50:41 +00:00)
# What does this PR do?

- Configured the ruff linter to automatically fix import-sorting issues.
- Set `--exit-non-zero-on-fix` so that a non-zero exit code is returned whenever fixes are applied.
- Enabled the `I` rule selection to focus on import-related linting rules.
- Ran the linter and formatted all imports across the codebase accordingly.
- Removed the `black` dependency from the "dev" group, since we use ruff.

## Test Plan

[Describe the tests you ran to verify your changes with result summaries. *Provide clear instructions so the plan can be easily re-executed.*]

Signed-off-by: Sébastien Han <seb@redhat.com>
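For reference, enabling import sorting in ruff is a `pyproject.toml` change; a minimal sketch of the configuration this PR describes might look like the following (the exact table layout and any surrounding settings are assumptions, not a copy of the repository's actual configuration):

```toml
# Hypothetical excerpt -- see the repository's pyproject.toml for the real settings.
[tool.ruff.lint]
# "I" enables the isort-style import-sorting rules.
select = ["I"]
```

The linter would then be invoked as `ruff check --fix --exit-non-zero-on-fix .`: `--fix` applies the import-order corrections in place, and `--exit-non-zero-on-fix` still returns a non-zero exit code so that CI flags the run even though the files were fixed.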
48 lines · 1.5 KiB · Python
```python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any, Dict, Optional

from llama_models.datatypes import CheckpointQuantizationFormat
from llama_models.llama3.api.datatypes import SamplingParams
from llama_models.sku_list import LlamaDownloadInfo
from pydantic import BaseModel, ConfigDict, Field


class PromptGuardModel(BaseModel):
    """Make a 'fake' Model-like object for Prompt Guard. Eventually this will be removed."""

    model_id: str = "Prompt-Guard-86M"
    description: str = "Prompt Guard. NOTE: this model will not be provided via `llama` CLI soon."
    is_featured: bool = False
    huggingface_repo: str = "meta-llama/Prompt-Guard-86M"
    max_seq_length: int = 2048
    is_instruct_model: bool = False
    quantization_format: CheckpointQuantizationFormat = CheckpointQuantizationFormat.bf16
    arch_args: Dict[str, Any] = Field(default_factory=dict)
    recommended_sampling_params: Optional[SamplingParams] = None

    def descriptor(self) -> str:
        return self.model_id

    # Pydantic v2 reserves the "model_" namespace by default; clearing
    # protected_namespaces allows field names like `model_id` above.
    model_config = ConfigDict(protected_namespaces=())


def prompt_guard_model_sku():
    return PromptGuardModel()


def prompt_guard_download_info():
    return LlamaDownloadInfo(
        folder="Prompt-Guard",
        files=[
            "model.safetensors",
            "special_tokens_map.json",
            "tokenizer.json",
            "tokenizer_config.json",
        ],
        pth_size=1,
    )
```
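For context, a minimal sketch of how these helpers might be exercised; the module path in the import is an assumption for illustration, not the file's confirmed location:

```python
# Hypothetical usage sketch -- the import path below is assumed.
from llama_stack.cli.model.safety_models import (
    prompt_guard_download_info,
    prompt_guard_model_sku,
)

model = prompt_guard_model_sku()
print(model.descriptor())      # "Prompt-Guard-86M"
print(model.huggingface_repo)  # "meta-llama/Prompt-Guard-86M"

info = prompt_guard_download_info()
print(info.folder)  # "Prompt-Guard"
print(info.files)   # weights and tokenizer files to download
```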