Fix precommit check after moving to ruff (#927)
The lint check on the main branch is failing. This fixes the lint check after we moved to ruff in https://github.com/meta-llama/llama-stack/pull/921. We need to move to a `ruff.toml` file, as well as fix and ignore some additional checks.
Signed-off-by: Yuan Tang <terrytangyuan@gmail.com>
parent 4773092dd1
commit 34ab7a3b6c

217 changed files with 981 additions and 2681 deletions
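
The commit message above mentions moving the lint configuration into a `ruff.toml` file and ignoring some additional checks. As a rough illustration only, a minimal `ruff.toml` along those lines might look like the sketch below; the line length and rule codes shown are assumptions for illustration, not the values actually adopted in #927.

# Hypothetical ruff.toml sketch; line length and rule codes are assumed, not taken from #927.
line-length = 120

[lint]
select = [
    "E",      # pycodestyle errors
    "F",      # pyflakes
    "I",      # import sorting (isort rules)
]
ignore = [
    "E501",   # line-too-long is left to the formatter
]

With a file like this at the repository root, `ruff check .` and `ruff format .` (for example, run from a pre-commit hook) discover the configuration automatically.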
@@ -47,33 +47,20 @@ class ModelPromptFormat(Subcommand):
         # Only Llama 3.1 and 3.2 are supported
         supported_model_ids = [
-            m
-            for m in CoreModelId
-            if model_family(m) in {ModelFamily.llama3_1, ModelFamily.llama3_2}
+            m for m in CoreModelId if model_family(m) in {ModelFamily.llama3_1, ModelFamily.llama3_2}
         ]
         model_str = "\n".join([m.value for m in supported_model_ids])
         try:
             model_id = CoreModelId(args.model_name)
         except ValueError:
-            self.parser.error(
-                f"{args.model_name} is not a valid Model. Choose one from --\n{model_str}"
-            )
+            self.parser.error(f"{args.model_name} is not a valid Model. Choose one from --\n{model_str}")
 
         if model_id not in supported_model_ids:
-            self.parser.error(
-                f"{model_id} is not a valid Model. Choose one from --\n {model_str}"
-            )
+            self.parser.error(f"{model_id} is not a valid Model. Choose one from --\n {model_str}")
 
-        llama_3_1_file = (
-            importlib.resources.files("llama_models") / "llama3_1/prompt_format.md"
-        )
-        llama_3_2_text_file = (
-            importlib.resources.files("llama_models") / "llama3_2/text_prompt_format.md"
-        )
-        llama_3_2_vision_file = (
-            importlib.resources.files("llama_models")
-            / "llama3_2/vision_prompt_format.md"
-        )
+        llama_3_1_file = importlib.resources.files("llama_models") / "llama3_1/prompt_format.md"
+        llama_3_2_text_file = importlib.resources.files("llama_models") / "llama3_2/text_prompt_format.md"
+        llama_3_2_vision_file = importlib.resources.files("llama_models") / "llama3_2/vision_prompt_format.md"
         if model_family(model_id) == ModelFamily.llama3_1:
             with importlib.resources.as_file(llama_3_1_file) as f:
                 content = f.open("r").read()

@@ -17,16 +17,12 @@ class PromptGuardModel(BaseModel):
     """Make a 'fake' Model-like object for Prompt Guard. Eventually this will be removed."""
 
     model_id: str = "Prompt-Guard-86M"
-    description: str = (
-        "Prompt Guard. NOTE: this model will not be provided via `llama` CLI soon."
-    )
+    description: str = "Prompt Guard. NOTE: this model will not be provided via `llama` CLI soon."
     is_featured: bool = False
     huggingface_repo: str = "meta-llama/Prompt-Guard-86M"
     max_seq_length: int = 2048
     is_instruct_model: bool = False
-    quantization_format: CheckpointQuantizationFormat = (
-        CheckpointQuantizationFormat.bf16
-    )
+    quantization_format: CheckpointQuantizationFormat = CheckpointQuantizationFormat.bf16
     arch_args: Dict[str, Any] = Field(default_factory=dict)
     recommended_sampling_params: Optional[SamplingParams] = None