Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-10-16 06:53:47 +00:00)
Split safety into (llama-guard, prompt-guard, code-scanner)
This commit is contained in:
parent 6d38b1690b
commit fdaec91747

14 changed files with 295 additions and 368 deletions
llama_stack/providers/inline/safety/prompt_guard/config.py (new file, +25)

@@ -0,0 +1,25 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+from enum import Enum
+
+from pydantic import BaseModel, field_validator
+
+
+class PromptGuardType(Enum):
+    injection = "injection"
+    jailbreak = "jailbreak"
+
+
+class PromptGuardConfig(BaseModel):
+    guard_type: str = PromptGuardType.injection.value
+
+    @classmethod
+    @field_validator("guard_type")
+    def validate_guard_type(cls, v):
+        if v not in [t.value for t in PromptGuardType]:
+            raise ValueError(f"Unknown prompt guard type: {v}")
+        return v